diff --git a/.deepsource.toml b/.deepsource.toml new file mode 100644 index 000000000000..306150a2f874 --- /dev/null +++ b/.deepsource.toml @@ -0,0 +1,12 @@ +version = 1 + +test_patterns = ["tests/**"] + +exclude_patterns = ["docs/**"] + +[[analyzers]] +name = "python" +enabled = true + + [analyzers.meta] + runtime_version = "3.x.x" diff --git a/.dockerignore b/.dockerignore index 5e9ee95f931a..29e6912ac6c6 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,4 +3,4 @@ docs .git* **/*.pyc **/__pycache__ - +!docker/configs diff --git a/.github/ISSUE_TEMPLATE/ask_question.md b/.github/ISSUE_TEMPLATE/ask_question.md deleted file mode 100644 index 1a85b4ac720e..000000000000 --- a/.github/ISSUE_TEMPLATE/ask_question.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -name: Ask a question -about: If you have a "How do I?" question please ask in the forum https://forum.rasa.com -title: '' -labels: question -assignees: '' - ---- - - diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index dbff0564699c..54b20e6da142 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -2,7 +2,7 @@ name: Bug report about: Create a report to help us improve title: '' -labels: bug +labels: 'area:rasa-oss :ferris_wheel:,type:bug :bug:' assignees: '' --- @@ -13,6 +13,8 @@ If you are asking a usage question (e.g. "How do I do xyz") please post your que **Rasa version**: +**Rasa SDK version** (if used & relevant): + **Rasa X version** (if used & relevant): **Python version**: diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000000..36653a0f09e7 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,5 @@ +blank_issues_enabled: false +contact_links: + - name: Ask a question + url: https://forum.rasa.com/ + about: If you have a "How do I?" question please ask in the forum https://forum.rasa.com diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 9bd85cd8df27..6de2e9f86d9e 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -2,7 +2,7 @@ name: Feature request about: Suggest an idea on how to improve Rasa title: '' -labels: enhancement +labels: 'area:rasa-oss :ferris_wheel:,type:enhancement :sparkles:' assignees: '' --- diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 1bb5fe8ddaa6..d4b3a8eefcea 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -2,8 +2,7 @@ - ... 
**Status (please check what you already did)**: -- [ ] made PR ready for code review - [ ] added some tests for the functionality - [ ] updated the documentation -- [ ] updated the changelog -- [ ] reformat files using `black` (please check [Readme](https://github.com/RasaHQ/rasa_nlu#code-style) for instructions) +- [ ] updated the changelog (please check [changelog](https://github.com/RasaHQ/rasa/tree/master/changelog) for instructions) +- [ ] reformat files using `black` (please check [Readme](https://github.com/RasaHQ/rasa#code-style) for instructions) diff --git a/.github/configs/mr-test-example.yaml b/.github/configs/mr-test-example.yaml new file mode 100644 index 000000000000..c9083a2ac8c3 --- /dev/null +++ b/.github/configs/mr-test-example.yaml @@ -0,0 +1,32 @@ +## Example configuration +#################### syntax ################# +## include: +## - dataset: [""] +## config: [""] +# +## Example: +## include: +## - dataset: ["Carbon Bot"] +## config: ["Sparse + DIET(bow) + ResponseSelector(bow)"] +# +## Shortcut: +## You can use the "all" shortcut to include all available configurations or datasets +# +## Example: Use the "Sparse + EmbeddingIntent + ResponseSelector(bow)" configuration +## for all available datasets +## include: +## - dataset: ["all"] +## config: ["Sparse + DIET(bow) + ResponseSelector(bow)"] +# +## Example: Use all available configurations for the "Carbon Bot" and "Sara" datasets +## and for the "Hermit" dataset use the "Sparse + DIET + ResponseSelector(T2T)" and +## "Sparse + ConveRT + DIET + ResponseSelector(T2T)" configurations: +## include: +## - dataset: ["Carbon Bot", "Sara"] +## config: ["all"] +## - dataset: ["Hermit"] +## config: ["Sparse + DIET(seq) + ResponseSelector(t2t)", "Sparse + ConveRT + DIET(seq) + ResponseSelector(t2t)"] + +include: + - dataset: ["Carbon Bot"] + config: ["Sparse + DIET(bow) + ResponseSelector(bow)"] diff --git a/.github/configs/mr-test-schedule.yaml b/.github/configs/mr-test-schedule.yaml new file mode 100644 index 000000000000..2b298ad7ef56 --- /dev/null +++ b/.github/configs/mr-test-schedule.yaml @@ -0,0 +1,63 @@ +########## +## Available datasets +########## +# - "Carbon Bot" +# - "Hermit" +# - "Private 1" +# - "Private 2" +# - "Private 3" +# - "Sara" + +########## +## Available configurations +########## +# - "BERT + DIET(bow) + ResponseSelector(bow)" +# - "BERT + DIET(seq) + ResponseSelector(t2t)" +# - "ConveRT + DIET(bow) + ResponseSelector(bow)" +# - "ConveRT + DIET(seq) + ResponseSelector(t2t)" +# - "Spacy + DIET(bow) + ResponseSelector(bow)" +# - "Spacy + DIET(seq) + ResponseSelector(t2t)" +# - "Sparse + ConveRT + DIET(bow) + ResponseSelector(bow)" +# - "Sparse + ConveRT + DIET(seq) + ResponseSelector(t2t)" +# - "Sparse + DIET(bow) + ResponseSelector(bow)" +# - "Sparse + DIET(seq) + ResponseSelector(t2t)" +# - "Sparse + Spacy + DIET(bow) + ResponseSelector(bow)" +# - "Sparse + Spacy + DIET(seq) + ResponseSelector(t2t)" + +## Example configuration +#################### syntax ################# +## include: +## - dataset: +## config: +# +#include: +# - dataset: "Carbon Bot" +# config: "Sparse + EmbeddingIntent + ResponseSelector(bow)" +### +# matrix +### +# dataset: [] +# config: [] + + +dataset: + - "Carbon Bot" + - "Sara" + - "Hermit" + - "Private 1" + - "Private 2" + - "Private 3" + +config: + - "BERT + DIET(bow) + ResponseSelector(bow)" + - "BERT + DIET(seq) + ResponseSelector(t2t)" + - "ConveRT + DIET(bow) + ResponseSelector(bow)" + - "ConveRT + DIET(seq) + ResponseSelector(t2t)" + - "Spacy + DIET(bow) + 
ResponseSelector(bow)" + - "Spacy + DIET(seq) + ResponseSelector(t2t)" + - "Sparse + ConveRT + DIET(bow) + ResponseSelector(bow)" + - "Sparse + ConveRT + DIET(seq) + ResponseSelector(t2t)" + - "Sparse + DIET(bow) + ResponseSelector(bow)" + - "Sparse + DIET(seq) + ResponseSelector(t2t)" + - "Sparse + Spacy + DIET(bow) + ResponseSelector(bow)" + - "Sparse + Spacy + DIET(seq) + ResponseSelector(t2t)" diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000000..8163a4430768 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,21 @@ +version: 2 +updates: +- package-ecosystem: pip + directory: "/" + schedule: + interval: monthly + time: '13:00' + pull-request-branch-name: + separator: "-" + open-pull-requests-limit: 10 + reviewers: + - alwx + labels: + - type:dependencies + ignore: + - dependency-name: prompt-toolkit + versions: + - "> 2.0.10" + - dependency-name: pytest-asyncio + versions: + - "> 0.10.0" diff --git a/.github/no-response.yml b/.github/no-response.yml index cfee11a6f893..baba728ee844 100644 --- a/.github/no-response.yml +++ b/.github/no-response.yml @@ -3,7 +3,7 @@ # Number of days of inactivity before an Issue is closed for lack of response daysUntilClose: 14 # Label requiring a response -responseRequiredLabel: more-information-needed +responseRequiredLabel: status:more-details-needed # Comment to post when closing an Issue for lack of response. Set to `false` to disable closeComment: > This issue has been automatically closed because there has been no response diff --git a/.github/runner/github-runner-deployment.yaml.tmpl b/.github/runner/github-runner-deployment.yaml.tmpl new file mode 100644 index 000000000000..f43a20269490 --- /dev/null +++ b/.github/runner/github-runner-deployment.yaml.tmpl @@ -0,0 +1,74 @@ +# GitHub Runner deployment - uses to deploy a github runner +# which is used by the CI for model regression tests +apiVersion: apps/v1 +kind: Deployment +metadata: + name: github-runner-{{getenv "GITHUB_RUN_ID"}} + namespace: github-runner + labels: + app: github-runner + pod: github-runner-{{getenv "GITHUB_RUN_ID"}} +spec: + replicas: 1 + selector: + matchLabels: + app: github-runner + pod: github-runner-{{getenv "GITHUB_RUN_ID"}} + template: + metadata: + labels: + app: github-runner + pod: github-runner-{{getenv "GITHUB_RUN_ID"}} + spec: + priorityClassName: high-priority + automountServiceAccountToken: false + terminationGracePeriodSeconds: 180 + containers: + - name: github-runner + image: us.gcr.io/rasa-platform/github-runner:latest + imagePullPolicy: Always + livenessProbe: + initialDelaySeconds: 30 + periodSeconds: 15 + failureThreshold: 3 + exec: + command: + - /bin/bash + - -c + - "if [[ `curl -sX GET -H \"Authorization: token ${GITHUB_PAT}\" \ + https://api.github.com/repos/${GITHUB_OWNER}/${GITHUB_REPOSITORY}/actions/runners | \ + jq -r '.runners[] | select(.name == \"'${POD_NAME}'\") | .status'` == \"offline\" ]]; then \ + echo \"The GitHub API returns offline status for the ${POD_NAME} runner\" && exit 1; fi" + resources: + limits: + nvidia.com/gpu: 1 + requests: + nvidia.com/gpu: 1 + memory: 10Gi + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + # RUNNER_LABELS - defines labels + # with which a github-runner will be registered + - name: RUNNER_LABELS + value: "self-hosted,gpu,kubernetes" + # GITHUB_OWNER - a name of the repository owner + - name: GITHUB_OWNER + valueFrom: + secretKeyRef: + name: github-rasa + key: owner + # GITHUB_REPOSITORY - a name of the repository + - name: 
GITHUB_REPOSITORY
+          valueFrom:
+            secretKeyRef:
+              name: github-rasa
+              key: repository
+        # GITHUB_PAT - Personal Access Token
+        - name: GITHUB_PAT
+          valueFrom:
+            secretKeyRef:
+              name: github-rasa
+              key: pat
diff --git a/.github/scripts/mr_generate_summary.py b/.github/scripts/mr_generate_summary.py
new file mode 100644
index 000000000000..ebfad6928ad9
--- /dev/null
+++ b/.github/scripts/mr_generate_summary.py
@@ -0,0 +1,60 @@
+# Collect the results of the various model test runs which are done as part of
+# the model regression CI pipeline and dump them as a single file artifact.
+# This artifact will then be published at the end of the tests.
+import json
+import os
+
+SUMMARY_FILE = os.environ["SUMMARY_FILE"]
+CONFIG = os.environ["CONFIG"]
+DATASET = os.environ["DATASET_NAME"]
+task_mapping = {
+    "intent_report.json": "intent_classification",
+    "CRFEntityExtractor_report.json": "entity_prediction",
+    "DIETClassifier_report.json": "entity_prediction",
+    "response_selection_report.json": "response_selection",
+}
+
+
+def generate_json(file, task, data):
+    if DATASET not in data:
+        data = {DATASET: {CONFIG: {}}, **data}
+    elif CONFIG not in data[DATASET]:
+        data[DATASET] = {CONFIG: {}, **data[DATASET]}
+
+    data[DATASET][CONFIG] = {
+        "accelerator_type": os.environ["ACCELERATOR_TYPE"],
+        "test_run_time": os.environ["TEST_RUN_TIME"],
+        "train_run_time": os.environ["TRAIN_RUN_TIME"],
+        "total_run_time": os.environ["TOTAL_RUN_TIME"],
+        **data[DATASET][CONFIG],
+    }
+
+    data[DATASET][CONFIG][task] = {**read_results(file)}
+
+    return data
+
+
+def read_results(file):
+    with open(file) as json_file:
+        data = json.load(json_file)
+
+    keys = ["accuracy", "weighted avg", "macro avg", "micro avg"]
+    result = {key: data[key] for key in keys if key in data}
+
+    return result
+
+
+if __name__ == "__main__":
+    data = {}
+    if os.path.exists(SUMMARY_FILE):
+        with open(SUMMARY_FILE) as json_file:
+            data = json.load(json_file)
+
+    for dirpath, dirnames, files in os.walk(os.environ["RESULT_DIR"]):
+        for f in files:
+            if f not in task_mapping.keys():
+                continue
+
+            data = generate_json(os.path.join(dirpath, f), task_mapping[f], data)
+
+    with open(SUMMARY_FILE, "w") as f:
+        json.dump(data, f, sort_keys=True, indent=2)
diff --git a/.github/scripts/mr_publish_results.py b/.github/scripts/mr_publish_results.py
new file mode 100644
index 000000000000..faf91390116d
--- /dev/null
+++ b/.github/scripts/mr_publish_results.py
@@ -0,0 +1,67 @@
+# Send model regression test results to Segment with a summary
+# of all test results.
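+#
+# For orientation (illustrative, not exhaustive): each of the `*_report.json`
+# files listed in `task_mapping` below is a classification report, i.e. a dict
+# that may contain summary keys such as "accuracy", "weighted avg", "macro avg"
+# and "micro avg" next to the per-label entries. `read_results` keeps only
+# these summary keys before the payload is sent to Segment.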
+import analytics +import datetime +import json +import os + +analytics.write_key = os.environ["SEGMENT_TOKEN"] + +task_mapping = { + "intent_report.json": "Intent Classification", + "CRFEntityExtractor_report.json": "Entity Prediction", + "DIETClassifier_report.json": "Entity Prediction", + "response_selection_report.json": "Response Selection", +} + + +def send_to_segment(context): + jobID = os.environ["GITHUB_RUN_ID"] + + analytics.identify( + jobID, {"name": "model-regression-tests", "created_at": datetime.datetime.now()} + ) + + analytics.track( + jobID, + "results", + { + "dataset": os.environ["DATASET_NAME"], + "workflow": os.environ["GITHUB_WORKFLOW"], + "config": os.environ["CONFIG"], + "pr_url": os.environ["PR_URL"], + "accelerator_type": os.environ["ACCELERATOR_TYPE"], + "test_run_time": os.environ["TEST_RUN_TIME"], + "train_run_time": os.environ["TRAIN_RUN_TIME"], + "total_run_time": os.environ["TOTAL_RUN_TIME"], + "github_run_id": os.environ["GITHUB_RUN_ID"], + "github_sha": os.environ["GITHUB_SHA"], + "github_event": os.environ["GITHUB_EVENT_NAME"], + **context, + }, + ) + + +def read_results(file): + with open(file) as json_file: + data = json.load(json_file) + + keys = ["accuracy", "weighted avg", "macro avg", "micro avg"] + result = {key: data[key] for key in keys if key in data} + + return result + + +def push_results(file_name, file): + result = read_results(file) + result["file_name"] = file_name + result["task"] = task_mapping[file_name] + send_to_segment(result) + + +if __name__ == "__main__": + for dirpath, dirnames, files in os.walk(os.environ["RESULT_DIR"]): + for f in files: + if any(f.endswith(valid_name) for valid_name in task_mapping.keys()): + push_results(f, os.path.join(dirpath, f)) + analytics.flush() diff --git a/.github/stale.yml b/.github/stale.yml index 796a0c28ac8d..d39142e948b9 100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -4,8 +4,12 @@ daysUntilStale: 90 daysUntilClose: 7 # Issues with these labels will never be considered stale exemptLabels: - - enhancement - - discussion + - "type:enhancement ✨" + - "type:enhancement :sparkles:" + - "type:discussion 👨‍👧‍👦" + - "type:discussion :family_man_girl_boy:" + - "type:docs 📖" + - "type:docs :book:" # Label to use when marking an issue as stale staleLabel: stale # Comment to post when marking an issue as stale. Set to `false` to disable diff --git a/.github/templates/README.md b/.github/templates/README.md new file mode 100644 index 000000000000..8bd722710614 --- /dev/null +++ b/.github/templates/README.md @@ -0,0 +1,58 @@ +# gomplate templates for GitHub Actions + +This document describes gomplate templates use for GitHub Actions. + +## Requirements + +You have to have installed [gomplate](https://docs.gomplate.ca/installing/) tool in order to render a template file. + +> gomplate is a template renderer which supports a growing list of datastores, such as: JSON (including EJSON - encrypted JSON), YAML, AWS EC2 metadata, BoltDB, Hashicorp Consul and Hashicorp Vault secrets. + +## Templates + +Below you can find a list of templates with their description and the commands to render them. + + +### `configuration_variables.tmpl` + +The template maps dataset name and configuration name for the model regression tests into paths where files are located. As a result, the template returns two environment variables `DATASET` and `CONFIG` which contain paths to file/directory. 
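+
+For illustration only, the `mapping` datasource (the `dataset_config_mapping.json` file from the dataset repository) can be assumed to look roughly like the following; the actual entries, paths and languages live in that repository and the values below are placeholders:
+
+```
+{
+  "datasets": {
+    "Carbon Bot": { "path": "<path to the dataset>", "language": "<language>" }
+  },
+  "configurations": {
+    "Sparse + DIET(bow) + ResponseSelector(bow)": "<config file>"
+  }
+}
+```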
+
+#### How to run locally
+
+```
+gomplate -d mapping= -f .github/templates/configuration_variables.tmpl
+```
+
+### `model_regression_test_config_comment.tmpl`
+
+The template returns a comment message which is used as a help description in a PR. The template reads the `.github/configs/mr-test-example.yaml` file and includes it as example content.
+
+The help message is triggered by adding the `status:model-regression-tests` label.
+A comment with the help message is added if a PR doesn't contain a comment with a configuration for the model regression tests.
+
+#### How to run locally
+
+```
+gomplate -f .github/templates/model_regression_test_config_comment.tmpl
+```
+
+The template uses the `GITHUB_ACTOR` environment variable; you have to export the variable before executing the command.
+
+### `model_regression_test_config_to_json.tmpl`
+
+The template reads an issue or PR comment and transforms a YAML code block into JSON.
+
+#### How to run locally
+
+```
+gomplate -d github=https://api.github.com/repos/${{ github.repository }}/issues/comments/${{ comment-id }} -H 'github=Authorization:token ${{ secrets.GITHUB_TOKEN }}' -f .github/templates/model_regression_test_config_to_json.tmpl
+```
+
+### `model_regression_test_results.tmpl`
+
+The template reads a file with a report (the report file is available as an artifact in the model regression tests workflow) and returns a markdown table with a summary of the tests.
+
+#### How to run locally
+
+```
+gomplate -d data=report.json -f .github/templates/model_regression_test_results.tmpl
+```
diff --git a/.github/templates/configuration_variables.tmpl b/.github/templates/configuration_variables.tmpl
new file mode 100644
index 000000000000..82e3cee745df
--- /dev/null
+++ b/.github/templates/configuration_variables.tmpl
@@ -0,0 +1,14 @@
+{{- /*
+
+The template maps dataset name and configuration name for the model
+regression tests into paths where files are located. As a result,
+the template returns two environment variables `DATASET` and `CONFIG`
+which contain paths to a file/directory.
+
+ */ -}}
+{{- $mapping := (datasource "mapping") -}}
+{{- $dataset := (index $mapping.datasets (getenv "DATASET_NAME")) -}}
+{{- $config := (index $mapping.configurations (getenv "CONFIG_NAME")) -}}
+
+export DATASET="{{ $dataset.path }}"
+export CONFIG="{{ $dataset.language }}/{{ $config }}"
diff --git a/.github/templates/model_regression_test_config_comment.tmpl b/.github/templates/model_regression_test_config_comment.tmpl
new file mode 100644
index 000000000000..4d078b578639
--- /dev/null
+++ b/.github/templates/model_regression_test_config_comment.tmpl
@@ -0,0 +1,34 @@
+{{- /*
+
+The template returns a comment message which is used as a help description
+in a PR. The template reads the `.github/configs/mr-test-example.yaml` file
+and includes it as example content.
+
+The help message is triggered by adding the `status:model-regression-tests` label.
+A comment with the help message is added if a PR doesn't contain a comment
+with a configuration for the model regression tests.
+
+ */ -}}
+Hey @{{ .Env.GITHUB_ACTOR }}! :wave: To run model regression tests, comment with the `/modeltest` command and a configuration.
+
+_Tips :bulb:: The model regression test will be run on `push` events.
You can re-run the tests by re-add `status:model-regression-tests` label or use a `Re-run jobs` button in Github Actions workflow._ + +_Tips :bulb:: Every time when you want to change a configuration you should edit the comment with the previous configuration._ + +You can copy this in your comment and customize: + +> /modeltest +> ~~~yml +>```yml +>########## +>## Available datasets +>########## +{{range (coll.Keys (datasource "mapping").datasets)}}># - "{{.}}"{{"\n"}}{{ end -}} +> +>########## +>## Available configurations +>########## +{{range (coll.Keys (datasource "mapping").configurations)}}># - "{{.}}"{{"\n"}}{{ end -}} +> +{{range split (file.Read ".github/configs/mr-test-example.yaml") "\n"}}>{{.}}{{"\n"}}{{ end -}} +>``` diff --git a/.github/templates/model_regression_test_config_to_json.tmpl b/.github/templates/model_regression_test_config_to_json.tmpl new file mode 100644 index 000000000000..4ba14a13a456 --- /dev/null +++ b/.github/templates/model_regression_test_config_to_json.tmpl @@ -0,0 +1,31 @@ +{{- /* + +The template reads an issue/a PR comment and transforms a YAML code block into JSON. + +*/ -}} +{{- $config := ((datasource "github").body | regexp.Find "```(?s)(.*)```" | regexp.ReplaceLiteral "```.*|\r" "" | yaml | toJSON | json) -}} +{"include":[ +{{- $inc := coll.Slice -}} +{{- $dataset := coll.Slice -}} +{{- range $pair := $config.include -}} +{{- /* use all available datasets if value is equal to all */ -}} +{{- if eq (index $pair.dataset 0) "all" -}} +{{ $dataset = (coll.Keys (datasource "mapping").datasets) }} +{{- else -}} +{{- $dataset = $pair.dataset -}} +{{- end -}} +{{- range $index_dataset, $value_dataset := $dataset -}} +{{- range $index_config, $value_config := $pair.config -}} +{{- /* use all available configurations if value is equal to all */ -}} +{{- if eq $value_config "all" -}} +{{- range $config_name, $config_file := (datasource "mapping").configurations -}} +{{ $inc = (coll.Append (dict "dataset" $value_dataset "config" $config_name | toJSON) $inc) -}} +{{- end -}} +{{- else -}} +{{ $inc = (coll.Append (dict "dataset" $value_dataset "config" $value_config | toJSON) $inc) -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- join $inc "," -}} +]} diff --git a/.github/templates/model_regression_test_results.tmpl b/.github/templates/model_regression_test_results.tmpl new file mode 100644 index 000000000000..3fca8c42b8ff --- /dev/null +++ b/.github/templates/model_regression_test_results.tmpl @@ -0,0 +1,34 @@ +{{- /* + +The template reads a file with a report (the report file is available +as an artifact in the model regression tests workflow) and returns +a markdown table with a summary of the tests. + +*/ -}} +{{ range $dataset, $config := (datasource "data")}} +Dataset: `{{$dataset}}` + +| Configuration | Intent Classification Micro F1 | Entity Recognition Micro F1 | Response Selection Micro F1 | +|---------------|-----------------|-----------------|-------------------| +{{ range $config_name, $config_data := $config -}} +| `{{ $config_name }}`
test: `{{ $config_data.test_run_time }}`, train: `{{ $config_data.train_run_time }}`, total: `{{ $config_data.total_run_time }}` | +{{- if has $config_data "intent_classification" -}} +{{- $intent_class := index $config_data.intent_classification -}} +{{- if (has (index $intent_class "micro avg") "f1-score") -}}{{ printf "%.4f" (index (index $intent_class "micro avg") "f1-score") }}{{- else if (has $intent_class "accuracy") -}}{{ printf "%.4f" $intent_class.accuracy }}{{- else -}}`no data`{{- end -}} | +{{- else -}} +`no data`| +{{- end -}} +{{- if has $config_data "entity_prediction" -}} +{{- $entity_class := $config_data.entity_prediction -}} +{{- if (has (index $entity_class "micro avg") "f1-score") -}}{{ printf "%.4f" (index (index $entity_class "micro avg") "f1-score") }}{{- else if (has $entity_class "accuracy") -}}{{ printf "%.4f" $entity_class.accuracy }}{{- else -}}`no data`{{- end -}} | +{{- else -}} +`no data`| +{{- end -}} +{{- if has $config_data "response_selection" -}} +{{- $response_class := $config_data.response_selection -}} +{{- if (has (index $response_class "micro avg") "f1-score") -}}{{ printf "%.4f" (index (index $response_class "micro avg") "f1-score") }}{{- else if (has $response_class "accuracy") -}}{{ printf "%.4f" $response_class.accuracy }}{{- else -}}`no data`{{- end -}} | +{{- else -}} +`no data`| +{{- end }} +{{end}} +{{end}} diff --git a/.github/workflows/automatic-pr-merger.yml b/.github/workflows/automatic-pr-merger.yml new file mode 100644 index 000000000000..31340b300738 --- /dev/null +++ b/.github/workflows/automatic-pr-merger.yml @@ -0,0 +1,25 @@ +name: Automatic PR Merger + +on: + push: {} # update PR when base branch is updated + status: {} # try to merge when other checks are completed + pull_request_review: # try to merge after review + types: + - submitted + - edited + - dismissed + pull_request: # try to merge if labels have changed (white/black list) + types: + - labeled + - unlabeled + +jobs: + # thats's all. 
single step is needed - if PR is mergeable according to + # branch protection rules it will be merged automatically + mergepal: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: rasahq/merge-pal-action@master + with: + token: ${{ secrets.RASABOT_AUTOMERGE_GITHUB_TOKEN || secrets.GITHUB_TOKEN}} diff --git a/.github/workflows/ci-model-regression-on-schedule.yml b/.github/workflows/ci-model-regression-on-schedule.yml new file mode 100644 index 000000000000..a9160e8f028a --- /dev/null +++ b/.github/workflows/ci-model-regression-on-schedule.yml @@ -0,0 +1,231 @@ +# The docs: https://www.notion.so/rasa/The-CI-for-model-regression-tests-92af7185e08e4fb2a0c764770a8e9095 +name: CI - Model Regression + +on: + schedule: + # Run once a week + - cron: '1 23 * * */7' + +env: + GKE_ZONE: us-central1-a + +jobs: + read_test_configuration: + name: Reads tests configuration + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + steps: + - name: Checkout master + uses: actions/checkout@v2 + + - name: Download yq + run: | + curl --location https://github.com/mikefarah/yq/releases/download/3.3.0/yq_linux_amd64 -o yq + chmod +x yq + + - name: Check if a configuration file exists + run: test -f .github/configs/mr-test-schedule.yaml + + - name: Set matrix values + id: set-matrix + shell: bash + run: echo "::set-output name=matrix::$(./yq -j r .github/configs/mr-test-schedule.yaml)" + + deploy_runner_gpu: + name: Deploy Github Runner - GPU + needs: read_test_configuration + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Download gomplate + run: |- + curl -o gomplate -sSL https://github.com/hairyhenderson/gomplate/releases/download/v3.6.0/gomplate_linux-amd64 + chmod 755 gomplate + + - name: Render deployment template + run: |- + ./gomplate -f .github/runner/github-runner-deployment.yaml.tmpl -o runner_deployment.yaml + + # Setup gcloud CLI + - uses: GoogleCloudPlatform/github-actions@0.1.2 + with: + service_account_key: ${{ secrets.GKE_SA_RASA_CI_GPU }} + service_account_email: ${{ secrets.GKE_RASA_CI_GPU_SA_NAME }} + + # Get the GKE credentials so we can deploy to the cluster + - run: |- + gcloud container clusters get-credentials "${{ secrets.GKE_GPU_CLUSTER }}" --zone "$GKE_ZONE" --project "${{ secrets.GKE_SA_RASA_CI_GPU_PROJECT }}" + + - name: Deploy Github Runner + run: |- + kubectl apply -f runner_deployment.yaml + kubectl -n github-runner rollout status --timeout=15m deployment/github-runner-$GITHUB_RUN_ID + + model_regression_test_gpu: + name: Model Regression Tests - GPU + continue-on-error: true + needs: + - deploy_runner_gpu + - read_test_configuration + env: + # Determine where CUDA and Nvidia libraries are located. 
TensorFlow looks for libraries in the given paths + LD_LIBRARY_PATH: "/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda/lib64:/usr/local/nvidia/lib:/usr/local/nvidia/lib64" + ACCELERATOR_TYPE: "GPU" + runs-on: [self-hosted, gpu] + strategy: + max-parallel: 1 + matrix: ${{fromJson(needs.read_test_configuration.outputs.matrix)}} + + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Checkout dataset + uses: actions/checkout@v2 + with: + repository: ${{ secrets.DATASET_REPOSITORY }} + token: ${{ secrets.ML_TEST_SA_PAT }} + path: 'dataset' + + - name: Set DATASET and CONFIG variables + id: set_dataset_config_vars + env: + DATASET_NAME: "${{ matrix.dataset }}" + CONFIG_NAME: "${{ matrix.config }}" + run: |- + # determine DATASET and CONFIG environment variables + source <(gomplate -d mapping=./dataset/dataset_config_mapping.json -f .github/templates/configuration_variables.tmpl) + + # Not all configurations are available for all datasets. + # The job will fail and the workflow continues, if the configuration file doesn't exist + # for a given dataset + + echo "::set-output name=is_dataset_exists::true" + echo "::set-output name=is_config_exists::true" + + test -f dataset/configs/$CONFIG || (echo "::warning::The ${{ matrix.config }} configuration file doesn't exist. Skipping the job." \ + && echo "::set-output name=is_dataset_exists::false" && exit 0) + test -d dataset/$DATASET || (echo "::warning::The ${{ matrix.dataset }} dataset doesn't exist. Skipping the job." \ + && echo "::set-output name=is_config_exists::false" && exit 0) + + echo "::set-env name=DATASET::${DATASET}" + echo "::set-env name=CONFIG::${CONFIG}" + + - name: Set up Python 3.8 🐍 + uses: actions/setup-python@v2 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + with: + python-version: 3.8 + + - name: Read Poetry Version 🔢 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + run: | + echo "::set-env name=POETRY_VERSION::$(scripts/poetry-version.sh)" + shell: bash + + - name: Install poetry 🦄 + uses: Gr1N/setup-poetry@v1 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + with: + poetry-version: ${{ env.POETRY_VERSION }} + + - name: Load Poetry Cached Libraries ⬇ + uses: actions/cache@v1 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + with: + path: ~/.cache/pypoetry/virtualenvs + key: ${{ runner.os }}-poetry-3.8-${{ hashFiles('**/poetry.lock') }} + restore-keys: ${{ runner.os }}-poetry-3.8 + + - name: Install Dependencies 📦 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + run: | + poetry install --extras full + make install + poetry run python -m spacy download de_core_news_md + poetry run python -m spacy link --force de_core_news_md de + + - name: Validate that GPUs are working + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + run: |- + poetry run python -c 'from tensorflow.python.client import device_lib; print(device_lib.list_local_devices())' || true + + - name: Run test + id: run_test + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && 
steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + env: + TFHUB_CACHE_DIR: ~/.tfhub_cache/ + OMP_NUM_THREADS: 1 + run: |- + poetry run rasa --version + + export NOW_TRAIN=$(gomplate -i '{{ (time.Now).Format time.RFC3339}}'); + cd ${{ github.workspace }}/dataset + poetry run rasa train nlu --quiet -u $DATASET/train -c configs/$CONFIG --out models/$DATASET/$CONFIG + echo "::set-output name=train_run_time::$(gomplate -i '{{ $t := time.Parse time.RFC3339 (getenv "NOW_TRAIN") }}{{ (time.Since $t).Round (time.Second 1) }}')" + + export NOW_TEST=$(gomplate -i '{{ (time.Now).Format time.RFC3339}}'); + poetry run rasa test nlu --quiet -u $DATASET/test -m models/$DATASET/$CONFIG --out ${{ github.workspace }}/results/$DATASET/$CONFIG + + echo "::set-output name=test_run_time::$(gomplate -i '{{ $t := time.Parse time.RFC3339 (getenv "NOW_TEST") }}{{ (time.Since $t).Round (time.Second 1) }}')" + echo "::set-output name=total_run_time::$(gomplate -i '{{ $t := time.Parse time.RFC3339 (getenv "NOW_TRAIN") }}{{ (time.Since $t).Round (time.Second 1) }}')" + + # Download the results of the previous runs + # The report file is extended with new results every next job run + - name: Download artifact + uses: actions/download-artifact@v2 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + continue-on-error: true + with: + name: report.json + + - name: Generate a JSON file with a report / Publish results to Segment + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + env: + SUMMARY_FILE: "./report.json" + SEGMENT_TOKEN: ${{ secrets.SEGMENT_TOKEN }} + DATASET_NAME: ${{ matrix.dataset }} + RESULT_DIR: "${{ github.workspace }}/results" + CONFIG: ${{ matrix.config }} + TEST_RUN_TIME: ${{ steps.run_test.outputs.test_run_time }} + TRAIN_RUN_TIME: ${{ steps.run_test.outputs.train_run_time }} + TOTAL_RUN_TIME: ${{ steps.run_test.outputs.total_run_time }} + PR_URL: "" + run: |- + poetry run pip install analytics-python + poetry run python .github/scripts/mr_publish_results.py + poetry run python .github/scripts/mr_generate_summary.py + cat $SUMMARY_FILE + + - name: Upload an artifact with the report + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + uses: actions/upload-artifact@v2 + with: + name: report.json + path: ./report.json + + remove_runner_gpu: + name: Delete Github Runner - GPU + if: always() + needs: + - deploy_runner_gpu + - model_regression_test_gpu + runs-on: ubuntu-latest + + steps: + # Setup gcloud CLI + - uses: GoogleCloudPlatform/github-actions@0.1.2 + with: + service_account_key: ${{ secrets.GKE_SA_RASA_CI_GPU }} + service_account_email: ${{ secrets.GKE_RASA_CI_GPU_SA_NAME }} + + # Get the GKE credentials so we can deploy to the cluster + - run: |- + gcloud container clusters get-credentials "${{ secrets.GKE_GPU_CLUSTER }}" --zone "$GKE_ZONE" --project "${{ secrets.GKE_SA_RASA_CI_GPU_PROJECT }}" + + - name: Remove Github Runner + run: kubectl -n github-runner delete deployments github-runner-${GITHUB_RUN_ID} --grace-period=30 diff --git a/.github/workflows/ci-model-regression.yml b/.github/workflows/ci-model-regression.yml new file mode 100644 index 000000000000..3080e4dd3513 --- /dev/null +++ b/.github/workflows/ci-model-regression.yml @@ -0,0 +1,589 @@ +# The docs: 
https://www.notion.so/rasa/The-CI-for-model-regression-tests-92af7185e08e4fb2a0c764770a8e9095 +name: CI - Model Regression + +on: + push: + branches: + - '[0-9]+.[0-9]+.x' + tags: + - '**' + pull_request: + types: [opened, synchronize, labeled] + +env: + GKE_ZONE: us-central1-a + +jobs: + cleanup_runs: + name: Cancel old branch builds + runs-on: ubuntu-latest + if: "!startsWith(github.ref, 'refs/tags/') && github.ref != 'refs/heads/master'" + + steps: + - name: Find and cancel old builds of this branch + uses: rokroskar/workflow-run-cleanup-action@v0.2.2 + env: + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + + read_test_configuration: + name: Reads tests configuration + needs: cleanup_runs + if: "github.repository == 'RasaHQ/rasa' && contains(github.event.pull_request.labels.*.name, 'status:model-regression-tests')" + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + configuration_id: ${{ steps.fc_config.outputs.comment-id }} + steps: + - name: Checkout master + uses: actions/checkout@v2 + + - name: Checkout dataset + uses: actions/checkout@v2 + with: + repository: ${{ secrets.DATASET_REPOSITORY }} + token: ${{ secrets.ML_TEST_SA_PAT }} + path: 'dataset' + + - name: Download gomplate + run: |- + sudo curl -o /usr/local/bin/gomplate -sSL https://github.com/hairyhenderson/gomplate/releases/download/v3.6.0/gomplate_linux-amd64 + sudo chmod +x /usr/local/bin/gomplate + + - name: Find a comment with configuration + uses: tczekajlo/find-comment@master + id: fc_config + with: + token: ${{ secrets.GITHUB_TOKEN }} + issue-number: ${{ github.event.number }} + body-includes: "^/modeltest" + + - run: echo ${{ steps.fc_config.outputs.comment-id }} + + - name: Render help description from template + id: get_help_description + run: | + OUTPUT=$(gomplate -d mapping=./dataset/dataset_config_mapping.json -f .github/templates/model_regression_test_config_comment.tmpl) + OUTPUT="${OUTPUT//$'\n'/'%0A'}" + OUTPUT="${OUTPUT//$'\r'/'%0D'}" + echo "::set-output name=help_description::$OUTPUT" + + - name: Create a comment with help description + uses: RasaHQ/create-comment@v1 + with: + mode: 'delete-previous' + id: comment_help_description + github-token: ${{ secrets.GITHUB_TOKEN }} + body: | + ${{ steps.get_help_description.outputs.help_description }} + + - if: steps.fc_config.outputs.comment-id == '' + run: echo "::error::Cannot find a comment with the configuration" + name: Log a warning message if a configuration cannot be found + + - name: Read configuration from a PR comment + if: steps.fc_config.outputs.comment-id != '' + id: set-matrix + run: |- + echo "::set-output name=matrix::$(gomplate -d mapping=./dataset/dataset_config_mapping.json -d github=https://api.github.com/repos/${{ github.repository }}/issues/comments/${{ steps.fc_config.outputs.comment-id }} -H 'github=Authorization:token ${{ secrets.GITHUB_TOKEN }}' -f .github/templates/model_regression_test_config_to_json.tmpl)" + + - name: Update the comment with the configuration + uses: peter-evans/create-or-update-comment@v1 + if: steps.fc_config.outputs.comment-id != '' + with: + comment-id: ${{ steps.fc_config.outputs.comment-id }} + body: | + + reactions: eyes + + - name: Re-create the comment with the configuration + uses: RasaHQ/create-comment@v1 + if: steps.fc_config.outputs.comment-id != '' && steps.fc_config.outputs.comment-body != '' + with: + mode: 'delete-previous' + id: comment_configuration + github-token: ${{ secrets.GITHUB_TOKEN }} + body: ${{ steps.fc_config.outputs.comment-body }} + + - name: Find a 
comment with configuration - update + uses: tczekajlo/find-comment@master + id: fc_config_update + with: + token: ${{ secrets.GITHUB_TOKEN }} + issue-number: ${{ github.event.number }} + body-includes: "^/modeltest" + + - name: Add reaction + uses: peter-evans/create-or-update-comment@v1 + if: steps.fc_config_update.outputs.comment-id != '' + with: + edit-mode: 'replace' + comment-id: ${{ steps.fc_config_update.outputs.comment-id }} + reactions: heart, hooray, rocket + + - name: Add a comment that the tests are in progress + uses: RasaHQ/create-comment@v1 + if: steps.fc_config_update.outputs.comment-id != '' + with: + mode: 'delete-previous' + id: comment_tests_in_progress + github-token: ${{ secrets.GITHUB_TOKEN }} + body: | + The model regression tests have started. It might take a while, please be patient. + As soon as results are ready you'll see a new comment with the results. + + Used configuration can be found in [the comment.](https://github.com/${{ github.repository }}/pull/${{ github.event.number}}#issuecomment-${{ steps.fc_config_update.outputs.comment-id }}) + + deploy_runner_gpu: + name: Deploy Github Runner - GPU + needs: read_test_configuration + runs-on: ubuntu-latest + if: "contains(github.event.pull_request.labels.*.name, 'runner:gpu') && github.repository == 'RasaHQ/rasa' && contains(github.event.pull_request.labels.*.name, 'status:model-regression-tests') && needs.read_test_configuration.outputs.configuration_id != ''" + + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Download gomplate + run: |- + sudo curl -o /usr/local/bin/gomplate -sSL https://github.com/hairyhenderson/gomplate/releases/download/v3.6.0/gomplate_linux-amd64 + sudo chmod +x /usr/local/bin/gomplate + + - name: Render deployment template + run: |- + gomplate -f .github/runner/github-runner-deployment.yaml.tmpl -o runner_deployment.yaml + + # Setup gcloud CLI + - uses: GoogleCloudPlatform/github-actions@0.1.2 + with: + service_account_key: ${{ secrets.GKE_SA_RASA_CI_GPU }} + service_account_email: ${{ secrets.GKE_RASA_CI_GPU_SA_NAME}} + + # Get the GKE credentials so we can deploy to the cluster + - run: |- + gcloud container clusters get-credentials "${{ secrets.GKE_GPU_CLUSTER }}" --zone "$GKE_ZONE" --project "${{ secrets.GKE_SA_RASA_CI_GPU_PROJECT }}" + + - name: Deploy Github Runner + run: |- + kubectl apply -f runner_deployment.yaml + kubectl -n github-runner rollout status --timeout=15m deployment/github-runner-$GITHUB_RUN_ID + + model_regression_test_gpu: + name: Model Regression Tests - GPU + continue-on-error: true + needs: + - deploy_runner_gpu + - read_test_configuration + env: + # Determine where CUDA and Nvidia libraries are located. 
TensorFlow looks for libraries in the given paths + LD_LIBRARY_PATH: "/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda/lib64:/usr/local/nvidia/lib:/usr/local/nvidia/lib64" + ACCELERATOR_TYPE: "GPU" + runs-on: [self-hosted, gpu] + strategy: + max-parallel: 1 + matrix: ${{fromJson(needs.read_test_configuration.outputs.matrix)}} + if: "contains(github.event.pull_request.labels.*.name, 'runner:gpu') && github.repository == 'RasaHQ/rasa' && contains(github.event.pull_request.labels.*.name, 'status:model-regression-tests') && needs.read_test_configuration.outputs.configuration_id != ''" + + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Checkout dataset + uses: actions/checkout@v2 + with: + repository: ${{ secrets.DATASET_REPOSITORY }} + token: ${{ secrets.ML_TEST_SA_PAT }} + path: 'dataset' + + - name: Set DATASET and CONFIG variables + id: set_dataset_config_vars + env: + DATASET_NAME: "${{ matrix.dataset }}" + CONFIG_NAME: "${{ matrix.config }}" + run: |- + # determine DATASET and CONFIG environment variables + source <(gomplate -d mapping=./dataset/dataset_config_mapping.json -f .github/templates/configuration_variables.tmpl) + + # Not all configurations are available for all datasets. + # The job will fail and the workflow continues, if the configuration file doesn't exist + # for a given dataset + + echo "::set-output name=is_dataset_exists::true" + echo "::set-output name=is_config_exists::true" + + test -f dataset/configs/$CONFIG || (echo "::warning::The ${{ matrix.config }} configuration file doesn't exist. Skipping the job." \ + && echo "::set-output name=is_dataset_exists::false" && exit 0) + test -d dataset/$DATASET || (echo "::warning::The ${{ matrix.dataset }} dataset doesn't exist. Skipping the job." \ + && echo "::set-output name=is_config_exists::false" && exit 0) + + echo "::set-env name=DATASET::${DATASET}" + echo "::set-env name=CONFIG::${CONFIG}" + + - name: Set up Python 3.8 🐍 + uses: actions/setup-python@v2 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + with: + python-version: 3.8 + + - name: Read Poetry Version 🔢 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + run: | + echo "::set-env name=POETRY_VERSION::$(scripts/poetry-version.sh)" + shell: bash + + - name: Install poetry 🦄 + uses: Gr1N/setup-poetry@v1 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + with: + poetry-version: ${{ env.POETRY_VERSION }} + + - name: Load Poetry Cached Libraries ⬇ + uses: actions/cache@v1 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + with: + path: ~/.cache/pypoetry/virtualenvs + key: ${{ runner.os }}-poetry-3.8-${{ hashFiles('**/poetry.lock') }} + restore-keys: ${{ runner.os }}-poetry-3.8 + + - name: Install Dependencies 📦 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + run: | + poetry install --extras full + make install + poetry run python -m spacy download de_core_news_md + poetry run python -m spacy link --force de_core_news_md de + + - name: Validate that GPUs are working + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 
'true' + run: |- + poetry run python -c 'from tensorflow.python.client import device_lib; print(device_lib.list_local_devices())' || true + + - name: Run test + id: run_test + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + env: + TFHUB_CACHE_DIR: ~/.tfhub_cache/ + OMP_NUM_THREADS: 1 + run: |- + poetry run rasa --version + + export NOW_TRAIN=$(gomplate -i '{{ (time.Now).Format time.RFC3339}}'); + cd ${{ github.workspace }}/dataset + poetry run rasa train nlu --quiet -u $DATASET/train -c configs/$CONFIG --out models/$DATASET/$CONFIG + echo "::set-output name=train_run_time::$(gomplate -i '{{ $t := time.Parse time.RFC3339 (getenv "NOW_TRAIN") }}{{ (time.Since $t).Round (time.Second 1) }}')" + + export NOW_TEST=$(gomplate -i '{{ (time.Now).Format time.RFC3339}}'); + poetry run rasa test nlu --quiet -u $DATASET/test -m models/$DATASET/$CONFIG --out ${{ github.workspace }}/results/$DATASET/$CONFIG + + echo "::set-output name=test_run_time::$(gomplate -i '{{ $t := time.Parse time.RFC3339 (getenv "NOW_TEST") }}{{ (time.Since $t).Round (time.Second 1) }}')" + echo "::set-output name=total_run_time::$(gomplate -i '{{ $t := time.Parse time.RFC3339 (getenv "NOW_TRAIN") }}{{ (time.Since $t).Round (time.Second 1) }}')" + + # Download the results of the previous runs + # The report file is extended with new results every next job run + - name: Download artifact + uses: actions/download-artifact@v2 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + continue-on-error: true + with: + name: report.json + + - name: Generate a JSON file with a report / Publish results to Segment + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + env: + SUMMARY_FILE: "./report.json" + SEGMENT_TOKEN: ${{ secrets.SEGMENT_TOKEN }} + DATASET_NAME: ${{ matrix.dataset }} + RESULT_DIR: "${{ github.workspace }}/results" + CONFIG: ${{ matrix.config }} + TEST_RUN_TIME: ${{ steps.run_test.outputs.test_run_time }} + TRAIN_RUN_TIME: ${{ steps.run_test.outputs.train_run_time }} + TOTAL_RUN_TIME: ${{ steps.run_test.outputs.total_run_time }} + run: |- + export PR_URL="https://github.com/${GITHUB_REPOSITORY}/pull/${{ github.event.number }}" + poetry run pip install analytics-python + poetry run python .github/scripts/mr_publish_results.py + poetry run python .github/scripts/mr_generate_summary.py + cat $SUMMARY_FILE + + - name: Upload an artifact with the report + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + uses: actions/upload-artifact@v2 + with: + name: report.json + path: ./report.json + + model_regression_test_cpu: + name: Model Regression Tests - CPU + continue-on-error: true + needs: + - read_test_configuration + env: + ACCELERATOR_TYPE: "CPU" + runs-on: ubuntu-latest + strategy: + max-parallel: 1 + matrix: ${{fromJson(needs.read_test_configuration.outputs.matrix)}} + if: "!contains(github.event.pull_request.labels.*.name, 'runner:gpu') && github.repository == 'RasaHQ/rasa' && contains(github.event.pull_request.labels.*.name, 'status:model-regression-tests') && needs.read_test_configuration.outputs.configuration_id != ''" + + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Checkout dataset + uses: actions/checkout@v2 + with: + repository: ${{ 
secrets.DATASET_REPOSITORY }} + token: ${{ secrets.ML_TEST_SA_PAT }} + path: 'dataset' + + - name: Download gomplate + run: |- + sudo curl -o /usr/local/bin/gomplate -sSL https://github.com/hairyhenderson/gomplate/releases/download/v3.6.0/gomplate_linux-amd64 + sudo chmod +x /usr/local/bin/gomplate + + - name: Set DATASET and CONFIG variables + id: set_dataset_config_vars + env: + DATASET_NAME: "${{ matrix.dataset }}" + CONFIG_NAME: "${{ matrix.config }}" + run: |- + # determine DATASET and CONFIG environment variables + source <(gomplate -d mapping=./dataset/dataset_config_mapping.json -f .github/templates/configuration_variables.tmpl) + + # Not all configurations are available for all datasets. + # The job will fail and the workflow continues, if the configuration file doesn't exist + # for a given dataset + + echo "::set-output name=is_dataset_exists::true" + echo "::set-output name=is_config_exists::true" + + test -f dataset/configs/$CONFIG || (echo "::warning::The ${{ matrix.config }} configuration file doesn't exist. Skipping the job." \ + && echo "::set-output name=is_dataset_exists::false" && exit 0) + test -d dataset/$DATASET || (echo "::warning::The ${{ matrix.dataset }} dataset doesn't exist. Skipping the job." \ + && echo "::set-output name=is_config_exists::false" && exit 0) + + echo "::set-env name=DATASET::${DATASET}" + echo "::set-env name=CONFIG::${CONFIG}" + + - name: Set up Python 3.8 🐍 + uses: actions/setup-python@v2 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + with: + python-version: 3.8 + + - name: Read Poetry Version 🔢 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + run: | + echo "::set-env name=POETRY_VERSION::$(scripts/poetry-version.sh)" + shell: bash + + - name: Install poetry 🦄 + uses: Gr1N/setup-poetry@v1 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + with: + poetry-version: ${{ env.POETRY_VERSION }} + + - name: Load Poetry Cached Libraries ⬇ + uses: actions/cache@v1 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + with: + path: ~/.cache/pypoetry/virtualenvs + key: ${{ runner.os }}-poetry-3.8-${{ hashFiles('**/poetry.lock') }} + restore-keys: ${{ runner.os }}-poetry-3.8 + + - name: Install Dependencies 📦 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + run: | + poetry install --extras full + make install + poetry run python -m spacy download de_core_news_md + poetry run python -m spacy link --force de_core_news_md de + + - name: Run test + id: run_test + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + env: + TFHUB_CACHE_DIR: ~/.tfhub_cache/ + OMP_NUM_THREADS: 1 + run: |- + poetry run rasa --version + + export NOW_TRAIN=$(gomplate -i '{{ (time.Now).Format time.RFC3339}}'); + cd ${{ github.workspace }}/dataset + poetry run rasa train nlu --quiet -u $DATASET/train -c configs/$CONFIG --out models/$DATASET/$CONFIG + echo "::set-output name=train_run_time::$(gomplate -i '{{ $t := time.Parse time.RFC3339 (getenv "NOW_TRAIN") }}{{ (time.Since $t).Round (time.Second 1) }}')" + + export NOW_TEST=$(gomplate -i '{{ 
(time.Now).Format time.RFC3339}}'); + poetry run rasa test nlu --quiet -u $DATASET/test -m models/$DATASET/$CONFIG --out ${{ github.workspace }}/results/$DATASET/$CONFIG + + echo "::set-output name=test_run_time::$(gomplate -i '{{ $t := time.Parse time.RFC3339 (getenv "NOW_TEST") }}{{ (time.Since $t).Round (time.Second 1) }}')" + echo "::set-output name=total_run_time::$(gomplate -i '{{ $t := time.Parse time.RFC3339 (getenv "NOW_TRAIN") }}{{ (time.Since $t).Round (time.Second 1) }}')" + + # Download the results of the previous runs + # The report file is extended with new results every next job run + - name: Download artifact + uses: actions/download-artifact@v2 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + continue-on-error: true + with: + name: report.json + + - name: Generate a JSON file with a report / Publish results to Segment + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + env: + SUMMARY_FILE: "./report.json" + SEGMENT_TOKEN: ${{ secrets.SEGMENT_TOKEN }} + DATASET_NAME: ${{ matrix.dataset }} + RESULT_DIR: "${{ github.workspace }}/results" + CONFIG: ${{ matrix.config }} + TEST_RUN_TIME: ${{ steps.run_test.outputs.test_run_time }} + TRAIN_RUN_TIME: ${{ steps.run_test.outputs.train_run_time }} + TOTAL_RUN_TIME: ${{ steps.run_test.outputs.total_run_time }} + run: |- + export PR_URL="https://github.com/${GITHUB_REPOSITORY}/pull/${{ github.event.number }}" + poetry run pip install analytics-python + poetry run python .github/scripts/mr_publish_results.py + poetry run python .github/scripts/mr_generate_summary.py + cat $SUMMARY_FILE + + - name: Upload an artifact with the report + uses: actions/upload-artifact@v2 + if: steps.set_dataset_config_vars.outputs.is_dataset_exists == 'true' && steps.set_dataset_config_vars.outputs.is_config_exists == 'true' + with: + name: report.json + path: ./report.json + + add_comment_results_gpu: + name: Add a comment with the results + runs-on: ubuntu-latest + needs: + - model_regression_test_gpu + + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Download the report + uses: actions/download-artifact@v2 + with: + name: report.json + + - name: Download gomplate + run: |- + sudo curl -o /usr/local/bin/gomplate -sSL https://github.com/hairyhenderson/gomplate/releases/download/v3.6.0/gomplate_linux-amd64 + sudo chmod +x /usr/local/bin/gomplate + + - name: Render a comment to add + id: get_results + run: | + OUTPUT="$(gomplate -d data=report.json -f .github/templates/model_regression_test_results.tmpl)" + OUTPUT="${OUTPUT//$'\n'/'%0A'}" + OUTPUT="${OUTPUT//$'\r'/'%0D'}" + echo "::set-output name=result::$OUTPUT" + + - name: Publish results as a PR comment + uses: marocchino/sticky-pull-request-comment@v1 + if: always() + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + header: ${{ github.run_id }} + append: true + message: |- + + Commit: ${{ github.sha }}, [The full report is available as an artifact.](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) + + ${{ steps.get_results.outputs.result }} + + - name: Remove 'status:model-regression-tests' label + continue-on-error: true + uses: buildsville/add-remove-label@v1 + with: + token: ${{secrets.GITHUB_TOKEN}} + label: 'status:model-regression-tests' + type: remove + + - name: Remove 'runner:gpu' label + continue-on-error: true + uses: buildsville/add-remove-label@v1 + with: + 
token: ${{secrets.GITHUB_TOKEN}} + label: 'runner:gpu' + type: remove + + add_comment_results_cpu: + name: Add a comment with the results + runs-on: ubuntu-latest + needs: + - model_regression_test_cpu + + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Download the report + uses: actions/download-artifact@v2 + with: + name: report.json + + - name: Download gomplate + run: |- + sudo curl -o /usr/local/bin/gomplate -sSL https://github.com/hairyhenderson/gomplate/releases/download/v3.6.0/gomplate_linux-amd64 + sudo chmod +x /usr/local/bin/gomplate + + - name: Render a comment to add + id: get_results + run: | + OUTPUT="$(gomplate -d data=report.json -f .github/templates/model_regression_test_results.tmpl)" + OUTPUT="${OUTPUT//$'\n'/'%0A'}" + OUTPUT="${OUTPUT//$'\r'/'%0D'}" + echo "::set-output name=result::$OUTPUT" + + - name: Publish results as a PR comment + uses: marocchino/sticky-pull-request-comment@v1 + if: always() + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + header: ${{ github.run_id }} + append: true + message: |- + + Commit: ${{ github.sha }}, [The full report is available as an artifact.](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) + + ${{ steps.get_results.outputs.result }} + + - name: Remove 'status:model-regression-tests' label + continue-on-error: true + uses: buildsville/add-remove-label@v1 + with: + token: ${{secrets.GITHUB_TOKEN}} + label: 'status:model-regression-tests' + type: remove + + - name: Remove 'runner:gpu' label + continue-on-error: true + uses: buildsville/add-remove-label@v1 + with: + token: ${{secrets.GITHUB_TOKEN}} + label: 'runner:gpu' + type: remove + + remove_runner_gpu: + name: Delete Github Runner - GPU + needs: + - deploy_runner_gpu + - model_regression_test_gpu + runs-on: ubuntu-latest + if: "contains(github.event.pull_request.labels.*.name, 'runner:gpu') && always() && contains(github.event.pull_request.labels.*.name, 'status:model-regression-tests')" + + steps: + # Setup gcloud CLI + - uses: GoogleCloudPlatform/github-actions@0.1.2 + with: + service_account_key: ${{ secrets.GKE_SA_RASA_CI_GPU }} + service_account_email: ${{ secrets.GKE_RASA_CI_GPU_SA_NAME }} + + # Get the GKE credentials so we can deploy to the cluster + - run: |- + gcloud container clusters get-credentials "${{ secrets.GKE_GPU_CLUSTER }}" --zone "$GKE_ZONE" --project "${{ secrets.GKE_SA_RASA_CI_GPU_PROJECT }}" + + - name: Remove Github Runner + run: kubectl -n github-runner delete deployments github-runner-${GITHUB_RUN_ID} --grace-period=30 diff --git a/.github/workflows/continous-integration.yml b/.github/workflows/continous-integration.yml new file mode 100644 index 000000000000..f7434627d228 --- /dev/null +++ b/.github/workflows/continous-integration.yml @@ -0,0 +1,295 @@ +name: Continuous Integration + +on: + push: + branches: + - master + tags: + - '*' + pull_request: + +# SECRETS +# - GH_RELEASE_NOTES_TOKEN: personal access token of `rasabot` github account +# (login for account in 1pw) +# - SLACK_WEBHOOK_TOKEN: token to post to RasaHQ slack account (in 1password) +# - PYPI_TOKEN: publishing token for amn41 account, needs to be maintainer of +# RasaHQ/rasa on pypi (account credentials in 1password) +# - DOCKERHUB_PASSWORD: password for an account with write access to the rasa +# repo on hub.docker.com. 
used to pull and upload containers + +env: + # needed to fix issues with boto during testing: + # https://github.com/travis-ci/travis-ci/issues/7940 + BOTO_CONFIG: /dev/null + +jobs: + api: + name: Test API specification + runs-on: ubuntu-latest + + steps: + - name: Checkout git repository 🕝 + uses: actions/checkout@v2 + + - name: Setup Node 🦝 + uses: actions/setup-node@v1 + with: + node-version: '10.x' + + - name: Run Swagger 🕵️‍♀️ + run: | + npm install -g swagger-cli + swagger-cli validate docs/static/spec/action-server.yml + swagger-cli validate docs/static/spec/rasa.yml + + quality: + name: Code Quality + runs-on: ubuntu-latest + + steps: + - name: Checkout git repository 🕝 + uses: actions/checkout@v2 + + - name: Set up Python 3.7 🐍 + uses: actions/setup-python@v1 + with: + python-version: 3.7 + + - name: Set up Node 12.x 🦙 + uses: actions/setup-node@v1 + with: + node-version: '12.x' + + - name: Read Poetry Version 🔢 + run: | + echo "::set-env name=POETRY_VERSION::$(scripts/poetry-version.sh)" + shell: bash + + - name: Install poetry 🦄 + uses: Gr1N/setup-poetry@v1 + with: + poetry-version: ${{ env.POETRY_VERSION }} + + - name: Load Poetry Cached Libraries ⬇ + uses: actions/cache@v1 + with: + path: ~/.cache/pypoetry/virtualenvs + key: ${{ runner.os }}-poetry-3.7-${{ hashFiles('**/poetry.lock') }} + restore-keys: ${{ runner.os }}-poetry-3.7 + + - name: Load Yarn Cached Packages ⬇ + uses: actions/cache@v1 + with: + path: docs/node_modules + key: ${{ runner.os }}-yarn-12.x-${{ hashFiles('docs/yarn.lock') }} + restore-keys: ${{ runner.os }}-yarn-12.x + + - name: Install Dependencies 📦 + run: | + sudo apt-get -y install libpq-dev + make install-full install-docs + + - name: Lint Code 🎎 + run: make lint + + - name: Check Types 📚 + run: make types + + - name: Test CLI 🖥 + # makes sure we catch any dependency error early. they will create strange + # errors during the docs build, so easier to catch them early on by + # trying to run the `rasa` command once before the docs build. 
+ run: poetry run rasa --help + + - name: Test Docs 📃 + run: | + poetry run python -c "from scripts import release; release.generate_changelog('major.minor.patch')" + make docs + + test: + name: Run Tests + runs-on: ${{ matrix.os }} + strategy: + max-parallel: 3 + matrix: + os: [ubuntu-latest, windows-latest] + python-version: [3.6, 3.7, 3.8] + + steps: + - name: Checkout git repository 🕝 + uses: actions/checkout@v2 + + - name: Set up Python ${{ matrix.python-version }} 🐍 + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + + - name: Read Poetry Version 🔢 + run: | + echo "::set-env name=POETRY_VERSION::$(scripts/poetry-version.sh)" + shell: bash + + - name: Install poetry 🦄 + uses: Gr1N/setup-poetry@v1 + with: + poetry-version: ${{ env.POETRY_VERSION }} + + - name: Load Poetry Cached Libraries ⬇ + uses: actions/cache@v1 + with: + path: ~/.cache/pypoetry/virtualenvs + key: ${{ runner.os }}-poetry-${{ matrix.python-version }}-${{ hashFiles('**/poetry.lock') }} + restore-keys: ${{ runner.os }}-poetry-${{ matrix.python-version }} + + - name: Install Dependencies (Linux) 📦 + if: matrix.os == 'ubuntu-latest' + run: | + sudo apt-get -y install libpq-dev + make install-full | tee .output + if grep 'The lock file is not up to date' .output; then exit 1; fi + make prepare-tests-ubuntu + + - name: Install Dependencies (Windows) 📦 + if: matrix.os == 'windows-latest' + run: | + make install-full-windows + make prepare-tests-windows + + - name: Test Code 🔍 + # We don't test Windows code with Python 3.8 because there is an issue + # with python-crc32c library — CFFI is not supported by Github Runners (the corresponding + # C++ Build Tools are not installed on their machines). In this case, python-crc32c + # should use a pure Python implementation of CFFI but Google made a mistake with + # the order of arguments in their code, and their solution doesn't work. 
+ # This PR fixes the issue but it's not merged yet: + # https://github.com/googleapis/python-crc32c/pull/27 + if: matrix.os != 'windows-latest' || matrix.python-version != '3.8' + env: + JOBS: 2 + PYTHONIOENCODING: "utf-8" + run: make test + + - name: Send Coverage Report 📊 + if: matrix.python-version == 3.6 + env: + COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }} + COVERALLS_SERVICE_NAME: github + run: poetry run coveralls + + docker: + name: Build Docker + runs-on: ubuntu-latest + + strategy: + matrix: + image: + - {"file": "Dockerfile", "tag_ext": ""} + - {"file": "docker/Dockerfile_full", "tag_ext": "-full"} + - {"file": "docker/Dockerfile_pretrained_embeddings_mitie_en", "tag_ext": "-mitie-en"} + - {"file": "docker/Dockerfile_pretrained_embeddings_spacy_de", "tag_ext": "-spacy-de"} + - {"file": "docker/Dockerfile_pretrained_embeddings_spacy_en", "tag_ext": "-spacy-en"} + + env: + DOCKERHUB_USERNAME: tmbo + + steps: + - name: Checkout git repository 🕝 + uses: actions/checkout@v2 + + - name: Free disk space + # tries to make sure we do not run out of disk space, see + # https://github.community/t5/GitHub-Actions/BUG-Strange-quot-No-space-left-on-device-quot-IOExceptions-on/td-p/46101 + run: | + sudo swapoff -a + sudo rm -f /swapfile + sudo apt clean + docker rmi $(docker image ls -aq) + df -h + + - name: Login to DockerHub Registry 🔢 + run: echo ${{ secrets.DOCKERHUB_PASSWORD }} | docker login -u ${{ env.DOCKERHUB_USERNAME }} --password-stdin || true + + - name: Pull latest${{ matrix.image.tag_ext }} Docker image for caching + run: docker pull rasa/rasa:latest${{ matrix.image.tag_ext }} || true + + - name: Build latest${{ matrix.image.tag_ext }} Docker image + run: docker build . --file ${{ matrix.image.file }} --tag rasa/rasa:latest${{ matrix.image.tag_ext }} --cache-from rasa/rasa:latest${{ matrix.image.tag_ext }} + + - name: Push image with latest tag 📦 + if: github.event_name == 'push' && github.ref == 'refs/heads/master' && github.repository == 'RasaHQ/rasa' + run: docker push rasa/rasa:latest${{ matrix.image.tag_ext }} + + - name: Push image with ${{github.ref}} tag 📦 + env: + GITHUB_TAG: ${{ github.ref }} + if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') && github.repository == 'RasaHQ/rasa' + run: | + GITHUB_TAG=${GITHUB_TAG/refs\/tags\//} + docker tag rasa/rasa:latest${{ matrix.image.tag_ext }} rasa/rasa:${GITHUB_TAG}${{ matrix.image.tag_ext }} + docker push rasa/rasa:${GITHUB_TAG}${{ matrix.image.tag_ext }} + + deploy: + name: Deploy to PyPI + runs-on: ubuntu-latest + + # deploy will only be run when there is a tag available + if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') && github.repository == 'RasaHQ/rasa' + needs: [quality, test, api, docker] # only run after all other stages succeeded + + steps: + - name: Checkout git repository 🕝 + uses: actions/checkout@v2 + + - name: Set up Python 3.6 🐍 + uses: actions/setup-python@v1 + with: + python-version: 3.6 + + - name: Read Poetry Version 🔢 + run: | + echo "::set-env name=POETRY_VERSION::$(scripts/poetry-version.sh)" + shell: bash + + - name: Install poetry 🦄 + uses: Gr1N/setup-poetry@v1 + with: + poetry-version: ${{ env.POETRY_VERSION }} + + - name: Build ⚒️ Distributions + run: poetry build + + - name: Publish to PyPI 📦 + uses: pypa/gh-action-pypi-publish@37e305e7413032d8422456179fee28fac7d25187 + with: + user: __token__ + password: ${{ secrets.PYPI_TOKEN }} + + - name: Notify Slack & Publish Release Notes 🗞 + env: + GH_RELEASE_NOTES_TOKEN: ${{ secrets.GH_RELEASE_NOTES_TOKEN 
}} + SLACK_WEBHOOK_TOKEN: ${{ secrets.SLACK_WEBHOOK_TOKEN }} + GITHUB_TAG: ${{ github.ref }} + GITHUB_REPO_SLUG: ${{ github.repository }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + GITHUB_TAG=${GITHUB_TAG/refs\/tags\//} + pip install -U github3.py pep440-version-utils + python3 scripts/publish_gh_release_notes.py + ./scripts/ping_slack_about_package_release.sh + + mergepal-merge: # run merge pal in the end + runs-on: ubuntu-latest + needs: [quality, test, api, docker] + steps: + - uses: actions/checkout@v1 + - name: Make sure there is a github token + shell: bash + run: | + if [ -z "${{ secrets.RASABOT_AUTOMERGE_GITHUB_TOKEN }}" ]; then + echo ::set-env name=MERGE_TOKEN::${{ secrets.GITHUB_TOKEN }} + else + echo ::set-env name=MERGE_TOKEN::${{ secrets.RASABOT_AUTOMERGE_GITHUB_TOKEN }} + fi + - uses: rasahq/merge-pal-action@master + with: + token: ${{ env.MERGE_TOKEN }} diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml new file mode 100644 index 000000000000..a27722c58a60 --- /dev/null +++ b/.github/workflows/documentation.yml @@ -0,0 +1,83 @@ +name: Publish Documentation + +on: + push: + branches: + - 'master' + tags: + - '**' + +# SECRETS +# - GH_DOCS_WRITE_KEY: generated locally, added to github repo (public key) +# `ssh-keygen -t rsa -b 4096 -C "Github CI Docs Key" -N "" -f key` +# - GITHUB_TOKEN: (default, from github actions) + +env: + DOCS_FOLDER: docs + DOCS_BRANCH: documentation + +jobs: + docs: + name: Build Docs + runs-on: ubuntu-latest + if: github.repository == 'RasaHQ/rasa' # don't run this for master branches of forks, would fail anyways + + steps: + - name: Checkout git repository 🕝 + uses: actions/checkout@v2 + + - name: Set up Python 3.7 🐍 + uses: actions/setup-python@v1 + with: + python-version: 3.7 + + - name: Set up Node 12.x 🦙 + uses: actions/setup-node@v1 + with: + node-version: '12.x' + + - name: Read Poetry Version 🔢 + run: | + echo "::set-env name=POETRY_VERSION::$(scripts/poetry-version.sh)" + shell: bash + + - name: Install poetry 🦄 + uses: Gr1N/setup-poetry@v1 + with: + poetry-version: ${{ env.POETRY_VERSION }} + + - name: Load Poetry Cached Libraries ⬇ + uses: actions/cache@v1 + with: + path: ~/.cache/pypoetry/virtualenvs + key: ${{ runner.os }}-poetry-3.6-non-full-${{ hashFiles('**/poetry.lock') }} + restore-keys: ${{ runner.os }}-poetry-3.6-non-full + + - name: Load Yarn Cached Packages ⬇ + uses: actions/cache@v1 + with: + path: docs/node_modules + key: ${{ runner.os }}-yarn-12.x-${{ hashFiles('docs/yarn.lock') }} + restore-keys: ${{ runner.os }}-yarn-12.x + + - name: Install Dependencies 📦 + run: make install install-docs + + - name: Pre-build Docs 🧶 + run: cd $DOCS_FOLDER && poetry run yarn pre-build + + - name: Build & Publish Docs 🏃‍♀️ + env: + GH_DOCS_WRITE_KEY: ${{ secrets.GH_DOCS_WRITE_KEY }} + TMP_DOCS_FOLDER: /tmp/documentation-${{ github.run_id }} + TMP_SSH_KEY_PATH: /tmp/docs_key + run: | + eval "$(ssh-agent -s)"; touch $TMP_SSH_KEY_PATH; chmod 0600 $TMP_SSH_KEY_PATH + echo "$GH_DOCS_WRITE_KEY" > $TMP_SSH_KEY_PATH + ssh-add $TMP_SSH_KEY_PATH + + git config --global user.email "builds@github-ci.com" + git config --global user.name "GitHub CI" + git remote set-url --push origin "git@github.com:${{github.repository}}" + + ./scripts/push_docs_to_branch.sh diff --git a/.github/workflows/vulnerability-scan.yml b/.github/workflows/vulnerability-scan.yml new file mode 100644 index 000000000000..83fec17ad037 --- /dev/null +++ b/.github/workflows/vulnerability-scan.yml @@ -0,0 +1,45 @@ +name: Vulnerability 
Scan + +on: + schedule: + # Run every third day + - cron: 0 0 * * */3 + +jobs: + scan: + name: Vulnerability scan + runs-on: ubuntu-latest + + env: + DOCKERFILE: Dockerfile_with_poetry_lock + + steps: + - name: Checkout git repository 🕝 + uses: actions/checkout@v2 + + - name: Add poetry.lock 🔒 + # Trivy depends on the presence of `poetry.lock` to scan Python dependencies + run: | + BASE_IMAGE=rasa/rasa:latest-full + docker pull $BASE_IMAGE + + # Create Dockerfile which includes poetry.lock + tee -a $DOCKERFILE << END + FROM $BASE_IMAGE + COPY poetry.lock . + END + + IMAGE_NAME=rasa/rasa:latest-scanned + docker build -f $DOCKERFILE -t $IMAGE_NAME . + + echo "::set-env name=IMAGE_WITH_POETRY_LOCK::$IMAGE_NAME" + + - name: Scan image 🕵️‍♀️🕵️‍♂️ + uses: wochinge/gitrivy@6bf026b + with: + # Needs the token so it can create an issue once a vulnerability was found + token: ${{ secrets.GITHUB_TOKEN }} + image: ${{ env.IMAGE_WITH_POETRY_LOCK }} + ignore_unfixed: true + issue_label: "tool:trivy,type:vulnerability" + fail_on_vulnerabilities: true diff --git a/.gitignore b/.gitignore index dfc46a47dbf0..7cbee1f55f9f 100644 --- a/.gitignore +++ b/.gitignore @@ -18,8 +18,6 @@ venv .pytype dist/ pip-wheel-metadata -docs/nlu/_build -docs/_build server/ scala/ mongodb/ @@ -48,15 +46,11 @@ tmp_training_data.json models/ .mypy_cache/ *.tar.gz -docs/nlu/key -docs/nlu/key.pub secrets.tar .pytest_cache -src test_download.zip bower_components/ build/lib/ -docs/core/_build /models/ node_modules/ npm-debug.log @@ -68,16 +62,30 @@ graph.html story_graph.html story_graph.dot debug.md -examples/restaurantbot/models* examples/moodbot/*.png examples/moodbot/errors.json examples/formbot/models* -examples/concertbot/data* examples/concertbot/models* examples/moodbot/models* -docs/core/key -docs/core/key.pub failed_stories.md errors.json pip-wheel-metadata/* events.db +events.db-shm +events.db-wal +rasa.db +rasa.db-shm +rasa.db-wal +*.swp +*.coverage* +env +.dir-locals.el +.history +docs/.docusaurus +docs/docs/variables.json +docs/docs/sources/ +docs/docs/reference/ +docs/docs/changelog.mdx + +# Local Netlify folder +.netlify diff --git a/.mergepal.yml b/.mergepal.yml new file mode 100644 index 000000000000..f3da2369d8e5 --- /dev/null +++ b/.mergepal.yml @@ -0,0 +1,3 @@ +whitelist: + - status:ready-to-merge +method: merge diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ba2e0a358a68..5976bf6ddc32 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,4 +3,8 @@ repos: rev: stable hooks: - id: black - language_version: python3.6 +- repo: https://github.com/thlorenz/doctoc + rev: master + hooks: + - id: doctoc + files: "CONTRIBUTING.md" diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 3eaf9306f2a2..000000000000 --- a/.travis.yml +++ /dev/null @@ -1,117 +0,0 @@ -language: python -sudo: enabled -dist: xenial -cache: - directories: - - $HOME/.cache/pip - - .pytype -python: '3.6' -env: - # needed to fix issues with boto during testing: - # https://github.com/travis-ci/travis-ci/issues/7940 - global: - - BOTO_CONFIG=/dev/null - - PIP_USE_PEP517=false - -install: - # The instructions listed here should be equivalent to the ones - # listed in README.md! - - sudo apt-get update -qq - - python -m pip install -U pip - - pip install -r requirements-dev.txt - - pip install -e . 
- - make prepare-tests-ubuntu - - pip list -before_script: - - mkdir $HOME/tmp - - export TMPDIR=$HOME/tmp -jobs: - include: - - stage: test - name: "Code Formatting" - python: 3.6 - script: - - make lint - - stage: test - name: "Type Check" - python: 3.6 - script: - - make types - - &run-tests - stage: test - name: "Test 3.5" - python: "3.5" - script: - - make test - - <<: *run-tests - name: "Test 3.6" - python: '3.6' - - <<: *run-tests - name: "Test 3.7" - python: '3.7' - after_success: - - coveralls # only this version will submit results to coveralls - - stage: integration - name: "Test API specification" - language: python - install: - - npm install -g swagger-cli - script: - - swagger-cli validate docs/_static/spec/action-server.yml - - swagger-cli validate docs/_static/spec/rasa.yml - - stage: test - name: "Test Docs" - install: - - pip install -r requirements-docs.txt - - pip install -e .[sql] - - pip list - script: - # be strict about warnings --> they will be treated as errors - - cd docs - - make SPHINXOPTS="-W --keep-going -A html_theme=rasabaster" html - - stage: docs - if: fork = false AND branch = "master" # forked repository will skip building docs, only master & PRs to it - install: - - RASABASTER=rasabaster-0.7.23.tar.gz - - curl -sSL -o $RASABASTER "https://storage.googleapis.com/docs-theme/${RASABASTER}?q=$(date +%s%N)" - - pip install $RASABASTER - - pip install --no-cache-dir -r requirements-docs.txt - - pip install git+https://${GITHUB_TOKEN}:x-oauth-basic@github.com/RasaHQ/sphinxcontrib-versioning.git@version_list - - pip install -e . - - pip list - script: - - eval "$(ssh-agent -s)"; touch docs_key; chmod 0600 docs_key - - openssl aes-256-cbc - -K $encrypted_1708c37fe733_key - -iv $encrypted_1708c37fe733_iv - -in secrets.tar.enc - -out secrets.tar - -d - - tar -xvf secrets.tar - - ssh-add docs_key - - git config --global user.email "builds@travis-ci.com" - - git config --global user.name "Travis CI" - - git remote set-url --push origin "git@github.com:$TRAVIS_REPO_SLUG" - - export ${!TRAVIS*} - - sphinx-versioning push docs docs . 
-- -b dirhtml -A html_theme=rasabaster - - stage: docs-netlify - if: branch = master AND type != pull_request OR tag IS present - install: skip - script: - - curl -X POST -d "docs" https://api.netlify.com/build_hooks/${NETLIFY_HOOK_ID} - - stage: deploy - name: "Deploy to PyPI" - python: 3.6 - if: tag IS present - install: skip - script: skip - deploy: - provider: pypi - user: amn41 - # server: https://test.pypi.org/legacy/ - on: - tags: true - distributions: "sdist bdist_wheel" - password: - secure: "MeL1Ve97eBY+VbNWuQNuLzkPs0TPc+Zh8OfZkhw69ez5imsiWpvp0LrUOLVW3CcC0vNTANEBOVX/n1kHxfcqkf/cChNqAkZ6zTMmvR9zHDwQxXVGZ3jEQSQM+fHdQpjwtH7BwojyxaCIC/5iza7DFMcca/Q6Xr+atdTd0V8Q7Nc5jFHEQf3/4oIIm6YeCUiHcEu981LRdS04+jvuFUN0Ejy+KLukGVyIWyYDjjGjs880Mj4J1mgmCihvVkJ1ujB65rYBdTjls3JpP3eTk63+xH8aHilIuvqB8TDYih8ovE/Vv6YwLI+u2HoEHAtBD4Ez3r71Ju6JKJM7DhWb5aurN4M7K6DC8AvpUl+PsJbNP4ZeW2jXMH6lT6qXKVaSw7lhZ0XY3wunyVcAbArX4RS0B9pb1nHBYUBWZjxXtr8lhkpGFu7H43hw63Y19qb8z4+1cGnijgz1mqXSAssuc+3r0W0cSr+OsCjmOs7cwT6HMQvPEKxLohwBOS/I3EbuKQOYMjFN5BWP5JXbsG45awV9tquxEW8zxjMetR+AOcYoyrDeiR8sAnj1/F99DE0bL1KyW/G5VNu2Xi/c+0M3KvP3+F8XTCuUY/5zTvqh1Qz1jcdiwsiAhO4eBQzQnjeFlxdiVeue2kmD5qsh+VLKKuKLfyVoaV7b1kBlAtBDu7+hDpA=" - after_deploy: bash scripts/ping_slack_about_package_release.sh diff --git a/.typo-ci.yml b/.typo-ci.yml new file mode 100644 index 000000000000..c992f599e0d2 --- /dev/null +++ b/.typo-ci.yml @@ -0,0 +1,176 @@ +# This is a sample .typo-ci.yml file, it's used to configure how Typo CI will behave. +# Add it to the root of your project and push it to github. +--- + +# What language dictionaries should it use? Currently Typo CI supports: +# de +# en +# en_GB +# es +# fr +# it +# pt +# pt_BR +dictionaries: + - en + +# # Any files/folders we should ignore? +excluded_files: + - "*.py" + - "*.css" + - "*.yml" + - "*.yaml" + - "*.html" + - "*.json" + - "*.lock" + - "*.js" + - "*.md" + - "CHANGELOG.mdx" + - "CODE_OF_CONDUCT.md" + - "CONTRIBUTING.md" + - "Dockerfile" + - "LICENSE.txt" + - "Makefile" + - "NOTICE" + - "README.md" + - "cloudbuild.yaml" + - "pyproject.toml" + - "secrets.tar.enc" + - "setup.cfg" + - ".codeclimate.yml" + - ".coveragerc" + - ".deepsource.toml" + - ".dockerignore" + - ".env" + - ".git" + - ".gitattributes" + - ".gitignore" + - ".pre-commit-config.yaml" + - ".typo-ci.yml" + - ".github/**/*" + - "binder/**/*" + - "data/**/*" + - "docker/**/*" + - "examples/**/*" + - "rasa/**/*" + - "scripts/**/*" + - "tests/**/*" + +# # Any typos we should ignore? 
+excluded_words: + - analytics + - asyncio + - bot + - bot's + - cdd + - CDD + - cmdline + - conveRT + - ConveRTFeaturizer + - ConveRTTokenizer + - crfsuite + - custom-nlg-service + - daksh + - db's + - deque + - docusaurus + - non-latin + - deduplicate + - deduplication + - donath + - matplotlib + - extractor + - fbmessenger + - featurization + - featurized + - featurizer + - featurizers + - featurizes + - featurizing + - forni + - initializer + - instaclient + - jwt + - jwt's + - jupyter + - jupyterhub + - karpathy + - keras + - knowledgebase + - knowledgebasebot + - linenos + - luis + - matmul + - mattermost + - memoization + - miniconda + - mitie + - mitie's + - mitienlp + - dataset + - mongod + - mrkdown + - mrkdwn + - mymodelname + - myuser + - numpy + - networkx + - nlu + - nlu's + - perceptron + - pika + - pika's + - jieba + - pretrained + - prototyper + - pycodestyle + - pykwalify + - pymessenger + - pyobject + - python-engineio + - pre + - customizable + - quickstart + - rasa + - rasa's + - readthedocs + - repo + - rst + - sanic + - sanitization + - scipy + - sklearn + - spacy + - spacynlp + - ish + - spaCy + - spaCy's + - README + - crf + - backends + - whitespaced + - ngram + - subsampled + - testagent + - tokenize + - tokenized + - tokenization + - tokenizer + - tokenizers + - typoci + - unfeaturized + - unschedule + - wsgi + - ruamel + - prototyper + - hallo + - crypto + - regexes + - walkthroughs + - venv + - regexfeaturizer + - crfentityextractor + - Comerica + - entitysynonymmapper + +spellcheck_filenames: false diff --git a/CHANGELOG.mdx b/CHANGELOG.mdx new file mode 100644 index 000000000000..818ceec87bb9 --- /dev/null +++ b/CHANGELOG.mdx @@ -0,0 +1,2034 @@ +--- +id: changelog +sidebar_label: Rasa Open Source Change Log +title: Rasa Open Source Change Log +--- + +All notable changes to this project will be documented in this file. +This project adheres to [Semantic Versioning](https://semver.org/) starting with version 1.0. + + + + +## [1.10.10] - 2020-08-04 + +Bugfixes +-------- +- [#6280](https://github.com/rasahq/rasa/issues/6280): Fixed `TypeError: expected string or bytes-like object` + issue caused by integer, boolean, and null values in templates. + + +## [1.10.9] - 2020-07-29 + +Improvements +------------ +- [#6255](https://github.com/rasahq/rasa/issues/6255): Rasa Open Source will no longer add `responses` to the `actions` section of the + domain when persisting the domain as a file. This addresses related problems in Rasa X + when Integrated Version Control introduced big diffs due to the added utterances + in the `actions` section. + +Bugfixes +-------- +- [#6160](https://github.com/rasahq/rasa/issues/6160): Consider entity roles/groups during interactive learning. + + +## [1.10.8] - 2020-07-15 + +### Bugfixes + +* [#6075](https://github.com/rasahq/rasa/issues/6075): Add 'Access-Control-Expose-Headers' for 'filename' header +* [#6137](https://github.com/rasahq/rasa/issues/6137): Fixed a bug where an invalid language variable prevents rasa from finding training examples when importing Dialogflow data. + + +## [1.10.7] - 2020-07-07 + +### Features + +* [#6150](https://github.com/rasahq/rasa/issues/6150): Add `not_supported_language_list` to component to be able to define languages that a component can NOT handle. + + `WhitespaceTokenizer` is not able to process languages which are not separated by whitespace. `WhitespaceTokenizer` + will throw an error if it is used with Chinese, Japanese, and Thai. 
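+
+  As an illustration (not part of the original release notes), a custom NLU component might declare the new
+  attribute roughly as sketched below. The component name and import path are assumptions based on the
+  Rasa 1.10.x module layout and may differ in your setup:
+
+  ```python
+  from typing import Any
+
+  from rasa.nlu.components import Component
+
+
+  class MyWhitespaceBasedComponent(Component):
+      """Hypothetical component that cannot handle languages written without whitespace."""
+
+      # Languages this component can NOT handle (the new option described above).
+      not_supported_language_list = ["zh", "ja", "th"]
+
+      def process(self, message: Any, **kwargs: Any) -> None:
+          # Real processing logic would go here; this sketch only demonstrates
+          # how `not_supported_language_list` is declared on a component.
+          pass
+  ```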
+
+### Bugfixes
+
+* [#6150](https://github.com/rasahq/rasa/issues/6150): `WhitespaceTokenizer` only removes emoji if complete token matches emoji regex.
+
+
+## [1.10.6] - 2020-07-06
+
+### Bugfixes
+
+* [#6143](https://github.com/rasahq/rasa/issues/6143): Prevent `WhitespaceTokenizer` from outputting empty list of tokens.
+
+## [1.10.5] - 2020-07-02
+
+### Bugfixes
+
+* [#6119](https://github.com/rasahq/rasa/issues/6119): Explicitly remove all emojis which appear as unicode characters from the output of `regex.sub` inside `WhitespaceTokenizer`.
+
+## [1.10.4] - 2020-07-01
+
+### Bugfixes
+
+* [#5998](https://github.com/rasahq/rasa/issues/5998): `WhitespaceTokenizer` does not remove vowel signs in Hindi anymore.
+
+* [#6031](https://github.com/rasahq/rasa/issues/6031): Previously, specifying a lock store in the endpoint configuration with a type other than `redis` or `in_memory`
+  would lead to an `AttributeError: 'str' object has no attribute 'type'`. This bug is fixed now.
+
+* [#6032](https://github.com/rasahq/rasa/issues/6032): Fix `Interpreter parsed an intent ...` warning when using the `/model/parse`
+  endpoint with an NLU-only model.
+
+* [#6042](https://github.com/rasahq/rasa/issues/6042): Convert entity values coming from any entity extractor to string during evaluation to avoid mismatches due to
+  different types.
+
+* [#6078](https://github.com/rasahq/rasa/issues/6078): The assistant will respond through the webex channel to any user (room) communicating with it. Previously, the bot responded only to a fixed `roomId` set in the `credentials.yml` config file.
+
+## [1.10.3] - 2020-06-12
+
+### Improvements
+
+* [#3900](https://github.com/rasahq/rasa/issues/3900): Reduced duplicate logs and warnings when running `rasa train`.
+
+### Bugfixes
+
+* [#5972](https://github.com/rasahq/rasa/issues/5972): Remove the `clean_up_entities` method from the `DIETClassifier` and `CRFEntityExtractor` as it led to incorrect
+  entity predictions.
+
+* [#5976](https://github.com/rasahq/rasa/issues/5976): Fix server crashes that occurred when Rasa Open Source pulls a model from a
+  [model server](./model-storage.mdx#server-fetch-from-server) and an exception was thrown during
+  model loading (such as a domain with invalid YAML).
+
+## [1.10.2] - 2020-06-03
+
+### Bugfixes
+
+* [#5521](https://github.com/rasahq/rasa/issues/5521): Responses used in ResponseSelector now support new lines by explicitly adding `\\n` between them.
+
+* [#5758](https://github.com/rasahq/rasa/issues/5758): Fixed a bug in [rasa export](https://rasa.com/docs/rasa-x/installation-and-setup/deploy#connect-rasa-deployment) ([Export Conversations to an Event Broker](./command-line-interface.mdx#export-conversations-to-an-event-broker)) which caused Rasa Open Source to only migrate conversation events from the last [Session configuration](./domain.mdx#session-config).
+
+## [1.10.1] - 2020-05-15
+
+### Improvements
+
+* [#5794](https://github.com/rasahq/rasa/issues/5794): Creating a `Domain` using `Domain.fromDict` can no longer alter the input dictionary.
+  Previously, there could be problems when the input dictionary was re-used for other
+  things after creating the `Domain` from it.
+
+### Bugfixes
+
+* [#5617](https://github.com/rasahq/rasa/issues/5617): Don't create TensorBoard log files during prediction.
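+
+  For context, TensorBoard logging itself is configured through the options introduced in 1.9.0 (see the
+  1.9.0 notes further below). A purely illustrative pipeline snippet, with placeholder values, might look like:
+
+  ```yaml
+  pipeline:
+    - name: DIETClassifier
+      epochs: 100
+      # Metrics are written during training only; with this fix, prediction no longer creates log files.
+      tensorboard_log_directory: ./tensorboard
+      tensorboard_log_level: epoch
+  ```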
+
+* [#5638](https://github.com/rasahq/rasa/issues/5638): Fix: DIET breaks with empty spaCy model
+
+* [#5755](https://github.com/rasahq/rasa/issues/5755): Remove `clean_up_entities` from extractors that extract pre-defined entities.
+  Just keep the clean up method for entity extractors that extract custom entities.
+
+* [#5792](https://github.com/rasahq/rasa/issues/5792): Fixed issue where the `DucklingHTTPExtractor` component would
+  not work if its url contained a trailing slash.
+
+* [#5825](https://github.com/rasahq/rasa/issues/5825): Fix list index out of range error in `ensure_consistent_bilou_tagging`.
+
+### Miscellaneous internal changes
+
+* #5788
+
+## [1.10.0] - 2020-04-28
+
+### Features
+
+* [#3765](https://github.com/rasahq/rasa/issues/3765): Add support for entities with roles and grouping of entities in Rasa NLU.
+
+  You can now define a role and/or group label in addition to the entity type for entities.
+  Use the role label if an entity can play different roles in your assistant.
+  For example, a city can be a destination or a departure city.
+  The group label can be used to group multiple entities together.
+  For example, you could group different pizza orders, so that you know which toppings go with which pizza and
+  what size each pizza has.
+  For more details see [Entities Roles and Groups](./training-data-format.mdx#entities-roles-groups).
+
+  To fill slots from entities with a specific role/group, you need to either use forms or use a custom action.
+  We updated the tracker method `get_latest_entity_values` to take an optional role/group label.
+  If you want to use a form, you can add the specific role/group label of interest to the slot mapping function
+  `from_entity` (see [Forms](./forms.mdx)).
+
+  :::note
+  Composite entities are currently only supported by the [DIETClassifier](./components/intent-classifiers.mdx#dietclassifier) and [CRFEntityExtractor](./components/entity-extractors.mdx#crfentityextractor).
+
+  :::
+
+* [#5465](https://github.com/rasahq/rasa/issues/5465): Update training data format for NLU to support entities with a role or group label.
+
+  You can now specify synonyms, roles, and groups of entities using the following data format:
+  Markdown:
+
+  ```
+  [LA]{"entity": "location", "role": "city", "group": "CA", "value": "Los Angeles"}
+  ```
+
+  JSON:
+
+  ```
+  "entities": [
+    {
+      "start": 10,
+      "end": 12,
+      "value": "Los Angeles",
+      "entity": "location",
+      "role": "city",
+      "group": "CA"
+    }
+  ]
+  ```
+
+  The markdown format `[LA](location:Los Angeles)` is deprecated. To update your training data file just
+  execute the following command on the terminal of your choice:
+  `sed -i -E 's/\\[([^)]+)\\]\\(([^)]+):([^)]+)\\)/[\\1]{"entity": "\\2", "value": "\\3"}/g' nlu.md`
+
+  For more information about the new data format see [Training Data Format](./training-data-format.mdx).
+
+### Improvements
+
+* [#2224](https://github.com/rasahq/rasa/issues/2224): Suppressed `pika` logs when establishing the connection. These log messages
+  mostly happened when Rasa X and RabbitMQ were started at the same time. Since RabbitMQ
+  can take a few seconds to initialize, Rasa X has to re-try until the connection is
+  established.
+  In case you suspect a different problem (such as failing authentication) you can
+  re-enable the `pika` logs by setting the log level to `DEBUG`. To run Rasa Open
+  Source in debug mode, use the `--debug` flag. To run Rasa X in debug mode, set the
+  environment variable `DEBUG_MODE` to `true`.
+
+* [#3419](https://github.com/rasahq/rasa/issues/3419): Include the source filename of a story in the failed stories
+
+  Include the source filename of a story in the failed stories to make it easier to identify the file which contains the failed story.
+
+* [#5544](https://github.com/rasahq/rasa/issues/5544): Add confusion matrix and “confused_with” to response selection evaluation
+
+  If you are using ResponseSelectors, they now produce similar outputs during NLU evaluation. Misclassified responses are listed in a “confused_with” attribute in the evaluation report. Similarly, a confusion matrix of all responses is plotted.
+
+* [#5578](https://github.com/rasahq/rasa/issues/5578): Added `socketio` to the compatible channels for [Reminders and External Events](./reminders-and-external-events.mdx).
+
+* [#5595](https://github.com/rasahq/rasa/issues/5595): Update `POST /model/train` endpoint to accept retrieval action responses
+  at the `responses` key of the JSON payload.
+
+* [#5627](https://github.com/rasahq/rasa/issues/5627): All Rasa Open Source images are now using Python 3.7 instead of Python 3.6.
+
+* [#5635](https://github.com/rasahq/rasa/issues/5635): Update dependencies based on the `dependabot` check.
+
+* [#5636](https://github.com/rasahq/rasa/issues/5636): Add dropout between `FFNN` and `DenseForSparse` layers in `DIETClassifier`,
+  `ResponseSelector` and `EmbeddingIntentClassifier` controlled by `use_dense_input_dropout` config parameter.
+
+* [#5646](https://github.com/rasahq/rasa/issues/5646): `DIETClassifier` only counts as extractor in `rasa test` if it was actually trained for entity recognition.
+
+* [#5669](https://github.com/rasahq/rasa/issues/5669): Remove regularization gradient for variables that don't have prediction gradient.
+
+* [#5672](https://github.com/rasahq/rasa/issues/5672): Raise a warning in `CRFEntityExtractor` and `DIETClassifier` if entities are not correctly annotated in the
+  training data, e.g. their start and end values do not match any start and end values of tokens.
+
+* [#5690](https://github.com/rasahq/rasa/issues/5690): Add `full_retrieval_intent` property to `ResponseSelector` rankings
+
+* [#5717](https://github.com/rasahq/rasa/issues/5717): Change default values for hyper-parameters in `EmbeddingIntentClassifier` and `DIETClassifier`
+
+  Use `scale_loss=False` in `DIETClassifier`. Reduce the number of dense dimensions for sparse features of text from 512 to 256 in `EmbeddingIntentClassifier`.
+
+### Bugfixes
+
+* [#5230](https://github.com/rasahq/rasa/issues/5230): Fixed issue where posting to certain callback channel URLs would return a 500 error on successful posts due to invalid response format.
+
+* [#5475](https://github.com/rasahq/rasa/issues/5475): One word can only have one entity label.
+
+  If you are using, for example, the `ConveRTTokenizer`, words can be split into multiple tokens.
+  Our entity extractors assign entity labels per token. So it might happen that a word that was split into two tokens
+  got assigned two different entity labels. This is now fixed. One word can only have one entity label at a time.
+
+* [#5509](https://github.com/rasahq/rasa/issues/5509): An entity label should always cover a complete word.
+
+  If you are using, for example, the `ConveRTTokenizer`, words can be split into multiple tokens.
+  Our entity extractors assign entity labels per token. So it might happen that just a part of a word has
+  an entity label. This is now fixed. An entity label always covers a complete word.
+
+* [#5574](https://github.com/rasahq/rasa/issues/5574): Fixed an issue that happened when metadata was passed in a new session.
+
+  Now the metadata is correctly passed to the `ActionSessionStart`.
+
+* [#5672](https://github.com/rasahq/rasa/issues/5672): Updated Python dependency `ruamel.yaml` to `>=0.16`. We recommend using at least
+  `0.16.10` due to the security issue
+  [CVE-2019-20478](https://nvd.nist.gov/vuln/detail/CVE-2019-20478) which is present
+  in prior versions.
+
+### Miscellaneous internal changes
+
+* #5556, #5587, #5614, #5631, #5633
+
+## [1.9.7] - 2020-04-23
+
+### Improvements
+
+* [#4606](https://github.com/rasahq/rasa/issues/4606): The stream reading timeout for `rasa shell` is now configurable by using the
+  environment variable `RASA_SHELL_STREAM_READING_TIMEOUT_IN_SECONDS`.
+  This can help to fix problems when using `rasa shell` with custom actions which run
+  10 seconds or longer.
+
+### Bugfixes
+
+* [#5709](https://github.com/rasahq/rasa/issues/5709): Reverted changes in 1.9.6 that led to model incompatibility. Upgrade to 1.9.7 to fix the
+  `self.sequence_lengths_for(tf_batch_data[TEXT_SEQ_LENGTH][0]) IndexError: list index out of range`
+  error without needing to retrain earlier 1.9 models.
+
+  Therefore, all 1.9 models except for 1.9.6 will be compatible; a model trained on 1.9.6 will need
+  to be retrained on 1.9.7.
+
+## [1.9.6] - 2020-04-15
+
+### Bugfixes
+
+* [#5426](https://github.com/rasahq/rasa/issues/5426): Fix `rasa test nlu` plotting when using multiple runs.
+
+* [#5489](https://github.com/rasahq/rasa/issues/5489): Fixed issue where `max_number_of_predictions` was not considered when running end-to-end testing.
+
+### Miscellaneous internal changes
+
+* #5626
+
+## [1.9.5] - 2020-04-01
+
+### Improvements
+
+* [#5533](https://github.com/rasahq/rasa/issues/5533): Support for
+  [PostgreSQL schemas](https://www.postgresql.org/docs/11/ddl-schemas.html) in
+  [SQLTrackerStore](./tracker-stores.mdx#sqltrackerstore). The `SQLTrackerStore`
+  accesses schemas defined by the `POSTGRESQL_SCHEMA` environment variable if
+  connected to a PostgreSQL database.
+
+  The schema is added to the connection string option's `-csearch_path` key, e.g.
+  `-options=-csearch_path=` (see
+  [https://www.postgresql.org/docs/11/contrib-dblink-connect.html](https://www.postgresql.org/docs/11/contrib-dblink-connect.html) for more details).
+  As before, if no `POSTGRESQL_SCHEMA` is defined, Rasa uses the database's default
+  schema (`public`).
+
+  The schema has to exist in the database before connecting, i.e. it needs to have been
+  created with
+
+  ```postgresql
+  CREATE SCHEMA schema_name;
+  ```
+
+### Bugfixes
+
+* [#5547](https://github.com/rasahq/rasa/issues/5547): Fixed ambiguous logging in `DIETClassifier` by adding the name of the calling class to the log message.
+
+## [1.9.4] - 2020-03-30
+
+### Bugfixes
+
+* [#5529](https://github.com/rasahq/rasa/issues/5529): Fix memory leak problem with an increasing number of calls to the `/model/parse` endpoint.
+
+## [1.9.3] - 2020-03-27
+
+### Bugfixes
+
+* [#5505](https://github.com/rasahq/rasa/issues/5505): Set default value for `weight_sparsity` in `ResponseSelector` to `0`.
+  This fixes a bug in the default behaviour of `ResponseSelector` which was accidentally introduced in `rasa==1.8.0`.
+  Users should update to this version and re-train their models if `ResponseSelector` was used in their pipeline.
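+
+  If you prefer to be explicit rather than rely on the restored default, a hedged example of pinning the
+  value in the pipeline configuration (option name as used by this release) could look like:
+
+  ```yaml
+  pipeline:
+    # ... tokenizers and featurizers ...
+    - name: ResponseSelector
+      # Explicitly set the value that 1.9.3 restores as the default.
+      weight_sparsity: 0
+  ```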
+ +## [1.9.2] - 2020-03-26 + +### Improved Documentation + +* [#5497](https://github.com/RasaHQ/rasa/pull/5497): Fix documentation to bring back Sara. + +## [1.9.1] - 2020-03-25 + +### Bugfixes + +* [#5492](https://github.com/rasahq/rasa/issues/5492): Fix an issue where the deprecated `queue` parameter for the [Pika Event Broker](./event-brokers.mdx#pika-event-broker) + was ignored and Rasa Open Source published the events to the `rasa_core_events` + queue instead. Note that this does not change the fact that the `queue` argument + is deprecated in favor of the `queues` argument. + +## [1.9.0] - 2020-03-24 + +### Features + +* [#5006](https://github.com/rasahq/rasa/issues/5006): Channel `hangouts` for Rasa integration with Google Hangouts Chat is now supported out-of-the-box. + +* [#5389](https://github.com/rasahq/rasa/issues/5389): Add an optional path to a specific directory to download and cache the pre-trained model weights for [HFTransformersNLP](./components/language-models.mdx#hftransformersnlp). + +* [#5422](https://github.com/rasahq/rasa/issues/5422): Add options `tensorboard_log_directory` and `tensorboard_log_level` to `EmbeddingIntentClassifier`, + `DIETClasifier`, `ResponseSelector`, `EmbeddingPolicy` and `TEDPolicy`. + + By default `tensorboard_log_directory` is `None`. If a valid directory is provided, + metrics are written during training. After the model is trained you can take a look + at the training metrics in tensorboard. Execute `tensorboard --logdir `. + + Metrics can either be written after every epoch (default) or for every training step. + You can specify when to write metrics using the variable `tensorboard_log_level`. + Valid values are 'epoch' and 'minibatch'. + + We also write down a model summary, i.e. layers with inputs and types, to the given directory. + +### Improvements + +* [#4756](https://github.com/rasahq/rasa/issues/4756): Make response timeout configurable. + `rasa run`, `rasa shell` and `rasa x` can now be started with + `--response-timeout ` to configure a response timeout of `` seconds. + +* [#4826](https://github.com/rasahq/rasa/issues/4826): Add full retrieval intent name to message data + `ResponseSelector` will now add the full retrieval intent name + e.g. `faq/which_version` to the prediction, making it accessible + from the tracker. + +* [#5258](https://github.com/rasahq/rasa/issues/5258): Added `PikaEventBroker` ([Pika Event Broker](./event-brokers.mdx#pika-event-broker)) support for publishing to + multiple queues. Messages are now published to a `fanout` exchange with name + `rasa-exchange` (see + [exchange-fanout](https://www.rabbitmq.com/tutorials/amqp-concepts.html#exchange-fanout) + for more information on `fanout` exchanges). + + The former `queue` key is deprecated. Queues should now be + specified as a list in the `endpoints.yml` event broker config under a new key + `queues`. Example config: + + ```yaml + event_broker: + type: pika + url: localhost + username: username + password: password + queues: + - queue-1 + - queue-2 + - queue-3 + ``` + +* [#5416](https://github.com/rasahq/rasa/issues/5416): Change `rasa init` to include `tests/conversation_tests.md` file by default. + +* [#5446](https://github.com/rasahq/rasa/issues/5446): The endpoint `PUT /conversations//tracker/events` no longer + adds session start events (to learn more about conversation sessions, please + see [Session configuration](./domain.mdx#session-config)) in addition to the events which were sent in the request + payload. 
To achieve the old behavior send a + `GET /conversations//tracker` + request before appending events. + +* [#5482](https://github.com/rasahq/rasa/issues/5482): Make `scale_loss` for intents behave the same way as in versions below `1.8`, but + only scale if some of the examples in a batch has probability of the golden label more than `0.5`. + Introduce `scale_loss` for entities in `DIETClassifier`. + +### Bugfixes + +* [#5205](https://github.com/rasahq/rasa/issues/5205): Fixed the bug when FormPolicy was overwriting MappingPolicy prediction (e.g. `/restart`). + Priorities for [Mapping Policy](./policies.mdx#mapping-policy) and [Form Policy](./policies.mdx#form-policy) are no longer linear: + `FormPolicy` priority is 5, but its prediction is ignored if `MappingPolicy` is used for prediction. + +* [#5215](https://github.com/rasahq/rasa/issues/5215): Fixed issue related to storing Python `float` values as `decimal.Decimal` objects + in DynamoDB tracker stores. All `decimal.Decimal` objects are now converted to + `float` on tracker retrieval. + + Added a new docs section on [DynamoTrackerStore](./tracker-stores.mdx#dynamotrackerstore). + +* [#5356](https://github.com/rasahq/rasa/issues/5356): Fixed bug where `FallbackPolicy` would always fall back if the fallback action is + `action_listen`. + +* [#5361](https://github.com/rasahq/rasa/issues/5361): Fixed bug where starting or ending a response with `\\n\\n` led to one of the responses returned being empty. + +* [#5405](https://github.com/rasahq/rasa/issues/5405): Fixes issue where model always gets retrained if multiple NLU/story files are in a + directory, by sorting the list of files. + +* [#5444](https://github.com/rasahq/rasa/issues/5444): Fixed ambiguous logging in DIETClassifier by adding the name of the calling class to the log message. + +### Improved Documentation + +* [#2237](https://github.com/rasahq/rasa/issues/2237): Restructure the “Evaluating models” documentation page and rename this page to [Testing Your Assistant](./testing-your-assistant.mdx). + +* [#5302](https://github.com/rasahq/rasa/issues/5302): Improved documentation on how to build and deploy an action server image for use on other servers such as Rasa X deployments. + +### Miscellaneous internal changes + +* #5340 + +## [1.8.3] - 2020-03-27 + +### Bugfixes + +* [#5405](https://github.com/rasahq/rasa/issues/5405): Fixes issue where model always gets retrained if multiple NLU/story files are in a + directory, by sorting the list of files. + +* [#5444](https://github.com/rasahq/rasa/issues/5444): Fixed ambiguous logging in DIETClassifier by adding the name of the calling class to the log message. + +* [#5506](https://github.com/rasahq/rasa/issues/5506): Set default value for `weight_sparsity` in `ResponseSelector` to `0`. + This fixes a bug in the default behaviour of `ResponseSelector` which was accidentally introduced in `rasa==1.8.0`. + Users should update to this version or `rasa>=1.9.3` and re-train their models if `ResponseSelector` was used in their pipeline. + +### Improved Documentation + +* [#5302](https://github.com/rasahq/rasa/issues/5302): Improved documentation on how to build and deploy an action server image for use on other servers such as Rasa X deployments. + +## [1.8.2] - 2020-03-19 + +### Bugfixes + +* [#5438](https://github.com/rasahq/rasa/issues/5438): Fixed bug when installing rasa with `poetry`. + +* [#5413](https://github.com/RasaHQ/rasa/issues/5413): Fixed bug with `EmbeddingIntentClassifier`, where results + weren't the same as in 1.7.x. 
Fixed by setting weight sparsity to 0.
+
+### Improved Documentation
+
+* [#5404](https://github.com/rasahq/rasa/issues/5404): Explain how to run commands as `root` user in Rasa SDK Docker images since version
+  `1.8.0`. Since version `1.8.0` the Rasa SDK Docker images no longer run as the
+  `root` user by default. For commands which require `root` user usage, you have to
+  switch back to the `root` user in your Docker image as described in
+  [Building an Action Server Image](./how-to-deploy.mdx#building-an-action-server-image).
+
+* [#5402](https://github.com/RasaHQ/rasa/issues/5402): Made improvements to the Building Assistants tutorial
+
+## [1.8.1] - 2020-03-06
+
+### Bugfixes
+
+* [#5354](https://github.com/rasahq/rasa/issues/5354): Fixed issue with using language models like `xlnet` along with `entity_recognition` set to `True` inside
+  `DIETClassifier`.
+
+### Miscellaneous internal changes
+
+* #5330, #5348
+
+## [1.8.0] - 2020-02-26
+
+### Deprecations and Removals
+
+* [#4991](https://github.com/rasahq/rasa/issues/4991): Removed `Agent.continue_training` and the `dump_flattened_stories` parameter
+  from `Agent.persist`.
+
+* [#5266](https://github.com/rasahq/rasa/issues/5266): Properties `Component.provides` and `Component.requires` are deprecated.
+  Use `Component.required_components()` instead.
+
+### Features
+
+* [#2674](https://github.com/rasahq/rasa/issues/2674): Add default value `__other__` to `values` of a `CategoricalSlot`.
+
+  All values not mentioned in the list of values of a `CategoricalSlot`
+  will be mapped to `__other__` for featurization.
+
+* [#4088](https://github.com/rasahq/rasa/issues/4088): Add story structure validation functionality (e.g. `rasa data validate stories --max-history 5`).
+
+* [#5065](https://github.com/rasahq/rasa/issues/5065): Add [LexicalSyntacticFeaturizer](./components/featurizers.mdx#lexicalsyntacticfeaturizer) to sparse featurizers.
+
+  `LexicalSyntacticFeaturizer` does the same featurization as the `CRFEntityExtractor`. We extracted the
+  featurization into a separate component so that the features can be reused and featurization is independent from the
+  entity extraction.
+
+* [#5187](https://github.com/rasahq/rasa/issues/5187): Integrate language models from HuggingFace's [Transformers](https://github.com/huggingface/transformers) Library.
+
+  Add a new NLP component [HFTransformersNLP](./components/language-models.mdx#hftransformersnlp) which tokenizes and featurizes incoming messages using a specified
+  pre-trained model with the Transformers library as the backend.
+  Add [LanguageModelTokenizer](./components/tokenizers.mdx#languagemodeltokenizer) and [LanguageModelFeaturizer](./components/featurizers.mdx#languagemodelfeaturizer) which use the information from
+  [HFTransformersNLP](./components/language-models.mdx#hftransformersnlp) and set it correctly for the message object.
+  Language models currently supported: BERT, OpenAIGPT, GPT-2, XLNet, DistilBert, RoBERTa.
+
+* [#5225](https://github.com/rasahq/rasa/issues/5225): Added a new CLI command `rasa export` to publish tracker events from a persistent
+  tracker store using an event broker. See [Export Conversations to an Event Broker](./command-line-interface.mdx#export-conversations-to-an-event-broker), [Tracker Stores](./tracker-stores.mdx)
+  and [Event Brokers](./event-brokers.mdx) for more details.
+
+* [#5230](https://github.com/rasahq/rasa/issues/5230): Refactor how GPU and CPU environments are configured for TensorFlow 2.0.
+
+  Please refer to the [documentation](./tuning-your-model.mdx#configuring-tensorflow) to understand
+  which environment variables to set in what scenarios. A couple of examples are shown below as well:
+
+  ```python
+  # This specifies to use 1024 MB of memory from GPU with logical ID 0 and 2048 MB of memory from GPU with logical ID 1
+  TF_GPU_MEMORY_ALLOC="0:1024, 1:2048"
+
+  # Specifies that at most 3 CPU threads can be used to parallelize multiple non-blocking operations
+  TF_INTER_OP_PARALLELISM_THREADS="3"
+
+  # Specifies that at most 2 CPU threads can be used to parallelize a particular operation.
+  TF_INTRA_OP_PARALLELISM_THREADS="2"
+  ```
+
+* [#5266](https://github.com/rasahq/rasa/issues/5266): Added a new NLU component [DIETClassifier](./components/intent-classifiers.mdx#dietclassifier) and a new policy [TEDPolicy](./policies.mdx#ted-policy).
+
+  DIET (Dual Intent and Entity Transformer) is a multi-task architecture for intent classification and entity
+  recognition. You can read more about this component in our [documentation](./components/intent-classifiers.mdx#dietclassifier).
+  The new component will replace the `EmbeddingIntentClassifier` and the
+  [CRFEntityExtractor](./components/entity-extractors.mdx#crfentityextractor) in the future.
+  Those two components are deprecated from now on.
+  See [migration guide](./migration-guide.mdx#migration-to-rasa-1-8) for details on how to
+  switch to the new component.
+
+  [TEDPolicy](./policies.mdx#ted-policy) is the new name for [EmbeddingPolicy](./policies.mdx#embedding-policy).
+  `EmbeddingPolicy` is deprecated from now on.
+  The functionality of `TEDPolicy` and `EmbeddingPolicy` is the same.
+  Please update your configuration file to use the new name for the policy.
+
+* [#663](https://github.com/rasahq/rasa/issues/663): The sentence vector of the `SpacyFeaturizer` and `MitieFeaturizer` can be calculated using max or mean pooling.
+
+  To specify the pooling operation, set the option `pooling` for the `SpacyFeaturizer` or the `MitieFeaturizer`
+  in your configuration file. The default pooling operation is `mean`. The mean pooling operation also does not take
+  into account words that do not have a word vector.
+
+### Improvements
+
+* [#3975](https://github.com/rasahq/rasa/issues/3975): Added command line argument `--conversation-id` to `rasa interactive`.
+  If the argument is not given, `conversation_id` defaults to a random uuid.
+
+* [#4653](https://github.com/rasahq/rasa/issues/4653): Added a new command-line argument `--init-dir` to command `rasa init` to specify
+  the directory in which the project is initialised.
+
+* [#4682](https://github.com/rasahq/rasa/issues/4682): Added support to send images with the twilio output channel.
+
+* [#4817](https://github.com/rasahq/rasa/issues/4817): Part of Slack sanitization:
+  Multiple garbled URLs in a string coming from Slack will be converted into actual strings.
+  `Example: health check of and to health check of
+  eemdb.net and eemdb1.net`
+
+* [#5117](https://github.com/rasahq/rasa/issues/5117): A new command-line argument `--conversation-id` gives the ability to
+  set a specific conversation ID for each shell session; if not passed, it will be random.
+
+* [#5211](https://github.com/rasahq/rasa/issues/5211): Messages sent to the [Pika Event Broker](./event-brokers.mdx#pika-event-broker) are now persisted. This guarantees
+  that RabbitMQ will re-send previously received messages after a crash.
Note that this + does not help for the case where messages are sent to an unavailable RabbitMQ instance. + +* [#5250](https://github.com/rasahq/rasa/issues/5250): Added support for mattermost connector to use bot accounts. + +* [#5266](https://github.com/rasahq/rasa/issues/5266): We updated our code to TensorFlow 2. + +* [#5317](https://github.com/rasahq/rasa/issues/5317): Events exported using `rasa export` receive a message header if published through a + `PikaEventBroker`. The header is added to the message's `BasicProperties.headers` + under the `rasa-export-process-id` key + (`rasa.core.constants.RASA_EXPORT_PROCESS_ID_HEADER_NAME`). The value is a + UUID4 generated at each call of `rasa export`. The resulting header is a key-value + pair that looks as follows: + + ```text + 'rasa-export-process-id': 'd3b3d3ffe2bd4f379ccf21214ccfb261' + ``` + +* [#5292](https://github.com/rasahq/rasa/issues/5292): Added `followlinks=True` to os.walk calls, to allow the use of symlinks in training, NLU and domain data. + +* [#4811](https://github.com/rasahq/rasa/issues/4811): Support invoking a `SlackBot` by direct messaging or `@` mentions. + +### Bugfixes + +* [#4006](https://github.com/rasahq/rasa/issues/4006): Fixed timestamp parsing warning when using DucklingHTTPExtractor + +* [#4601](https://github.com/rasahq/rasa/issues/4601): Fixed issue with `action_restart` getting overridden by `action_listen` when the `MappingPolicy` and the + [TwoStageFallbackPolicy](./policies.mdx#two-stage-fallback-policy) are used together. + +* [#5201](https://github.com/rasahq/rasa/issues/5201): Fixed incorrectly raised Error encountered in pipelines with a `ResponseSelector` and NLG. + + When NLU training data is split before NLU pipeline comparison, + NLG responses were not also persisted and therefore training for a pipeline including the `ResponseSelector` would fail. + + NLG responses are now persisted along with NLU data to a `/train` directory in the `run_x/xx%_exclusion` folder. + +* [#5277](https://github.com/rasahq/rasa/issues/5277): Fixed sending custom json with Twilio channel + +### Improved Documentation + +* [#5174](https://github.com/rasahq/rasa/issues/5174): Updated the documentation to properly suggest not to explicitly add utterance actions to the domain. + +* [#5189](https://github.com/rasahq/rasa/issues/5189): Added user guide for reminders and external events, including `reminderbot` demo. + +### Miscellaneous internal changes + +* #3923, #4597, #4903, #5180, #5189, #5266, #699 + +## [1.7.4] - 2020-02-24 + +### Bugfixes + +* [#5068](https://github.com/rasahq/rasa/issues/5068): Tracker stores supporting conversation sessions (`SQLTrackerStore` and + `MongoTrackerStore`) do not save the tracker state to database immediately after + starting a new conversation session. This leads to the number of events being saved + in addition to the already-existing ones to be calculated correctly. + + This fixes `action_listen` events being saved twice at the beginning of + conversation sessions. + +## [1.7.3] - 2020-02-21 + +### Bugfixes + +* [#5231](https://github.com/rasahq/rasa/issues/5231): Fix segmentation fault when running `rasa train` or `rasa shell`. 
+ +### Improved Documentation + +* [#5286](https://github.com/rasahq/rasa/issues/5286): Fix doc links on “Deploying your Assistant” page + +## [1.7.2] - 2020-02-13 + +### Bugfixes + +* [#5197](https://github.com/rasahq/rasa/issues/5197): Fixed incompatibility of Oracle with the [SQLTrackerStore](./tracker-stores.mdx#sqltrackerstore), by using a `Sequence` + for the primary key columns. This does not change anything for SQL databases other than Oracle. + If you are using Oracle, please create a sequence with the instructions in the [SQLTrackerStore](./tracker-stores.mdx#sqltrackerstore) docs. + +### Improved Documentation + +* [#5197](https://github.com/rasahq/rasa/issues/5197): Added section on setting up the SQLTrackerStore with Oracle + +* [#5210](https://github.com/rasahq/rasa/issues/5210): Renamed “Running the Server” page to “Configuring the HTTP API” + +## [1.7.1] - 2020-02-11 + +### Bugfixes + +* [#5106](https://github.com/rasahq/rasa/issues/5106): Fixed file loading of non proper UTF-8 story files, failing properly when checking for + story files. + +* [#5162](https://github.com/rasahq/rasa/issues/5162): Fix problem with multi-intents. + Training with multi-intents using the `CountVectorsFeaturizer` together with `EmbeddingIntentClassifier` is + working again. + +* [#5171](https://github.com/rasahq/rasa/issues/5171): Fix bug `ValueError: Cannot concatenate sparse features as sequence dimension does not match`. + + When training a Rasa model that contains responses for just some of the intents, training was failing. + Fixed the featurizers to return a consistent feature vector in case no response was given for a specific message. + +* [#5199](https://github.com/rasahq/rasa/issues/5199): If no text features are present in `EmbeddingIntentClassifier` return the intent `None`. + +* [#5216](https://github.com/rasahq/rasa/issues/5216): Resolve version conflicts: Pin version of cloudpickle to ~=1.2.0. + +## [1.7.0] - 2020-01-29 + +### Deprecations and Removals + +* [#4964](https://github.com/rasahq/rasa/issues/4964): The endpoint `/conversations//execute` is now deprecated. Instead, users should use + the `/conversations//trigger_intent` endpoint and thus trigger intents instead of actions. + +* [#4978](https://github.com/rasahq/rasa/issues/4978): Remove option `use_cls_token` from tokenizers and option `return_sequence` from featurizers. + + By default all tokenizer add a special token (`__CLS__`) to the end of the list of tokens. + This token will be used to capture the features of the whole utterance. + + The featurizers will return a matrix of size (number-of-tokens x feature-dimension) by default. + This allows to train sequence models. + However, the feature vector of the `__CLS__` token can be used to train non-sequence models. + The corresponding classifier can decide what kind of features to use. + +### Features + +* [#400](https://github.com/rasahq/rasa/issues/400): Rename `templates` key in domain to `responses`. + + `templates` key will still work for backwards compatibility but will raise a future warning. + +* [#4902](https://github.com/rasahq/rasa/issues/4902): Added a new configuration parameter, `ranking_length` to the `EmbeddingPolicy`, `EmbeddingIntentClassifier`, + and `ResponseSelector` classes. + +* [#4964](https://github.com/rasahq/rasa/issues/4964): External events and reminders now trigger intents (and entities) instead of actions. 
  Add a new endpoint `/conversations/{conversation_id}/trigger_intent`, which lets the user specify an intent and a
  list of entities that are injected into the conversation in place of a user message. The bot then predicts and
  executes a response action.

* [#4978](https://github.com/rasahq/rasa/issues/4978): Add `ConveRTTokenizer`.

  The tokenizer should be used whenever the `ConveRTFeaturizer` is used.

  Every tokenizer now supports the following configuration options:
  `intent_tokenization_flag`: Flag to check whether to split intents (default `False`).
  `intent_split_symbol`: Symbol on which the intent should be split (default `_`).

### Improvements

* [#1988](https://github.com/rasahq/rasa/issues/1988): Remove the need to explicitly specify utter actions in the `actions` section if these actions are already
  listed in the `templates` section.

* [#4877](https://github.com/rasahq/rasa/issues/4877): Entity examples that have been extracted using an external extractor are excluded
  from Markdown dumping in `MarkdownWriter.dumps()`. The excluded external extractors
  are `DucklingHTTPExtractor` and `SpacyEntityExtractor`.

* [#4902](https://github.com/rasahq/rasa/issues/4902): The `EmbeddingPolicy`, `EmbeddingIntentClassifier`, and `ResponseSelector` now by default normalize confidence
  levels over the top 10 results. See [Rasa 1.6 to Rasa 1.7](./migration-guide.mdx#migration-to-rasa-1-7) for more details.

* [#4964](https://github.com/rasahq/rasa/issues/4964): `ReminderCancelled` can now cancel multiple reminders if no name is given. It still cancels a single
  reminder if the reminder's name is specified.

### Bugfixes

* [#4774](https://github.com/rasahq/rasa/issues/4774): Requests to `/model/train` no longer block other requests to the Rasa server.

* [#4896](https://github.com/rasahq/rasa/issues/4896): Fixed the default behavior of `rasa test core --evaluate-model-directory` when called without `--model`. Previously, the latest model file was used as `--model`. Now the default model directory is used instead.

  New behavior of `rasa test core --evaluate-model-directory` when given an existing file as argument for `--model`: previously, this led to an error. Now a warning is displayed and the directory containing the given file is used as `--model`.

* [#5040](https://github.com/rasahq/rasa/issues/5040): Updated the dependency `networkx` from 2.3.0 to 2.4.0. The old version created incompatibilities when using pip.

  There was an incompatibility between Rasa's dependency on `requests` 2.22.0 and Rasa's own dependency on `networkx`, which raised errors upon `pip install`. A bug in `requirements.txt`, which used `~=` instead of `==`, was also corrected. All of these are fixed by using networkx 2.4.0.

* [#5057](https://github.com/rasahq/rasa/issues/5057): Fixed a compatibility issue with Microsoft Bot Framework Emulator if `service_url` lacked a trailing `/`.

* [#5092](https://github.com/rasahq/rasa/issues/5092): DynamoDB tracker store decimal values will now be rounded on save. Previously, values exceeding 38 digits caused an unhandled error.

### Miscellaneous internal changes

* #4458, #4664, #4780, #5029

## [1.6.2] - 2020-01-28

### Improvements

* [#4994](https://github.com/rasahq/rasa/issues/4994): Switched back to a TensorFlow release which only includes CPU support to reduce the
  size of the dependencies. If you want to use the TensorFlow package with GPU support,
  please run `pip install tensorflow-gpu==1.15.0`.
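As a quick reference, the optional GPU setup from the entry above as a shell sketch (assuming a standard `pip`-based installation of this release):

```bash
# Rasa 1.6.2 pulls in the CPU-only TensorFlow build by default.
pip install rasa==1.6.2

# Optionally add GPU support by installing the matching GPU build of TensorFlow.
pip install tensorflow-gpu==1.15.0
```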
### Bugfixes

* [#5111](https://github.com/rasahq/rasa/issues/5111): Fixed the `Exception 'Loop' object has no attribute '_ready'` error when running
  `rasa init`.

* [#5126](https://github.com/rasahq/rasa/issues/5126): Updated the end-to-end `ValueError` you receive when you have an invalid story format so that it points
  to the updated doc link.

## [1.6.1] - 2020-01-07

### Bugfixes

* [#4989](https://github.com/rasahq/rasa/issues/4989): Use an empty domain in case a model is loaded which has no domain
  (avoids errors when accessing `agent.domain`).

* [#4995](https://github.com/rasahq/rasa/issues/4995): Replaced the error message with a warning in tokenizers and featurizers if the default parameter is not set.

* [#5019](https://github.com/rasahq/rasa/issues/5019): Pinned the sanic patch version instead of the minor version. This fixes the sanic `_run_request_middleware()` error.

* [#5032](https://github.com/rasahq/rasa/issues/5032): Fixed the wrong calculation of additional conversation events when saving the conversation.
  This led to conversation events not being saved.

* [#5032](https://github.com/rasahq/rasa/issues/5032): Fixed the wrong order of conversation events when pushing events to conversations via
  `POST /conversations/{conversation_id}/tracker/events`.

## [1.6.0] - 2019-12-18

### Deprecations and Removals

* [#4935](https://github.com/rasahq/rasa/issues/4935): Removed `ner_features` as a feature name from `CRFEntityExtractor`; use `text_dense_features` instead.

  The following settings match the previous `NGramFeaturizer`:

  ```yaml
  - name: 'CountVectorsFeaturizer'
    analyzer: 'char_wb'
    min_ngram: 3
    max_ngram: 17
    max_features: 10
    min_df: 5
  ```

* [#4957](https://github.com/rasahq/rasa/issues/4957): To [use custom features in the `CRFEntityExtractor`](./components/entity-extractors.mdx#passing-custom-features-to-crfentityextractor)
  use `text_dense_features` instead of `ner_features`. If
  `text_dense_features` are present in the feature set, the `CRFEntityExtractor` will automatically make use of
  them. Just make sure to add a dense featurizer in front of the `CRFEntityExtractor` in your pipeline and set the
  flag `return_sequence` to `True` for that featurizer.

* [#4990](https://github.com/rasahq/rasa/issues/4990): Deprecated `Agent.continue_training`. Instead, a model should be retrained.

* [#684](https://github.com/rasahq/rasa/issues/684): Specifying lookup tables directly in the NLU file is now deprecated. Please specify
  them in an external file.

### Features

* [#4795](https://github.com/rasahq/rasa/issues/4795): Replaced the warnings about missing templates, intents etc. in `validator.py` with debug messages.

* [#4830](https://github.com/rasahq/rasa/issues/4830): Added conversation sessions to trackers.

  A conversation session represents the dialog between the assistant and a user.
  Conversation sessions can begin in three ways: 1. the user begins the conversation
  with the assistant, 2. the user sends their first message after a configurable period
  of inactivity, or 3. a manual session start is triggered with the `/session_start`
  intent message. The period of inactivity after which a new conversation session is
  triggered is defined in the domain using the `session_expiration_time` key in the
  `session_config` section. The introduction of conversation sessions comprises the
  following changes:

  * Added a new event `SessionStarted` that marks the beginning of a new conversation
    session.

  * Added a new default action `ActionSessionStart`.
    This action takes all
    `SlotSet` events from the previous session and applies them to the next session.

  * Added a new default intent `session_start` which triggers the start of a new
    conversation session.

  * `SQLTrackerStore` and `MongoTrackerStore` only retrieve
    events from the last session from the database.

  :::note
  The session behaviour is disabled for existing projects, i.e. existing domains
  without a `session_config` section.

  :::

* [#4935](https://github.com/rasahq/rasa/issues/4935): Preparation for an upcoming change in the `EmbeddingIntentClassifier`:

  Added the option `use_cls_token` to all tokenizers. If it is set to `True`, the token `__CLS__` will be added to
  the end of the list of tokens. The default is set to `False`. No need to change the default value for now.

  Added the option `return_sequence` to all featurizers. By default all featurizers return a matrix of size
  (1 x feature-dimension). If the option `return_sequence` is set to `True`, the corresponding featurizer will return
  a matrix of size (token-length x feature-dimension). See [Text Featurizers](./components/featurizers.mdx).
  The default value is set to `False`. However, you might want to set it to `True` if you want to use custom features
  in the `CRFEntityExtractor`.
  See [passing custom features to the `CRFEntityExtractor`](./components/entity-extractors.mdx#passing-custom-features-to-crfentityextractor).

  Changed some featurizers to use sparse features, which should significantly reduce memory usage with large amounts of training data.
  Read more: [Text Featurizers](./components/featurizers.mdx).

  :::caution
  These changes break model compatibility. You will need to retrain your old models!

  :::

### Improvements

* [#3549](https://github.com/rasahq/rasa/issues/3549): Added a `--no-plot` option for the `rasa test` command, which disables rendering of the confusion matrix and histogram. By default plots will be rendered.

* [#4086](https://github.com/rasahq/rasa/issues/4086): If matplotlib cannot set up a default backend, it will automatically be set to TkAgg or Agg.

* [#4647](https://github.com/rasahq/rasa/issues/4647): Added the option `random_seed` to the `rasa data split nlu` command to generate
  reproducible train/test splits.

* [#4734](https://github.com/rasahq/rasa/issues/4734): Changed the `url` `__init__()` argument for custom tracker stores to `host` to reflect the `__init__` arguments of
  currently supported tracker stores. Note that in `endpoints.yml`, these are still declared as `url`.

* [#4751](https://github.com/rasahq/rasa/issues/4751): The `kafka-python` dependency has become an “extra” dependency. To use the
  `KafkaEventConsumer`, `rasa` has to be installed with the `[kafka]` option, i.e.

  ```bash
  $ pip install rasa[kafka]
  ```

* [#4801](https://github.com/rasahq/rasa/issues/4801): Allow creation of natural language interpreter and generator by class name reference
  in `endpoints.yml`.

* [#4834](https://github.com/rasahq/rasa/issues/4834): Made it explicit that interactive learning does not work with NLU-only models.

  Interactive learning no longer trains NLU-only models if no model is provided
  and no core data is provided.

* [#4899](https://github.com/rasahq/rasa/issues/4899): The `intent_report.json` created by `rasa test` now contains an extra field
  `confused_with` for each intent.
This is a dictionary containing the names of + the most common false positives when this intent should be predicted, and the + number of such false positives. + +* [#4976](https://github.com/rasahq/rasa/issues/4976): `rasa test nlu --cross-validation` now also includes an evaluation of the response selector. + As a result, the train and test F1-score, accuracy and precision is logged for the response selector. + A report is also generated in the `results` folder by the name `response_selection_report.json` + +### Bugfixes + +* [#4635](https://github.com/rasahq/rasa/issues/4635): If a `wait_time_between_pulls` is configured for the model server in `endpoints.yml`, + this will be used instead of the default one when running Rasa X. + +* [#4759](https://github.com/rasahq/rasa/issues/4759): Training Luis data with `luis_schema_version` higher than 4.x.x will show a warning instead of throwing an exception. + +* [#4799](https://github.com/rasahq/rasa/issues/4799): Running `rasa interactive` with no NLU data now works, with the functionality of `rasa interactive core`. + +* [#4917](https://github.com/rasahq/rasa/issues/4917): When loading models from S3, namespaces (folders within a bucket) are now respected. + Previously, this would result in an error upon loading the model. + +* [#4925](https://github.com/rasahq/rasa/issues/4925): “rasa init” will ask if user wants to train a model + +* [#4942](https://github.com/rasahq/rasa/issues/4942): Pin `multidict` dependency to 4.6.1 to prevent sanic from breaking, + see [https://github.com/huge-success/sanic/issues/1729](https://github.com/huge-success/sanic/issues/1729) + +* [#4985](https://github.com/rasahq/rasa/issues/4985): Fix errors during training and testing of `ResponseSelector`. + +## [1.5.3] - 2019-12-11 + +### Improvements + +* [#4933](https://github.com/rasahq/rasa/issues/4933): Improved error message that appears when an incorrect parameter is passed to a policy. + +### Bugfixes + +* [#4914](https://github.com/rasahq/rasa/issues/4914): Added `rasa/nlu/schemas/config.yml` to wheel package + +* [#4942](https://github.com/rasahq/rasa/issues/4942): Pin `multidict` dependency to 4.6.1 to prevent sanic from breaking, + see [https://github.com/huge-success/sanic/issues/1729](https://github.com/huge-success/sanic/issues/1729) + +## [1.5.2] - 2019-12-09 + +### Improvements + +* [#3684](https://github.com/rasahq/rasa/issues/3684): `rasa interactive` will skip the story visualization of training stories in case + there are more than 200 stories. Stories created during interactive learning will be + visualized as before. + +* [#4792](https://github.com/rasahq/rasa/issues/4792): The log level for SocketIO loggers, including `websockets.protocol`, `engineio.server`, + and `socketio.server`, is now handled by the `LOG_LEVEL_LIBRARIES` environment variable, + where the default log level is `ERROR`. + +* [#4873](https://github.com/rasahq/rasa/issues/4873): Updated all example bots and documentation to use the updated `dispatcher.utter_message()` method from rasa-sdk==1.5.0. + +### Bugfixes + +* [#3684](https://github.com/rasahq/rasa/issues/3684): `rasa interactive` will not load training stories in case the visualization is + skipped. + +* [#4789](https://github.com/rasahq/rasa/issues/4789): Fixed error where spacy models where not found in the docker images. + +* [#4802](https://github.com/rasahq/rasa/issues/4802): Fixed unnecessary `kwargs` unpacking in `rasa.test.test_core` call in `rasa.test.test` function. 
+ +* [#4898](https://github.com/rasahq/rasa/issues/4898): Training data files now get loaded in the same order (especially relevant to subdirectories) each time to ensure training consistency when using a random seed. + +* [#4918](https://github.com/rasahq/rasa/issues/4918): Locks for tickets in `LockStore` are immediately issued without a redundant + check for their availability. + +### Improved Documentation + +* [#4844](https://github.com/rasahq/rasa/issues/4844): Added `towncrier` to automatically collect changelog entries. + +* [#4869](https://github.com/rasahq/rasa/issues/4869): Document the pipeline for `pretrained_embeddings_convert` in the pre-configured pipelines section. + +* [#4894](https://github.com/rasahq/rasa/issues/4894): `Proactively Reaching Out to the User Using Actions` now correctly links to the + endpoint specification. + +## [1.5.1] - 2019-11-27 + +### Improvements + +* When NLU training data is dumped as Markdown file the intents are not longer ordered + alphabetically, but in the original order of given training data + +### Bugfixes + +* End to end stories now support literal payloads which specify entities, e.g. + `greet: /greet{"name": "John"}` + +* Slots will be correctly interpolated if there are lists in custom response templates. + +* Fixed compatibility issues with `rasa-sdk` `1.5` + +* Updated `/status` endpoint to show correct path to model archive + +## [1.5.0] - 2019-11-26 + +### Features + +* Added data validator that checks if domain object returned is empty. If so, exit early + from the command `rasa data validate`. + +* Added the KeywordIntentClassifier. + +* Added documentation for `AugmentedMemoizationPolicy`. + +* Fall back to `InMemoryTrackerStore` in case there is any problem with the current + tracker store. + +* Arbitrary metadata can now be attached to any `Event` subclass. The data must be + stored under the `metadata` key when reading the event from a JSON object or + dictionary. + +* Add command line argument `rasa x --config CONFIG`, to specify path to the policy + and NLU pipeline configuration of your bot (default: `config.yml`). + +* Added a new NLU featurizer - `ConveRTFeaturizer` based on [ConveRT](https://github.com/PolyAI-LDN/polyai-models) model released by PolyAI. + +* Added a new preconfigured pipeline - `pretrained_embeddings_convert`. + +### Improvements + +* Do not retrain the entire Core model if only the `templates` section of the domain + is changed. + +* Upgraded `jsonschema` version. + +### Deprecations and Removals + +* Remove duplicate messages when creating training data (issues/1446). + +### Bugfixes + +* `MultiProjectImporter` now imports files in the order of the import statements + +* Fixed server hanging forever on leaving `rasa shell` before first message + +* Fixed rasa init showing traceback error when user does Keyboard Interrupt before choosing a project path + +* `CountVectorsFeaturizer` featurizes intents only if its analyzer is set to `word` + +* Fixed bug where facebooks generic template was not rendered when buttons were `None` + +* Fixed default intents unnecessarily raising undefined parsing error + +## [1.4.6] - 2019-11-22 + +### Bugfixes + +* Fixed Rasa X not working when any tracker store was configured for Rasa. + +* Use the matplotlib backend `agg` in case the `tkinter` package is not installed. 
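For illustration, a minimal sketch of the backend fallback described in the entry above, using only the standard `matplotlib` and `importlib` APIs (this is not the actual Rasa implementation):

```python
import importlib

import matplotlib


def select_backend() -> None:
    """Prefer an interactive backend, but fall back to the headless `agg` backend."""
    try:
        importlib.import_module("tkinter")  # raises ImportError if tkinter is missing
        matplotlib.use("TkAgg")
    except ImportError:
        # Containers and CI machines often lack tkinter; `agg` needs no display.
        matplotlib.use("agg")


select_backend()
```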
+ +## [1.4.5] - 2019-11-14 + +### Bugfixes + +* NLU-only models no longer throw warnings about parsing features not defined in the domain + +* Fixed bug that stopped Dockerfiles from building version 1.4.4. + +* Fixed format guessing for e2e stories with intent restated as `/intent` + +## [1.4.4] - 2019-11-13 + +### Features + +* `PikaEventProducer` adds the RabbitMQ `App ID` message property to published + messages with the value of the `RASA_ENVIRONMENT` environment variable. The + message property will not be assigned if this environment variable isn't set. + +### Improvements + +* Updated Mattermost connector documentation to be more clear. + +* Updated format strings to f-strings where appropriate. + +* Updated tensorflow requirement to `1.15.0` + +* Dump domain using UTF-8 (to avoid `\\UXXXX` sequences in the dumped files) + +### Bugfixes + +* Fixed exporting NLU training data in `json` format from `rasa interactive` + +* Fixed numpy deprecation warnings + +## [1.4.3] - 2019-10-29 + +### Bugfixes + +* Fixed `Connection reset by peer` errors and bot response delays when using the + RabbitMQ event broker. + +## [1.4.2] - 2019-10-28 + +### Deprecations and Removals + +* TensorFlow deprecation warnings are no longer shown when running `rasa x` + +### Bugfixes + +* Fixed `'Namespace' object has no attribute 'persist_nlu_data'` error during + interactive learning + +* Pinned networkx~=2.3.0 to fix visualization in rasa interactive and Rasa X + +* Fixed `No model found` error when using `rasa run actions` with “actions” + as a directory. + +## [1.4.1] - 2019-10-22 + +Regression: changes from `1.2.12` were missing from `1.4.0`, readded them + +## [1.4.0] - 2019-10-19 + +### Features + +* add flag to CLI to persist NLU training data if needed + +* log a warning if the `Interpreter` picks up an intent or an entity that does not + exist in the domain file. + +* added `DynamoTrackerStore` to support persistence of agents running on AWS + +* added docstrings for `TrackerStore` classes + +* added buttons and images to mattermost. + +* `CRFEntityExtractor` updated to accept arbitrary token-level features like word + vectors (issues/4214) + +* `SpacyFeaturizer` updated to add `ner_features` for `CRFEntityExtractor` + +* Sanitizing incoming messages from slack to remove slack formatting like `` + or `` and substitute it with original content + +* Added the ability to configure the number of Sanic worker processes in the HTTP + server (`rasa.server`) and input channel server + (`rasa.core.agent.handle_channels()`). The number of workers can be set using the + environment variable `SANIC_WORKERS` (default: 1). A value of >1 is allowed only in + combination with `RedisLockStore` as the lock store. + +* Botframework channel can handle uploaded files in `UserMessage` metadata. + +* Added data validator that checks there is no duplicated example data across multiples intents + +### Improvements + +* Unknown sections in markdown format (NLU data) are not ignored anymore, but instead an error is raised. + +* It is now easier to add metadata to a `UserMessage` in existing channels. + You can do so by overwriting the method `get_metadata`. The return value of this + method will be passed to the `UserMessage` object. + +* Tests can now be run in parallel + +* Serialise `DialogueStateTracker` as json instead of pickle. **DEPRECATION warning**: + Deserialisation of pickled trackers will be deprecated in version 2.0. 
For now, + trackers are still loaded from pickle but will be dumped as json in any subsequent + save operations. + +* Event brokers are now also passed to custom tracker stores (using the `event_broker` parameter) + +* Don't run the Rasa Docker image as `root`. + +* Use multi-stage builds to reduce the size of the Rasa Docker image. + +* Updated the `/status` api route to use the actual model file location instead of the `tmp` location. + +### Deprecations and Removals + +* **Removed Python 3.5 support** + +### Bugfixes + +* fixed missing `tkinter` dependency for running tests on Ubuntu + +* fixed issue with `conversation` JSON serialization + +* fixed the hanging HTTP call with `ner_duckling_http` pipeline + +* fixed Interactive Learning intent payload messages saving in nlu files + +* fixed DucklingHTTPExtractor dimensions by actually applying to the request + +## [1.3.10] - 2019-10-18 + +### Features + +* Can now pass a package as an argument to the `--actions` parameter of the + `rasa run actions` command. + +### Bugfixes + +* Fixed visualization of stories with entities which led to a failing + visualization in Rasa X + +## [1.3.9] - 2019-10-10 + +### Features + +* Port of 1.2.10 (support for RabbitMQ TLS authentication and `port` key in + event broker endpoint config). + +* Port of 1.2.11 (support for passing a CA file for SSL certificate verification via the + –ssl-ca-file flag). + +### Bugfixes + +* Fixed the hanging HTTP call with `ner_duckling_http` pipeline. + +* Fixed text processing of `intent` attribute inside `CountVectorFeaturizer`. + +* Fixed `argument of type 'NoneType' is not iterable` when using `rasa shell`, + `rasa interactive` / `rasa run` + +## [1.3.8] - 2019-10-08 + +### Improvements + +* Policies now only get imported if they are actually used. This removes + TensorFlow warnings when starting Rasa X + +### Bugfixes + +* Fixed error `Object of type 'MaxHistoryTrackerFeaturizer' is not JSON serializable` + when running `rasa train core` + +* Default channel `send_` methods no longer support kwargs as they caused issues in incompatible channels + +## [1.3.7] - 2019-09-27 + +### Bugfixes + +* re-added TLS, SRV dependencies for PyMongo + +* socketio can now be run without turning on the `--enable-api` flag + +* MappingPolicy no longer fails when the latest action doesn't have a policy + +## [1.3.6] - 2019-09-21 + +### Features + +* Added the ability for users to specify a conversation id to send a message to when + using the `RasaChat` input channel. + +## [1.3.5] - 2019-09-20 + +### Bugfixes + +* Fixed issue where `rasa init` would fail without spaCy being installed + +## [1.3.4] - 2019-09-20 + +### Features + +* Added the ability to set the `backlog` parameter in Sanics `run()` method using + the `SANIC_BACKLOG` environment variable. This parameter sets the + number of unaccepted connections the server allows before refusing new + connections. A default value of 100 is used if the variable is not set. + +* Status endpoint (`/status`) now also returns the number of training processes currently running + +### Bugfixes + +* Added the ability to properly deal with spaCy `Doc`-objects created on + empty strings as discussed [here](https://github.com/RasaHQ/rasa/issues/4445). + Only training samples that actually bear content are sent to `self.nlp.pipe` + for every given attribute. Non-content-bearing samples are converted to empty + `Doc`-objects. The resulting lists are merged with their preserved order and + properly returned. 
+ +* asyncio warnings are now only printed if the callback takes more than 100ms + (up from 1ms). + +* `agent.load_model_from_server` no longer affects logging. + +### Improvements + +* The endpoint `POST /model/train` no longer supports specifying an output directory + for the trained model using the field `out`. Instead you can choose whether you + want to save the trained model in the default model directory (`models`) + (default behavior) or in a temporary directory by specifying the + `save_to_default_model_directory` field in the training request. + +## [1.3.3] - 2019-09-13 + +### Bugfixes + +* Added a check to avoid training `CountVectorizer` for a particular + attribute of a message if no text is provided for that attribute across + the training data. + +* Default one-hot representation for label featurization inside `EmbeddingIntentClassifier` if label features don't exist. + +* Policy ensemble no longer incorrectly wrings “missing mapping policy” when + mapping policy is present. + +* “text” from `utter_custom_json` now correctly saved to tracker when using telegram channel + +### Deprecations and Removals + +* Removed computation of `intent_spacy_doc`. As a result, none of the spacy components process intents now. + +## [1.3.2] - 2019-09-10 + +### Bugfixes + +* SQL tracker events are retrieved ordered by timestamps. This fixes interactive + learning events being shown in the wrong order. + +## [1.3.1] - 2019-09-09 + +### Improvements + +* Pin gast to == 0.2.2 + +## [1.3.0] - 2019-09-05 + +### Features + +* Added option to persist nlu training data (default: False) + +* option to save stories in e2e format for interactive learning + +* bot messages contain the `timestamp` of the `BotUttered` event, which can be used in channels + +* `FallbackPolicy` can now be configured to trigger when the difference between confidences of two predicted intents is too narrow + +* experimental training data importer which supports training with data of multiple + sub bots. Please see the + [docs](./training-data-importers.mdx) for more + information. + +* throw error during training when triggers are defined in the domain without + `MappingPolicy` being present in the policy ensemble + +* The tracker is now available within the interpreter's `parse` method, giving the + ability to create interpreter classes that use the tracker state (eg. slot values) + during the parsing of the message. More details on motivation of this change see + issues/3015. + +* add example bot `knowledgebasebot` to showcase the usage of `ActionQueryKnowledgeBase` + +* `softmax` starspace loss for both `EmbeddingPolicy` and `EmbeddingIntentClassifier` + +* `balanced` batching strategy for both `EmbeddingPolicy` and `EmbeddingIntentClassifier` + +* `max_history` parameter for `EmbeddingPolicy` + +* Successful predictions of the NER are written to a file if `--successes` is set when running `rasa test nlu` + +* Incorrect predictions of the NER are written to a file by default. You can disable it via `--no-errors`. + +* New NLU component `ResponseSelector` added for the task of response selection + +* Message data attribute can contain two more keys - `response_key`, `response` depending on the training data + +* New action type implemented by `ActionRetrieveResponse` class and identified with `response_` prefix + +* Vocabulary sharing inside `CountVectorsFeaturizer` with `use_shared_vocab` flag. 
If set to True, vocabulary of corpus is shared between text, intent and response attributes of message + +* Added an option to share the hidden layer weights of text input and label input inside `EmbeddingIntentClassifier` using the flag `share_hidden_layers` + +* New type of training data file in NLU which stores response phrases for response selection task. + +* Add flag `intent_split_symbol` and `intent_tokenization_flag` to all `WhitespaceTokenizer`, `JiebaTokenizer` and `SpacyTokenizer` + +* Added evaluation for response selector. Creates a report `response_selection_report.json` inside `--out` directory. + +* argument `--config-endpoint` to specify the URL from which `rasa x` pulls + the runtime configuration (endpoints and credentials) + +* `LockStore` class storing instances of `TicketLock` for every `conversation_id` + +* environment variables `SQL_POOL_SIZE` (default: 50) and `SQL_MAX_OVERFLOW` + (default: 100) can be set to control the pool size and maximum pool overflow for + `SQLTrackerStore` when used with the `postgresql` dialect + +* Add a bot_challenge intent and a utter_iamabot action to all example projects and the rasa init bot. + +* Allow sending attachments when using the socketio channel + +* `rasa data validate` will fail with a non-zero exit code if validation fails + +### Improvements + +* added character-level `CountVectorsFeaturizer` with empirically found parameters + into the `supervised_embeddings` NLU pipeline template + +* NLU evaluations now also stores its output in the output directory like the core evaluation + +* show warning in case a default path is used instead of a provided, invalid path + +* compare mode of `rasa train core` allows the whole core config comparison, + naming style of models trained for comparison is changed (this is a breaking change) + +* pika keeps a single connection open, instead of open and closing on each incoming event + +* `RasaChatInput` fetches the public key from the Rasa X API. The key is used to + decode the bearer token containing the conversation ID. This requires + `rasa-x>=0.20.2`. + +* more specific exception message when loading custom components depending on whether component's path or + class name is invalid or can't be found in the global namespace + +* change priorities so that the `MemoizationPolicy` has higher priority than the `MappingPolicy` + +* substitute LSTM with Transformer in `EmbeddingPolicy` + +* `EmbeddingPolicy` can now use `MaxHistoryTrackerFeaturizer` + +* non zero `evaluate_on_num_examples` in `EmbeddingPolicy` + and `EmbeddingIntentClassifier` is the size of + hold out validation set that is excluded from training data + +* defaults parameters and architectures for both `EmbeddingPolicy` and + `EmbeddingIntentClassifier` are changed (this is a breaking change) + +* evaluation of NER does not include 'no-entity' anymore + +* `--successes` for `rasa test nlu` is now boolean values. If set incorrect/successful predictions + are saved in a file. + +* `--errors` is renamed to `--no-errors` and is now a boolean value. By default incorrect predictions are saved + in a file. If `--no-errors` is set predictions are not written to a file. + +* Remove `label_tokenization_flag` and `label_split_symbol` from `EmbeddingIntentClassifier`. Instead move these parameters to `Tokenizers`. + +* Process features of all attributes of a message, i.e. - text, intent and response inside the respective component itself. For e.g. - intent of a message is now tokenized inside the tokenizer itself. 
+ +* Deprecate `as_markdown` and `as_json` in favour of `nlu_as_markdown` and `nlu_as_json` respectively. + +* pin python-engineio >= 3.9.3 + +* update python-socketio req to >= 4.3.1 + +### Bugfixes + +* `rasa test nlu` with a folder of configuration files + +* `MappingPolicy` standard featurizer is set to `None` + +* Removed `text` parameter from send_attachment function in slack.py to avoid duplication of text output to slackbot + +* server `/status` endpoint reports status when an NLU-only model is loaded + +### Deprecations and Removals + +* Removed `--report` argument from `rasa test nlu`. All output files are stored in the `--out` directory. + +## [1.2.12] - 2019-10-16 + +### Features + +* Support for transit encryption with Redis via `use_ssl: True` in the tracker store config in endpoints.yml + +## [1.2.11] - 2019-10-09 + +### Features + +* Support for passing a CA file for SSL certificate verification via the + –ssl-ca-file flag + +## [1.2.10] - 2019-10-08 + +### Features + +* Added support for RabbitMQ TLS authentication. The following environment variables + need to be set: + `RABBITMQ_SSL_CLIENT_CERTIFICATE` - path to the SSL client certificate (required) + `RABBITMQ_SSL_CLIENT_KEY` - path to the SSL client key (required) + `RABBITMQ_SSL_CA_FILE` - path to the SSL CA file (optional, for certificate + verification) + `RABBITMQ_SSL_KEY_PASSWORD` - SSL private key password (optional) + +* Added ability to define the RabbitMQ port using the `port` key in the + `event_broker` endpoint config. + +## [1.2.9] - 2019-09-17 + +### Bugfixes + +* Correctly pass SSL flag values to x CLI command (backport of + +## [1.2.8] - 2019-09-10 + +### Bugfixes + +* SQL tracker events are retrieved ordered by timestamps. This fixes interactive + learning events being shown in the wrong order. Backport of `1.3.2` patch + (PR #4427). + +## [1.2.7] - 2019-09-02 + +### Bugfixes + +* Added `query` dictionary argument to `SQLTrackerStore` which will be appended + to the SQL connection URL as query parameters. + +## [1.2.6] - 2019-09-02 + +### Bugfixes + +* fixed bug that occurred when sending template `elements` through a channel that doesn't support them + +## [1.2.5] - 2019-08-26 + +### Features + +* SSL support for `rasa run` command. Certificate can be specified using + `--ssl-certificate` and `--ssl-keyfile`. + +### Bugfixes + +* made default augmentation value consistent across repo + +* `'/restart'` will now also restart the bot if the tracker is paused + +## [1.2.4] - 2019-08-23 + +### Bugfixes + +* the `SocketIO` input channel now allows accesses from other origins + (fixes `SocketIO` channel on Rasa X) + +## [1.2.3] - 2019-08-15 + +### Improvements + +* messages with multiple entities are now handled properly with e2e evaluation + +* `data/test_evaluations/end_to_end_story.md` was re-written in the + restaurantbot domain + +## [1.2.3] - 2019-08-15 + +### Improvements + +* messages with multiple entities are now handled properly with e2e evaluation + +* `data/test_evaluations/end_to_end_story.md` was re-written in the restaurantbot domain + +### Bugfixes + +* Free text input was not allowed in the Rasa shell when the response template + contained buttons, which has now been fixed. 
+ +## [1.2.2] - 2019-08-07 + +### Bugfixes + +* `UserUttered` events always got the same timestamp + +## [1.2.1] - 2019-08-06 + +### Features + +* Docs now have an `EDIT THIS PAGE` button + +### Bugfixes + +* `Flood control exceeded` error in Telegram connector which happened because the + webhook was set twice + +## [1.2.0] - 2019-08-01 + +### Features + +* add root route to server started without `--enable-api` parameter + +* add `--evaluate-model-directory` to `rasa test core` to evaluate models + from `rasa train core -c ` + +* option to send messages to the user by calling + `POST /conversations/{conversation_id}/execute` + +### Improvements + +* `Agent.update_model()` and `Agent.handle_message()` now work without needing to set a domain + or a policy ensemble + +* Update pytype to `2019.7.11` + +* new event broker class: `SQLProducer`. This event broker is now used when running locally with + Rasa X + +* API requests are not longer logged to `rasa_core.log` by default in order to avoid + problems when running on OpenShift (use `--log-file rasa_core.log` to retain the + old behavior) + +* `metadata` attribute added to `UserMessage` + +### Bugfixes + +* `rasa test core` can handle compressed model files + +* rasa can handle story files containing multi line comments + +* template will retain { if escaped with {. e.g. {{“foo”: {bar}}} will result in {“foo”: “replaced value”} + +## [1.1.8] - 2019-07-25 + +### Features + +* `TrainingFileImporter` interface to support customizing the process of loading + training data + +* fill slots for custom templates + +### Improvements + +* `Agent.update_model()` and `Agent.handle_message()` now work without needing to set a domain + or a policy ensemble + +* update pytype to `2019.7.11` + +### Bugfixes + +* interactive learning bug where reverted user utterances were dumped to training data + +* added timeout to terminal input channel to avoid freezing input in case of server + errors + +* fill slots for image, buttons, quick_replies and attachments in templates + +* `rasa train core` in comparison mode stores the model files compressed (`tar.gz` files) + +* slot setting in interactive learning with the TwoStageFallbackPolicy + +## [1.1.7] - 2019-07-18 + +### Features + +* added optional pymongo dependencies `[tls, srv]` to `requirements.txt` for better mongodb support + +* `case_sensitive` option added to `WhiteSpaceTokenizer` with `true` as default. 
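A minimal `config.yml` sketch of how this option can be set (the component is normally listed as `WhitespaceTokenizer`; the other pipeline entries are illustrative, not prescribed by this release):

```yaml
language: en

pipeline:
  - name: "WhitespaceTokenizer"
    # Defaults to true; set to false to treat incoming text case-insensitively.
    case_sensitive: false
  - name: "CountVectorsFeaturizer"
  - name: "EmbeddingIntentClassifier"
```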
+ +### Bugfixes + +* validation no longer throws an error during interactive learning + +* fixed wrong cleaning of `use_entities` in case it was a list and not `True` + +* updated the server endpoint `/model/parse` to handle also messages with the intent prefix + +* fixed bug where “No model found” message appeared after successfully running the bot + +* debug logs now print to `rasa_core.log` when running `rasa x -vv` or `rasa run -vv` + +## [1.1.6] - 2019-07-12 + +### Features + +* rest channel supports setting a message's input_channel through a field + `input_channel` in the request body + +### Improvements + +* recommended syntax for empty `use_entities` and `ignore_entities` in the domain file + has been updated from `False` or `None` to an empty list (`[]`) + +### Bugfixes + +* `rasa run` without `--enable-api` does not require a local model anymore + +* using `rasa run` with `--enable-api` to run a server now prints + “running Rasa server” instead of “running Rasa Core server” + +* actions, intents, and utterances created in `rasa interactive` can no longer be empty + +## [1.1.5] - 2019-07-10 + +### Features + +* debug logging now tells you which tracker store is connected + +* the response of `/model/train` now includes a response header for the trained model filename + +* `Validator` class to help developing by checking if the files have any errors + +* project's code is now linted using flake8 + +* `info` log when credentials were provided for multiple channels and channel in + `--connector` argument was specified at the same time + +* validate export paths in interactive learning + +### Improvements + +* deprecate `rasa.core.agent.handle_channels(...)\`. Please use \`\`rasa.run(...)` + or `rasa.core.run.configure_app` instead. + +* `Agent.load()` also accepts `tar.gz` model file + +### Deprecations and Removals + +* revert the stripping of trailing slashes in endpoint URLs since this can lead to + problems in case the trailing slash is actually wanted + +* starter packs were removed from Github and are therefore no longer tested by Travis script + +### Bugfixes + +* all temporal model files are now deleted after stopping the Rasa server + +* `rasa shell nlu` now outputs unicode characters instead of `\\uxxxx` codes + +* fixed PUT /model with model_server by deserializing the model_server to + EndpointConfig. + +* `x in AnySlotDict` is now `True` for any `x`, which fixes empty slot warnings in + interactive learning + +* `rasa train` now also includes NLU files in other formats than the Rasa format + +* `rasa train core` no longer crashes without a `--domain` arg + +* `rasa interactive` now looks for endpoints in `endpoints.yml` if no `--endpoints` arg is passed + +* custom files, e.g. custom components and channels, load correctly when using + the command line interface + +* `MappingPolicy` now works correctly when used as part of a PolicyEnsemble + +## [1.1.4] - 2019-06-18 + +### Features + +* unfeaturize single entities + +* added agent readiness check to the `/status` resource + +### Improvements + +* removed leading underscore from name of '_create_initial_project' function. 
+ +### Bugfixes + +* fixed bug where facebook quick replies were not rendering + +* take FB quick reply payload rather than text as input + +* fixed bug where training_data path in metadata.json was an absolute path + +## [1.1.3] - 2019-06-14 + +### Bugfixes + +* fixed any inconsistent type annotations in code and some bugs revealed by + type checker + +## [1.1.2] - 2019-06-13 + +### Bugfixes + +* fixed duplicate events appearing in tracker when using a PostgreSQL tracker store + +## [1.1.1] - 2019-06-13 + +### Bugfixes + +* fixed compatibility with Rasa SDK + +* bot responses can contain `custom` messages besides other message types + +## [1.1.0] - 2019-06-13 + +### Features + +* nlu configs can now be directly compared for performance on a dataset + in `rasa test nlu` + +### Improvements + +* update the tracker in interactive learning through reverting and appending events + instead of replacing the tracker + +* `POST /conversations/{conversation_id}/tracker/events` supports a list of events + +### Bugfixes + +* fixed creation of `RasaNLUHttpInterpreter` + +* form actions are included in domain warnings + +* default actions, which are overriden by custom actions and are listed in the + domain are excluded from domain warnings + +* SQL `data` column type to `Text` for compatibility with MySQL + +* non-featurizer training parameters don't break SklearnPolicy anymore + +## [1.0.9] - 2019-06-10 + +### Improvements + +* revert PR #3739 (as this is a breaking change): set `PikaProducer` and + `KafkaProducer` default queues back to `rasa_core_events` + +## [1.0.8] - 2019-06-10 + +### Features + +* support for specifying full database urls in the `SQLTrackerStore` configuration + +* maximum number of predictions can be set via the environment variable + `MAX_NUMBER_OF_PREDICTIONS` (default is 10) + +### Improvements + +* default `PikaProducer` and `KafkaProducer` queues to `rasa_production_events` + +* exclude unfeaturized slots from domain warnings + +### Bugfixes + +* loading of additional training data with the `SkillSelector` + +* strip trailing slashes in endpoint URLs + +## [1.0.7] - 2019-06-06 + +### Features + +* added argument `--rasa-x-port` to specify the port of Rasa X when running Rasa X locally via `rasa x` + +### Bugfixes + +* slack notifications from bots correctly render text + +* fixed usage of `--log-file` argument for `rasa run` and `rasa shell` + +* check if correct tracker store is configured in local mode + +## [1.0.6] - 2019-06-03 + +### Bugfixes + +* fixed backwards incompatible utils changes + +## [1.0.5] - 2019-06-03 + +### Bugfixes + +* fixed spacy being a required dependency (regression) + +## [1.0.4] - 2019-06-03 + +### Features + +* automatic creation of index on the `sender_id` column when using an SQL + tracker store. If you have an existing data and you are running into performance + issues, please make sure to add an index manually using + `CREATE INDEX event_idx_sender_id ON events (sender_id);`. + +### Improvements + +* NLU evaluation in cross-validation mode now also provides intent/entity reports, + confusion matrix, etc. + +## [1.0.3] - 2019-05-30 + +### Bugfixes + +* non-ascii characters render correctly in stories generated from interactive learning + +* validate domain file before usage, e.g. 
print proper error messages if domain file + is invalid instead of raising errors + +## [1.0.2] - 2019-05-29 + +### Features + +* added `domain_warnings()` method to `Domain` which returns a dict containing the + diff between supplied {actions, intents, entities, slots} and what's contained in the + domain + +### Bugfixes + +* fix lookup table files failed to load issues/3622 + +* buttons can now be properly selected during cmdline chat or when in interactive learning + +* set slots correctly when events are added through the API + +* mapping policy no longer ignores NLU threshold + +* mapping policy priority is correctly persisted + +## [1.0.1] - 2019-05-21 + +### Bugfixes + +* updated installation command in docs for Rasa X + +## [1.0.0] - 2019-05-21 + +### Features + +* added arguments to set the file paths for interactive training + +* added quick reply representation for command-line output + +* added option to specify custom button type for Facebook buttons + +* added tracker store persisting trackers into a SQL database + (`SQLTrackerStore`) + +* added rasa command line interface and API + +* Rasa HTTP training endpoint at `POST /jobs`. This endpoint + will train a combined Rasa Core and NLU model + +* `ReminderCancelled(action_name)` event to cancel given action_name reminder + for current user + +* Rasa HTTP intent evaluation endpoint at `POST /intentEvaluation`. + This endpoints performs an intent evaluation of a Rasa model + +* option to create template for new utterance action in `interactive learning` + +* you can now choose actions previously created in the same session + in `interactive learning` + +* add formatter 'black' + +* channel-specific utterances via the `- "channel":` key in utterance templates + +* arbitrary json messages via the `- "custom":` key in utterance templates and + via `utter_custom_json()` method in custom actions + +* support to load sub skills (domain, stories, nlu data) + +* support to select which sub skills to load through `import` section in + `config.yml` + +* support for spaCy 2.1 + +* a model for an agent can now also be loaded from a remote storage + +* log level can be set via environment variable `LOG_LEVEL` + +* add `--store-uncompressed` to train command to not compress Rasa model + +* log level of libraries, such as tensorflow, can be set via environment variable `LOG_LEVEL_LIBRARIES` + +* if no spaCy model is linked upon building a spaCy pipeline, an appropriate error message + is now raised with instructions for linking one + +### Improvements + +* renamed all CLI parameters containing any `_` to use dashes `-` instead (GNU standard) + +* renamed `rasa_core` package to `rasa.core` + +* for interactive learning only include manually annotated and ner_crf entities in nlu export + +* made `message_id` an additional argument to `interpreter.parse` + +* changed removing punctuation logic in `WhitespaceTokenizer` + +* `training_processes` in the Rasa NLU data router have been renamed to `worker_processes` + +* created a common utils package `rasa.utils` for nlu and core, common methods like `read_yaml` moved there + +* removed `--num_threads` from run command (server will be asynchronous but + running in a single thread) + +* the `_check_token()` method in `RasaChat` now authenticates against `/auth/verify` instead of `/user` + +* removed `--pre_load` from run command (Rasa NLU server will just have a maximum of one model and that model will be + loaded by default) + +* changed file format of a stored trained model from the Rasa NLU server 
to `tar.gz` + +* train command uses fallback config if an invalid config is given + +* test command now compares multiple models if a list of model files is provided for the argument `--model` + +* Merged rasa.core and rasa.nlu server into a single server. See swagger file in `docs/_static/spec/server.yaml` for + available endpoints. + +* `utter_custom_message()` method in rasa_core_sdk has been renamed to `utter_elements()` + +* updated dependencies. as part of this, models for spacy need to be reinstalled + for 2.1 (from 2.0) + +* make sure all command line arguments for `rasa test` and `rasa interactive` are actually used, removed arguments + that were not used at all (e.g. `--core` for `rasa test`) + +### Deprecations and Removals + +* removed possibility to execute `python -m rasa_core.train` etc. (e.g. scripts in `rasa.core` and `rasa.nlu`). + Use the CLI for rasa instead, e.g. `rasa train core`. + +* removed `_sklearn_numpy_warning_fix` from the `SklearnIntentClassifier` + +* removed `Dispatcher` class from core + +* removed projects: the Rasa NLU server now has a maximum of one model at a time loaded. + +### Bugfixes + +* evaluating core stories with two stage fallback gave an error, trying to handle None for a policy + +* the `/evaluate` route for the Rasa NLU server now runs evaluation + in a parallel process, which prevents the currently loaded model unloading + +* added missing implementation of the `keys()` function for the Redis Tracker + Store + +* in interactive learning: only updates entity values if user changes annotation + +* log options from the command line interface are applied (they overwrite the environment variable) + +* all message arguments (kwargs in dispatcher.utter methods, as well as template args) are now sent through to output channels + +* utterance templates defined in actions are checked for existence upon training a new agent, and a warning + is thrown before training if one is missing diff --git a/CHANGELOG.rst b/CHANGELOG.rst deleted file mode 100644 index 881cefb2f156..000000000000 --- a/CHANGELOG.rst +++ /dev/null @@ -1,606 +0,0 @@ -:desc: Rasa Changelog - - -Rasa Change Log -=============== - -All notable changes to this project will be documented in this file. -This project adheres to `Semantic Versioning`_ starting with version 1.0. - -[Unreleased 1.4.0] - `master`_ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Added ------ - -Changed -------- - -Removed -------- - -Fixed ------ -- fix missing ``tkinter`` dependency for running tests on Ubuntu - - -[1.3.3] - 2019-09-13 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Added a check to avoid training ``CountVectorizer`` for a particular - attribute of a message if no text is provided for that attribute across - the training data. -- Default one-hot representation for label featurization inside ``EmbeddingIntentClassifier`` if label features don't exist. -- Policy ensemble no longer incorrectly wrings "missing mapping policy" when - mapping policy is present. - -Removed -------- -- Removed computation of ``intent_spacy_doc``. As a result, none of the spacy components process intents now. - -[1.3.2] - 2019-09-10 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- SQL tracker events are retrieved ordered by timestamps. This fixes interactive - learning events being shown in the wrong order. 
- -[1.3.1] - 2019-09-09 -^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- Pin gast to == 0.2.2 - -[1.3.0] - 2019-09-05 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- Added option to persist nlu training data (default: False) -- option to save stories in e2e format for interactive learning -- bot messages contain the ``timestamp`` of the ``BotUttered`` event, which can be used in channels -- ``FallbackPolicy`` can now be configured to trigger when the difference between confidences of two predicted intents is too narrow -- experimental training data importer which supports training with data of multiple - sub bots. Please see the - `docs `_ for more - information. -- throw error during training when triggers are defined in the domain without - ``MappingPolicy`` being present in the policy ensemble -- The tracker is now available within the interpreter's ``parse`` method, giving the - ability to create interpreter classes that use the tracker state (eg. slot values) - during the parsing of the message. More details on motivation of this change see - issues/3015. -- add example bot ``knowledgebasebot`` to showcase the usage of ``ActionQueryKnowledgeBase`` -- ``softmax`` starspace loss for both ``EmbeddingPolicy`` and ``EmbeddingIntentClassifier`` -- ``balanced`` batching strategy for both ``EmbeddingPolicy`` and ``EmbeddingIntentClassifier`` -- ``max_history`` parameter for ``EmbeddingPolicy`` -- Successful predictions of the NER are written to a file if ``--successes`` is set when running ``rasa test nlu`` -- Incorrect predictions of the NER are written to a file by default. You can disable it via ``--no-errors``. -- New NLU component ``ResponseSelector`` added for the task of response selection -- Message data attribute can contain two more keys - ``response_key``, ``response`` depending on the training data -- New action type implemented by ``ActionRetrieveResponse`` class and identified with ``response_`` prefix -- Vocabulary sharing inside ``CountVectorsFeaturizer`` with ``use_shared_vocab`` flag. If set to True, vocabulary of corpus is shared between text, intent and response attributes of message -- Added an option to share the hidden layer weights of text input and label input inside ``EmbeddingIntentClassifier`` using the flag ``share_hidden_layers`` -- New type of training data file in NLU which stores response phrases for response selection task. -- Add flag ``intent_split_symbol`` and ``intent_tokenization_flag`` to all ``WhitespaceTokenizer``, ``JiebaTokenizer`` and ``SpacyTokenizer`` -- Added evaluation for response selector. Creates a report ``response_selection_report.json`` inside ``--out`` directory. -- argument ``--config-endpoint`` to specify the URL from which ``rasa x`` pulls - the runtime configuration (endpoints and credentials) -- ``LockStore`` class storing instances of ``TicketLock`` for every ``conversation_id`` -- environment variables ``SQL_POOL_SIZE`` (default: 50) and ``SQL_MAX_OVERFLOW`` - (default: 100) can be set to control the pool size and maximum pool overflow for - ``SQLTrackerStore`` when used with the ``postgresql`` dialect -- Add a `bot_challenge` intent and a `utter_iamabot` action to all example projects and the rasa init bot. 
-- Allow sending attachments when using the socketio channel -- ``rasa data validate`` will fail with a non-zero exit code if validation fails - -Changed -------- -- added character-level ``CountVectorsFeaturizer`` with empirically found parameters - into the ``supervised_embeddings`` NLU pipeline template -- NLU evaluations now also stores its output in the output directory like the core evaluation -- show warning in case a default path is used instead of a provided, invalid path -- compare mode of ``rasa train core`` allows the whole core config comparison, - naming style of models trained for comparison is changed (this is a breaking change) -- pika keeps a single connection open, instead of open and closing on each incoming event -- ``RasaChatInput`` fetches the public key from the Rasa X API. The key is used to - decode the bearer token containing the conversation ID. This requires - ``rasa-x>=0.20.2``. -- more specific exception message when loading custom components depending on whether component's path or - class name is invalid or can't be found in the global namespace -- change priorities so that the ``MemoizationPolicy`` has higher priority than the ``MappingPolicy`` -- substitute LSTM with Transformer in ``EmbeddingPolicy`` -- ``EmbeddingPolicy`` can now use ``MaxHistoryTrackerFeaturizer`` -- non zero ``evaluate_on_num_examples`` in ``EmbeddingPolicy`` - and ``EmbeddingIntentClassifier`` is the size of - hold out validation set that is excluded from training data -- defaults parameters and architectures for both ``EmbeddingPolicy`` and - ``EmbeddingIntentClassifier`` are changed (this is a breaking change) -- evaluation of NER does not include 'no-entity' anymore -- ``--successes`` for ``rasa test nlu`` is now boolean values. If set incorrect/successful predictions - are saved in a file. -- ``--errors`` is renamed to ``--no-errors`` and is now a boolean value. By default incorrect predictions are saved - in a file. If ``--no-errors`` is set predictions are not written to a file. -- Remove ``label_tokenization_flag`` and ``label_split_symbol`` from ``EmbeddingIntentClassifier``. Instead move these parameters to ``Tokenizers``. -- Process features of all attributes of a message, i.e. - text, intent and response inside the respective component itself. For e.g. - intent of a message is now tokenized inside the tokenizer itself. -- Deprecate ``as_markdown`` and ``as_json`` in favour of ``nlu_as_markdown`` and ``nlu_as_json`` respectively. -- pin python-engineio >= 3.9.3 -- update python-socketio req to >= 4.3.1 - -Fixed ------ -- ``rasa test nlu`` with a folder of configuration files -- ``MappingPolicy`` standard featurizer is set to ``None`` -- Removed ``text`` parameter from send_attachment function in slack.py to avoid duplication of text output to slackbot -- server ``/status`` endpoint reports status when an NLU-only model is loaded - -Removed -------- -- Removed ``--report`` argument from ``rasa test nlu``. All output files are stored in the ``--out`` directory. - -[1.2.7] - 2019-09-02 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Added ``query`` dictionary argument to ``SQLTrackerStore`` which will be appended - to the SQL connection URL as query parameters. - - -[1.2.6] - 2019-09-02 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed bug that occurred when sending template ``elements`` through a channel that doesn't support them - -[1.2.5] - 2019-08-26 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- SSL support for ``rasa run`` command. 
Certificate can be specified using - ``--ssl-certificate`` and ``--ssl-keyfile``. - -Fixed ------ -- made default augmentation value consistent across repo -- ``'/restart'`` will now also restart the bot if the tracker is paused - - -[1.2.4] - 2019-08-23 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- the ``SocketIO`` input channel now allows accesses from other origins - (fixes ``SocketIO`` channel on Rasa X) - -[1.2.3] - 2019-08-15 -^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- messages with multiple entities are now handled properly with e2e evaluation -- ``data/test_evaluations/end_to_end_story.md`` was re-written in the - restaurantbot domain - -[1.2.3] - 2019-08-15 -^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- messages with multiple entities are now handled properly with e2e evaluation -- ``data/test_evaluations/end_to_end_story.md`` was re-written in the restaurantbot domain - -Fixed ------ -- Free text input was not allowed in the Rasa shell when the response template - contained buttons, which has now been fixed. - -[1.2.2] - 2019-08-07 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- ``UserUttered`` events always got the same timestamp - -[1.2.1] - 2019-08-06 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- Docs now have an ``EDIT THIS PAGE`` button - -Fixed ------ -- ``Flood control exceeded`` error in Telegram connector which happened because the - webhook was set twice - -[1.2.0] - 2019-08-01 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- add root route to server started without ``--enable-api`` parameter -- add ``--evaluate-model-directory`` to ``rasa test core`` to evaluate models - from ``rasa train core -c `` -- option to send messages to the user by calling - ``POST /conversations/{conversation_id}/execute`` - -Changed -------- -- ``Agent.update_model()`` and ``Agent.handle_message()`` now work without needing to set a domain - or a policy ensemble -- Update pytype to ``2019.7.11`` -- new event broker class: ``SQLProducer``. This event broker is now used when running locally with - Rasa X -- API requests are not longer logged to ``rasa_core.log`` by default in order to avoid - problems when running on OpenShift (use ``--log-file rasa_core.log`` to retain the - old behavior) -- ``metadata`` attribute added to ``UserMessage`` - -Fixed ------ -- ``rasa test core`` can handle compressed model files -- rasa can handle story files containing multi line comments -- template will retain `{` if escaped with `{`. e.g. 
`{{"foo": {bar}}}` will result in `{"foo": "replaced value"}` - -[1.1.8] - 2019-07-25 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- ``TrainingFileImporter`` interface to support customizing the process of loading - training data -- fill slots for custom templates - -Changed -------- -- ``Agent.update_model()`` and ``Agent.handle_message()`` now work without needing to set a domain - or a policy ensemble -- update pytype to ``2019.7.11`` - -Fixed ------ -- interactive learning bug where reverted user utterances were dumped to training data -- added timeout to terminal input channel to avoid freezing input in case of server - errors -- fill slots for image, buttons, quick_replies and attachments in templates -- ``rasa train core`` in comparison mode stores the model files compressed (``tar.gz`` files) -- slot setting in interactive learning with the TwoStageFallbackPolicy - - -[1.1.7] - 2019-07-18 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- added optional pymongo dependencies ``[tls, srv]`` to ``requirements.txt`` for better mongodb support -- ``case_sensitive`` option added to ``WhiteSpaceTokenizer`` with ``true`` as default. - -Fixed ------ -- validation no longer throws an error during interactive learning -- fixed wrong cleaning of ``use_entities`` in case it was a list and not ``True`` -- updated the server endpoint ``/model/parse`` to handle also messages with the intent prefix -- fixed bug where "No model found" message appeared after successfully running the bot -- debug logs now print to ``rasa_core.log`` when running ``rasa x -vv`` or ``rasa run -vv`` - -[1.1.6] - 2019-07-12 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- rest channel supports setting a message's input_channel through a field - ``input_channel`` in the request body - -Changed -------- -- recommended syntax for empty ``use_entities`` and ``ignore_entities`` in the domain file - has been updated from ``False`` or ``None`` to an empty list (``[]``) - -Fixed ------ -- ``rasa run`` without ``--enable-api`` does not require a local model anymore -- using ``rasa run`` with ``--enable-api`` to run a server now prints - "running Rasa server" instead of "running Rasa Core server" -- actions, intents, and utterances created in ``rasa interactive`` can no longer be empty - - -[1.1.5] - 2019-07-10 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- debug logging now tells you which tracker store is connected -- the response of ``/model/train`` now includes a response header for the trained model filename -- ``Validator`` class to help developing by checking if the files have any errors -- project's code is now linted using flake8 -- ``info`` log when credentials were provided for multiple channels and channel in - ``--connector`` argument was specified at the same time -- validate export paths in interactive learning - -Changed -------- -- deprecate ``rasa.core.agent.handle_channels(...)`. Please use ``rasa.run(...)`` - or ``rasa.core.run.configure_app`` instead. -- ``Agent.load()`` also accepts ``tar.gz`` model file - -Removed -------- -- revert the stripping of trailing slashes in endpoint URLs since this can lead to - problems in case the trailing slash is actually wanted -- starter packs were removed from Github and are therefore no longer tested by Travis script - -Fixed ------ -- all temporal model files are now deleted after stopping the Rasa server -- ``rasa shell nlu`` now outputs unicode characters instead of ``\uxxxx`` codes -- fixed PUT /model with model_server by deserializing the model_server to - EndpointConfig. 
-- ``x in AnySlotDict`` is now ``True`` for any ``x``, which fixes empty slot warnings in - interactive learning -- ``rasa train`` now also includes NLU files in other formats than the Rasa format -- ``rasa train core`` no longer crashes without a ``--domain`` arg -- ``rasa interactive`` now looks for endpoints in ``endpoints.yml`` if no ``--endpoints`` arg is passed -- custom files, e.g. custom components and channels, load correctly when using - the command line interface -- ``MappingPolicy`` now works correctly when used as part of a PolicyEnsemble - - -[1.1.4] - 2019-06-18 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- unfeaturize single entities -- added agent readiness check to the ``/status`` resource - -Changed -------- -- removed leading underscore from name of '_create_initial_project' function. - -Fixed ------ -- fixed bug where facebook quick replies were not rendering -- take FB quick reply payload rather than text as input -- fixed bug where `training_data` path in `metadata.json` was an absolute path - -[1.1.3] - 2019-06-14 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed any inconsistent type annotations in code and some bugs revealed by - type checker - -[1.1.2] - 2019-06-13 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed duplicate events appearing in tracker when using a PostgreSQL tracker store - -[1.1.1] - 2019-06-13 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed compatibility with Rasa SDK -- bot responses can contain ``custom`` messages besides other message types - -[1.1.0] - 2019-06-13 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- nlu configs can now be directly compared for performance on a dataset - in ``rasa test nlu`` - -Changed -------- -- update the tracker in interactive learning through reverting and appending events - instead of replacing the tracker -- ``POST /conversations/{conversation_id}/tracker/events`` supports a list of events - -Fixed ------ -- fixed creation of ``RasaNLUHttpInterpreter`` -- form actions are included in domain warnings -- default actions, which are overriden by custom actions and are listed in the - domain are excluded from domain warnings -- SQL ``data`` column type to ``Text`` for compatibility with MySQL -- non-featurizer training parameters don't break `SklearnPolicy` anymore - -[1.0.9] - 2019-06-10 -^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- revert PR #3739 (as this is a breaking change): set ``PikaProducer`` and - ``KafkaProducer`` default queues back to ``rasa_core_events`` - -[1.0.8] - 2019-06-10 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- support for specifying full database urls in the ``SQLTrackerStore`` configuration -- maximum number of predictions can be set via the environment variable - ``MAX_NUMBER_OF_PREDICTIONS`` (default is 10) - -Changed -------- -- default ``PikaProducer`` and ``KafkaProducer`` queues to ``rasa_production_events`` -- exclude unfeaturized slots from domain warnings - -Fixed ------ -- loading of additional training data with the ``SkillSelector`` -- strip trailing slashes in endpoint URLs - -[1.0.7] - 2019-06-06 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- added argument ``--rasa-x-port`` to specify the port of Rasa X when running Rasa X locally via ``rasa x`` - -Fixed ------ -- slack notifications from bots correctly render text -- fixed usage of ``--log-file`` argument for ``rasa run`` and ``rasa shell`` -- check if correct tracker store is configured in local mode - -[1.0.6] - 2019-06-03 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed backwards incompatible utils changes - -[1.0.5] - 2019-06-03 
-^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed spacy being a required dependency (regression) - -[1.0.4] - 2019-06-03 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- automatic creation of index on the ``sender_id`` column when using an SQL - tracker store. If you have an existing data and you are running into performance - issues, please make sure to add an index manually using - ``CREATE INDEX event_idx_sender_id ON events (sender_id);``. - -Changed -------- -- NLU evaluation in cross-validation mode now also provides intent/entity reports, - confusion matrix, etc. - -[1.0.3] - 2019-05-30 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- non-ascii characters render correctly in stories generated from interactive learning -- validate domain file before usage, e.g. print proper error messages if domain file - is invalid instead of raising errors - -[1.0.2] - 2019-05-29 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- added ``domain_warnings()`` method to ``Domain`` which returns a dict containing the - diff between supplied {actions, intents, entities, slots} and what's contained in the - domain - -Fixed ------ -- fix lookup table files failed to load issues/3622 -- buttons can now be properly selected during cmdline chat or when in interactive learning -- set slots correctly when events are added through the API -- mapping policy no longer ignores NLU threshold -- mapping policy priority is correctly persisted - - -[1.0.1] - 2019-05-21 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- updated installation command in docs for Rasa X - -[1.0.0] - 2019-05-21 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- added arguments to set the file paths for interactive training -- added quick reply representation for command-line output -- added option to specify custom button type for Facebook buttons -- added tracker store persisting trackers into a SQL database - (``SQLTrackerStore``) -- added rasa command line interface and API -- Rasa HTTP training endpoint at ``POST /jobs``. This endpoint - will train a combined Rasa Core and NLU model -- ``ReminderCancelled(action_name)`` event to cancel given action_name reminder - for current user -- Rasa HTTP intent evaluation endpoint at ``POST /intentEvaluation``. 
- This endpoints performs an intent evaluation of a Rasa model -- option to create template for new utterance action in ``interactive learning`` -- you can now choose actions previously created in the same session - in ``interactive learning`` -- add formatter 'black' -- channel-specific utterances via the ``- "channel":`` key in utterance templates -- arbitrary json messages via the ``- "custom":`` key in utterance templates and - via ``utter_custom_json()`` method in custom actions -- support to load sub skills (domain, stories, nlu data) -- support to select which sub skills to load through ``import`` section in - ``config.yml`` -- support for spaCy 2.1 -- a model for an agent can now also be loaded from a remote storage -- log level can be set via environment variable ``LOG_LEVEL`` -- add ``--store-uncompressed`` to train command to not compress Rasa model -- log level of libraries, such as tensorflow, can be set via environment variable ``LOG_LEVEL_LIBRARIES`` -- if no spaCy model is linked upon building a spaCy pipeline, an appropriate error message - is now raised with instructions for linking one - -Changed -------- -- renamed all CLI parameters containing any ``_`` to use dashes ``-`` instead (GNU standard) -- renamed ``rasa_core`` package to ``rasa.core`` -- for interactive learning only include manually annotated and ner_crf entities in nlu export -- made ``message_id`` an additional argument to ``interpreter.parse`` -- changed removing punctuation logic in ``WhitespaceTokenizer`` -- ``training_processes`` in the Rasa NLU data router have been renamed to ``worker_processes`` -- created a common utils package ``rasa.utils`` for nlu and core, common methods like ``read_yaml`` moved there -- removed ``--num_threads`` from run command (server will be asynchronous but - running in a single thread) -- the ``_check_token()`` method in ``RasaChat`` now authenticates against ``/auth/verify`` instead of ``/user`` -- removed ``--pre_load`` from run command (Rasa NLU server will just have a maximum of one model and that model will be - loaded by default) -- changed file format of a stored trained model from the Rasa NLU server to ``tar.gz`` -- train command uses fallback config if an invalid config is given -- test command now compares multiple models if a list of model files is provided for the argument ``--model`` -- Merged rasa.core and rasa.nlu server into a single server. See swagger file in ``docs/_static/spec/server.yaml`` for - available endpoints. -- ``utter_custom_message()`` method in rasa_core_sdk has been renamed to ``utter_elements()`` -- updated dependencies. as part of this, models for spacy need to be reinstalled - for 2.1 (from 2.0) -- make sure all command line arguments for ``rasa test`` and ``rasa interactive`` are actually used, removed arguments - that were not used at all (e.g. ``--core`` for ``rasa test``) - -Removed -------- -- removed possibility to execute ``python -m rasa_core.train`` etc. (e.g. scripts in ``rasa.core`` and ``rasa.nlu``). - Use the CLI for rasa instead, e.g. ``rasa train core``. -- removed ``_sklearn_numpy_warning_fix`` from the ``SklearnIntentClassifier`` -- removed ``Dispatcher`` class from core -- removed projects: the Rasa NLU server now has a maximum of one model at a time loaded. 
- -Fixed ------ -- evaluating core stories with two stage fallback gave an error, trying to handle None for a policy -- the ``/evaluate`` route for the Rasa NLU server now runs evaluation - in a parallel process, which prevents the currently loaded model unloading -- added missing implementation of the ``keys()`` function for the Redis Tracker - Store -- in interactive learning: only updates entity values if user changes annotation -- log options from the command line interface are applied (they overwrite the environment variable) -- all message arguments (kwargs in dispatcher.utter methods, as well as template args) are now sent through to output channels -- utterance templates defined in actions are checked for existence upon training a new agent, and a warning - is thrown before training if one is missing - -.. _`master`: https://github.com/RasaHQ/rasa/ - -.. _`Semantic Versioning`: http://semver.org/ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000000..6655af163822 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,203 @@ +
+
+
+
+- [How to open a GitHub issue & file a bug report](#how-to-open-a-github-issue--file-a-bug-report)
+  - [Working on a new feature or fixing a bug](#working-on-a-new-feature-or-fixing-a-bug)
+  - [Working on an existing feature](#working-on-an-existing-feature)
+- [How to open a GitHub Pull Request](#how-to-open-a-github-pull-request)
+  - [What is a Pull Request (PR)?](#what-is-a-pull-request-pr)
+  - [What to know before opening a PR](#what-to-know-before-opening-a-pr)
+    - [Opening issues before PRs](#opening-issues-before-prs)
+    - [Draft PRs](#draft-prs)
+    - [PRs should be a reasonable length](#prs-should-be-a-reasonable-length)
+    - [Code style](#code-style)
+    - [Formatting and Type Checking](#formatting-and-type-checking)
+- [How to open a PR and contribute code to Rasa Open Source](#how-to-open-a-pr-and-contribute-code-to-rasa-open-source)
+  - [1. Forking the Rasa Repository](#1-forking-the-rasa-repository)
+  - [2. Cloning the Forked Repository Locally](#2-cloning-the-forked-repository-locally)
+  - [3. Update your Forked Repository](#3-update-your-forked-repository)
+  - [4. Implement your code contribution](#4-implement-your-code-contribution)
+  - [5. Push changes to your forked repository on GitHub](#5-push-changes-to-your-forked-repository-on-github)
+  - [6. Opening the Pull Request on Rasa Open Source](#6-opening-the-pull-request-on-rasa-open-source)
+  - [7. Signing the Contributor Licence Agreement (CLA)](#7-signing-the-contributor-licence-agreement-cla)
+  - [8. Merging your PR and the final steps of your contribution](#8-merging-your-pr-and-the-final-steps-of-your-contribution)
+  - [9. Share your contributions with the world!](#9-share-your-contributions-with-the-world)
+  - [10. Non-code contributions](#10-non-code-contributions)
+
+
+
+---
+
+## How to open a GitHub issue & file a bug report
+
+### Working on a new feature or fixing a bug
+
+If you would like to add a new feature or fix an existing bug, we prefer that you open a new issue on the Rasa repository before creating a pull request.
+
+It’s important to note that when opening an issue, you should first do a quick search of existing issues to make sure your suggestion hasn’t already been added as an issue.
+If your issue doesn’t already exist, and you’re ready to create a new one, make sure to state what you would like to implement, improve or fix. We have provided templates to make this process easier for you.
+
+**To open a GitHub issue, go to the RasaHQ repository, select “Issues”, “New Issue” then “Feature Request” or “Bug Report” and fill out the template.**
+
+![](https://www.rasa.com/assets/img/contributor-guidelines/opening-new-issue.png)
+
+The Rasa team will then get in touch with you to discuss if the proposed feature aligns with the company's roadmap, and we will guide you along the way in shaping the proposed feature so that it can be merged into the Rasa codebase.
+
+### Working on an existing feature
+
+If you want to contribute code, but don't know what to work on, check out the Rasa contributors board to find existing open issues.
+
+The issues are handpicked by the Rasa team to have labels which correspond to the difficulty/estimated time needed to resolve the issue.
+
+**To work on an existing issue, go to the contributor project board, add a comment stating you would like to work on it and include any solutions you may already have in mind.**
+
+![](https://www.rasa.com/assets/img/contributor-guidelines/exiting-issue-sara.png)
+
+Someone from Rasa will then assign that issue to you and help you along the way.
+
+---
+
+## How to open a GitHub Pull Request
+
+### What is a Pull Request (PR)?
+
+This is how the GitHub team defines a PR:
+
+> “Pull requests let you tell others about changes you’ve pushed to a branch in a repository on GitHub. Once a pull request is opened, you can discuss and review the potential changes with collaborators and add follow-up commits before your changes are merged into the base branch.”
+
+This process is used by both Rasa team members and Rasa contributors to make changes and improvements to Rasa Open Source.
+
+### What to know before opening a PR
+
+#### Opening issues before PRs
+
+We usually recommend opening an issue before a pull request if there isn’t already an issue for the problem you’d like to solve. This helps facilitate a discussion before deciding on an implementation. See How to open a GitHub issue & file a bug report.
+
+#### Draft PRs
+
+If you're ready to get some quick initial feedback from the Rasa team, you can create a draft pull request.
+
+#### PRs should be a reasonable length
+
+If your PR is longer than 500 lines, please consider splitting it into multiple smaller contributions.
+
+#### Code style
+
+To ensure a standardized code style we recommend using the formatter black. To ensure our type annotations are correct we also suggest using the type checker pytype.
+
+#### Formatting and Type Checking
+
+If you want to automatically format your code on every commit, you can use pre-commit. Just install it via `pip install pre-commit` and execute `pre-commit install` in the root folder. This will add a hook to the repository, which reformats files on every commit.
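For reference, the pre-commit setup described above boils down to a couple of shell commands, run from the repository root (the last command is optional and simply applies all configured hooks to the existing files once):

```bash
# install the hook manager and register the hooks configured in this repository
pip install pre-commit
pre-commit install

# optional: run every configured hook over the whole codebase once
pre-commit run --all-files
```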
+
+If you want to set it up manually, install black via `pip install -r requirements-dev.txt`. To reformat files execute `make formatter`.
+
+If you want to check types on the codebase, install pytype using `pip install -r requirements-dev.txt`. To check the types execute `make types`.
+
+The CI/CD tests that we run can be found in the [continous-integration.yml](https://github.com/RasaHQ/rasa/blob/master/.github/workflows/continous-integration.yml) file.
+
+---
+
+## How to open a PR and contribute code to Rasa Open Source
+
+#### 1. Forking the Rasa Repository
+
+Head to the Rasa repository and click ‘Fork’. Forking a repository creates a copy of the project for you, which you can edit and use to propose changes to the original project.
+
+![](https://www.rasa.com/assets/img/contributor-guidelines/fork.png)
+
+Once you fork it, a copy of the Rasa repository will appear inside your GitHub repository list.
+
+#### 2. Cloning the Forked Repository Locally
+
+To make changes to your copy of the Rasa repository, clone the repository on your local machine. To do that, run the following command in your terminal:
+
+```
+git clone https://github.com/your_github_username/rasa.git
+```
+
+The link to the repository can be found after clicking the Clone or download button, as shown in the image below:
+
+![](https://www.rasa.com/assets/img/contributor-guidelines/clone.png)
+
+Note: this assumes you have git installed on your local machine. If not, check out the [following guide](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) to learn how to install it.
+
+#### 3. Update your Forked Repository
+
+Before you make any changes to your cloned repository, make sure you have the latest version of the original Rasa repository. To do that, run the following commands in your terminal:
+
+```
+cd rasa
+git remote add upstream git://github.com/RasaHQ/rasa.git
+git pull upstream master
+```
+
+This will update the local copy of the Rasa repository to the latest version.
+
+#### 4. Implement your code contribution
+
+At this point, you are good to make changes to the files in the local directory of your project.
+
+Alternatively, you can create a new branch which will contain the implementation of your contribution. To do that, run:
+
+```
+git checkout -b name-of-your-new-branch
+```
+
+#### 5. Push changes to your forked repository on GitHub
+
+Once you are happy with the changes you made in the local files, push them to the forked repository on GitHub. To do that, run the following commands:
+
+```
+git add .
+git commit -m 'fixed a bug'
+git push origin name-of-your-new-branch
+```
+
+This will create a new branch on your forked Rasa repository, and now you’re ready to create a Pull Request with your proposed changes!
+
+#### 6. Opening the Pull Request on Rasa Open Source
+
+Head to the forked repository and click the _Compare & pull request_ button.
+
+![](https://www.rasa.com/assets/img/contributor-guidelines/openpr-1.png)
+
+This will open a window where you can choose the repository and branch you would like to propose your changes to, as well as specific details of your contribution. In the top panel menu choose the following details:
+
+- Base repository: `RasaHQ/rasa`
+- Base branch: `master`
+- Head repository: `your-github-username/rasa`
+- Head branch: `name-of-your-new-branch`
+
+![](https://www.rasa.com/assets/img/contributor-guidelines/openpr-2.png)
+
+Next, make sure to update the pull request card with as many details about your contribution as possible.
+The _Proposed changes_ section should contain the details of what has been fixed or implemented, and the _Status_ checklist should reflect the status of your contribution. Any non-trivial change (i.e. anything more than a typo fix) should include a changelog entry, a bug fix should have a test, a new feature should have documentation, etc.
+
+If you are ready to get feedback on your contribution from the Rasa team, tick the _made PR ready for code review_ and _allow edits from maintainers_ boxes.
+
+Once you are happy with everything, click the _Create pull request_ button. This will create a Pull Request with your proposed changes.
+
+![](https://www.rasa.com/assets/img/contributor-guidelines/openpr-3.png)
+
+#### 7. Signing the Contributor Licence Agreement (CLA)
+
+To merge your contributions into the Rasa codebase, you will have to sign a Contributor License Agreement (CLA).
+
+We need to know that you agree to your code being included in the Rasa codebase and that we may use it in our later releases. You can find the detailed Rasa Contributor Licence Agreement [here](https://cla-assistant.io/RasaHQ/rasa).
+
+#### 8. Merging your PR and the final steps of your contribution
+
+Once you sign the CLA, a member of the Rasa team will get in touch with you with feedback on your contribution. In some cases, contributions are accepted right away, but often, you may be asked to make some edits/improvements. Don’t worry if you are asked to change something - it’s a completely normal part of software development.
+
+If you have been requested to make changes to your contribution, head back to the local copy of your repository on your machine, implement the changes and push them to your contribution branch by repeating the instructions from step 5. Your pull request will automatically be updated with the changes you pushed. Once you've implemented all of the suggested changes, tag the person who first reviewed your contribution by mentioning them in the comments of your PR to ask them to take another look.
+Finally, if your contribution is accepted, the Rasa team member will merge it into the Rasa codebase.
+
+#### 9. Share your contributions with the world!
+
+Contributing to open source can take a lot of time and effort, so you should be proud of the great work you have done!
+Let the world know that you have become a contributor to the Rasa open source project by posting about it on your social media (make sure to tag @RasaHQ as well), mention the contribution on your CV and get ready to get some really cool [Rasa contributor swag](https://blog.rasa.com/announcing-the-rasa-contributor-program/)!
+
+#### 10. Non-code contributions
+
+Contributing doesn’t start and end with code. You can support the project by planning community events, creating tutorials, helping fellow community members find answers to their questions or translating documentation and news. Every contribution matters! You can find more details [on our website](https://rasa.com/community/contribute/).
diff --git a/Dockerfile b/Dockerfile
index 8b194629c9f3..d54eff463afa 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,14 +1,14 @@
-FROM python:3.6-slim as builder
-# if this installation process changes, the enterprise container needs to be
-# updated as well
-WORKDIR /build
-COPY . .
-RUN python setup.py sdist bdist_wheel -RUN find dist -maxdepth 1 -mindepth 1 -name '*.tar.gz' -print0 | xargs -0 -I {} mv {} rasa.tar.gz +FROM python:3.7-slim as base -FROM python:3.6-slim +RUN apt-get update -qq \ + && apt-get install -y --no-install-recommends \ + # required by psycopg2 at build and runtime + libpq-dev \ + # required for health check + curl \ + && apt-get autoremove -y -SHELL ["/bin/bash", "-c"] +FROM base as builder RUN apt-get update -qq && \ apt-get install -y --no-install-recommends \ @@ -22,28 +22,50 @@ RUN apt-get update -qq && \ libssl-dev \ libffi6 \ libffi-dev \ - libpng-dev \ - libpq-dev \ - curl && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \ - mkdir /install + libpng-dev + +# install poetry +# keep this in sync with the version in pyproject.toml and Dockerfile +ENV POETRY_VERSION 1.0.5 +RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python +ENV PATH "/root/.poetry/bin:/opt/venv/bin:${PATH}" + +# copy files +COPY . /build/ + +# change working directory +WORKDIR /build -WORKDIR /install +# install dependencies +RUN python -m venv /opt/venv && \ + . /opt/venv/bin/activate && \ + pip install --no-cache-dir -U 'pip<20' && \ + poetry install --no-dev --no-root --no-interaction && \ + poetry build -f wheel -n && \ + pip install --no-deps dist/*.whl && \ + rm -rf dist *.egg-info -# Copy as early as possible so we can cache ... -COPY requirements.txt . +# start a new build stage +FROM base as runner -RUN pip install -r requirements.txt --no-cache-dir +# copy everything from /opt +COPY --from=builder /opt/venv /opt/venv -COPY --from=builder /build/rasa.tar.gz . -RUN pip install ./rasa.tar.gz[sql] +# make sure we use the virtualenv +ENV PATH="/opt/venv/bin:$PATH" -VOLUME ["/app"] +# update permissions & change user to not run as root WORKDIR /app +RUN chgrp -R 0 /app && chmod -R g=u /app +USER 1001 -EXPOSE 5005 +# create a volume for temporary data +VOLUME /tmp -ENTRYPOINT ["rasa"] +# change shell +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +# the entry point +EXPOSE 5005 +ENTRYPOINT ["rasa"] CMD ["--help"] diff --git a/LICENSE.txt b/LICENSE.txt index 5bc6cd1f0a2f..2b30dad0e434 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2019 Rasa Technologies GmbH + Copyright 2020 Rasa Technologies GmbH Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index dbf42c4940cb..000000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,4 +0,0 @@ -include LICENSE.txt README.md requirements.txt requirements-dev.txt -include rasa/core/schemas/* rasa/core/training/visualization.html -include rasa/cli/default_config.yml -recursive-include rasa/cli/initial_project * diff --git a/Makefile b/Makefile index 98ff782bc6ad..d5981d90981d 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,15 @@ -.PHONY: clean test lint init check-readme +.PHONY: clean test lint init docs + +JOBS ?= 1 help: + @echo "make" @echo " clean" @echo " Remove Python/build artifacts." + @echo " install" + @echo " Install rasa." + @echo " install-full" + @echo " Install rasa with all extras (transformers, tensorflow_text, spacy, jieba)." @echo " formatter" @echo " Apply black formatting to code." 
@echo " lint" @@ -13,16 +20,17 @@ help: @echo " Install system requirements for running tests on Ubuntu and Debian based systems." @echo " prepare-tests-macos" @echo " Install system requirements for running tests on macOS." + @echo " prepare-tests-windows" + @echo " Install system requirements for running tests on Windows." @echo " prepare-tests-files" @echo " Download all additional project files needed to run tests." @echo " test" @echo " Run pytest on tests/." - @echo " check-readme" - @echo " Check if the README can be converted from .md to .rst for PyPI." - @echo " doctest" - @echo " Run all doctests embedded in the documentation." + @echo " Use the JOBS environment variable to configure number of workers (default: 1)." @echo " livedocs" @echo " Build the docs locally." + @echo " release" + @echo " Prepare a release." clean: find . -name '*.pyc' -exec rm -f {} + @@ -31,40 +39,69 @@ clean: rm -rf build/ rm -rf .pytype/ rm -rf dist/ - rm -rf docs/_build + rm -rf docs/build + rm -rf docs/.docusaurus + +install: + poetry run python -m pip install -U pip + poetry install + +install-mitie: + poetry run python -m pip install -U git+https://github.com/tmbo/MITIE.git#egg=mitie + +install-full: install install-mitie + poetry install -E full + +install-full-windows: install install-mitie + # tensorflow_text is not available on Windows, so we're skipping it + # see https://github.com/tensorflow/text/issues/44 for more details + poetry install -E spacy -E transformers -E jieba + +install-docs: + cd docs/ && yarn install formatter: - black rasa tests + poetry run black rasa tests lint: - flake8 rasa tests - black --check rasa tests + poetry run flake8 rasa tests + poetry run black --check rasa tests types: - pytype --keep-going rasa + poetry run pytype --keep-going rasa -j 16 -prepare-tests-macos: prepare-tests-files - brew install graphviz +prepare-tests-files: + poetry install -E spacy + poetry run python -m spacy download en_core_web_md + poetry run python -m spacy download de_core_news_sm + poetry run python -m spacy link en_core_web_md en --force + poetry run python -m spacy link de_core_news_sm de --force + wget --progress=dot:giga -N -P data/ https://s3-eu-west-1.amazonaws.com/mitie/total_word_feature_extractor.dat + +prepare-wget-macos: + brew install wget || true + +prepare-wget-windows: + choco install wget + +prepare-tests-macos: prepare-wget-macos prepare-tests-files + brew install graphviz || true prepare-tests-ubuntu: prepare-tests-files - sudo apt-get -y install graphviz graphviz-dev python3-tk + sudo apt-get -y install graphviz graphviz-dev python-tk -prepare-tests-files: - pip3 install https://github.com/explosion/spacy-models/releases/download/en_core_web_md-2.1.0/en_core_web_md-2.1.0.tar.gz#egg=en_core_web_md==2.1.0 --no-cache-dir -q - python -m spacy link en_core_web_md en --force - pip3 install https://github.com/explosion/spacy-models/releases/download/de_core_news_sm-2.1.0/de_core_news_sm-2.1.0.tar.gz#egg=de_core_news_sm==2.1.0 --no-cache-dir -q - python -m spacy link de_core_news_sm de --force - wget --progress=dot:giga -N -P data/ https://s3-eu-west-1.amazonaws.com/mitie/total_word_feature_extractor.dat +prepare-tests-windows: prepare-wget-windows prepare-tests-files + choco install graphviz test: clean - py.test tests --cov rasa + # OMP_NUM_THREADS can improve overall performance using one thread by process (on tensorflow), avoiding overload + OMP_NUM_THREADS=1 poetry run pytest tests -n $(JOBS) --cov rasa -doctest: clean - cd docs && make doctest +docs: + cd docs/ && 
poetry run yarn pre-build && yarn build livedocs: - cd docs && make livehtml + cd docs/ && poetry run yarn start -# if this runs through we can be sure the readme is properly shown on pypi -check-readme: - python setup.py check --restructuredtext --strict +release: + poetry run python scripts/release.py diff --git a/PRINCIPLES.md b/PRINCIPLES.md new file mode 100644 index 000000000000..d603101395b1 --- /dev/null +++ b/PRINCIPLES.md @@ -0,0 +1,28 @@ + + +When you create a conversational assistant, you are responsible for its impact on the people that it talks to. +Therefore, you should consider how users might perceive the assistant’s statements, and how a conversation might affect their lives. +This is not always straightforward, as you typically have little to no knowledge about the background of your users. +Thus, we created this guide to help you avoid the worst outcomes. + +It is in the best interest of all conversational assistant creators that the public perceives these assistants as helpful and friendly. +Beyond this, it is also in the best interest of all members of society (including creators) that conversational assistants are not used for harassment or manipulation. Aside from being unethical, such use cases would create a lasting reluctance of users to engage with conversational assistants. + +The following four key points should help you use this technology wisely. Please note, however, that these guidelines are only a first step, and you should use your own judgement as well. + + +## 1. **A conversational assistant should not cause users harm**. + +Even though a conversational assistant only exists in the digital world, it can still inflict harm on users simply by communicating with them in a certain way. For example, assistants are often used as information sources or decision guides. If the information that the assistant provides is inaccurate or misleading, users may end up making poor (or even dangerous) decisions based on their interaction with your assistant. + +## 2. **A conversational assistant should not encourage or normalize harmful behaviour from users**. + +Although users have complete freedom in what they can communicate to a conversational assistants, these assistants are designed to only follow pre-defined stories. In doing so, a conversational assistant should not try to provoke the user into engaging in harmful behaviour. If for any reason the user decides to engage in this behaviour anyway, the assistant should politely refuse to participate. In other words, treating such behaviour as normal or acceptable should be avoided. Trying to argue with the user will very rarely lead to useful results. + +## 3. **A conversational assistant should always identify itself as one**. + +When asked questions such as “Are you a bot?” or “Are you a human?”, a bot should always inform the user that it is indeed an assistant, and not a human. Impostor bots (algorithms that pose as humans) are a major piece of platform manipulation techniques, and this creates a lot of mistrust. Instead of misleading users, we should build assistants that truly support them, thereby enabling a larger fraction of work to be done by conversational assistants in the long-term (as users become more accustomed to them). This does not mean that conversational assistants can’t be human-like. + +## 4. **A conversational assistant should provide users a way to prove its identity**. 
+ +When an assistant is designed to communicate with users while representing a company, organization, etc., it is important to allow users to verify that this representation has been previously authorized. It’s possible to use already existing technologies to do this: for example, by integrating a conversational assistant to a website served using HTTPS, the content of the site (and therefore the assistant itself) will be guaranteed to be legitimate by a trusted certificate authority. Another example would be to have the conversational assistant use a “verified” social media account. diff --git a/README.md b/README.md index 743a84f29ac6..c923e85a2659 100644 --- a/README.md +++ b/README.md @@ -1,19 +1,22 @@ -# Rasa (formerly Rasa Core + Rasa NLU) +# Rasa Open Source [![Join the chat on Rasa Community Forum](https://img.shields.io/badge/forum-join%20discussions-brightgreen.svg)](https://forum.rasa.com/?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![PyPI version](https://badge.fury.io/py/rasa.svg)](https://badge.fury.io/py/rasa) [![Supported Python Versions](https://img.shields.io/pypi/pyversions/rasa.svg)](https://pypi.python.org/pypi/rasa) -[![Build Status](https://travis-ci.com/RasaHQ/rasa.svg?branch=master)](https://travis-ci.com/RasaHQ/rasa) +[![Build Status](https://github.com/RasaHQ/rasa/workflows/Continuous%20Integration/badge.svg)](https://github.com/RasaHQ/rasa/actions) [![Coverage Status](https://coveralls.io/repos/github/RasaHQ/rasa/badge.svg?branch=master)](https://coveralls.io/github/RasaHQ/rasa?branch=master) [![Documentation Status](https://img.shields.io/badge/docs-stable-brightgreen.svg)](https://rasa.com/docs) +![Documentation Build](https://img.shields.io/netlify/d2e447e4-5a5e-4dc7-be5d-7c04ae7ff706?label=Documentation%20Build) [![FOSSA Status](https://app.fossa.com/api/projects/custom%2B8141%2Fgit%40github.com%3ARasaHQ%2Frasa.git.svg?type=shield)](https://app.fossa.com/projects/custom%2B8141%2Fgit%40github.com%3ARasaHQ%2Frasa.git?ref=badge_shield) [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat-square)](https://github.com/orgs/RasaHQ/projects/23) - + -Rasa is an open source machine learning framework to automate text-and voice-based conversations. With Rasa, you can build chatbots on: +Rasa is an open source machine learning framework to automate text-and voice-based conversations. With Rasa, you can build contextual assistants on: - Facebook Messenger - Slack +- Google Hangouts +- Webex Teams - Microsoft Bot Framework - Rocket.Chat - Mattermost @@ -25,10 +28,10 @@ or voice assistants as: - Alexa Skills - Google Home Actions -Rasa's primary purpose is to help you build contextual, layered -conversations with lots of back-and-forth. To have a real conversation, -you need to have some memory and build on things that were said earlier. -Rasa lets you do that in a scalable way. +Rasa helps you build contextual assistants capable of having layered conversations with +lots of back-and-forth. In order for a human to have a meaningful exchange with a contextual +assistant, the assistant needs to be able to use context to build on things that were previously +discussed – Rasa enables you to build assistants that can do this in a scalable way. There's a lot more background information in this [blog post](https://medium.com/rasa-blog/a-new-approach-to-conversational-software-2e64a5d05f2a). @@ -71,9 +74,7 @@ questions. 
- [License](#license) ### How to contribute -We are very happy to receive and merge your contributions. You can -find more information about how to contribute to Rasa (in lots of -different ways!) [here](http://rasa.com/community/contribute). +We are very happy to receive and merge your contributions into this repository! To contribute via pull request, follow these steps: @@ -82,6 +83,11 @@ To contribute via pull request, follow these steps: 2. Write your code, tests and documentation, and format them with ``black`` 3. Create a pull request describing your changes +For more detailed instructions on how to contribute code, check out these [code contributor guidelines](CONTRIBUTING.md). + +You can find more information about how to contribute to Rasa (in lots of +different ways!) [on our website.](http://rasa.com/community/contribute). + Your pull request will be reviewed by a maintainer, who will get back to you about any necessary changes or questions. You will also be asked to sign a @@ -89,41 +95,115 @@ also be asked to sign a ## Development Internals -### Running and changing the documentation -To build & edit the docs, first install all necessary dependencies: +### Installing Poetry + +Rasa uses Poetry for packaging and dependency management. If you want to build it from source, +you have to install Poetry first. This is how it can be done: + +```bash +curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python ``` -pip3 install -r requirements-dev.txt -pip3 install -r requirements-docs.txt + +There are several other ways to install Poetry. Please, follow +[the official guide](https://python-poetry.org/docs/#installation) to see all possible options. + +### Managing environments + +The official [Poetry guide](https://python-poetry.org/docs/managing-environments/) suggests to use +[pyenv](https://github.com/pyenv/pyenv) or any other similar tool to easily switch between Python versions. +This is how it can be done: + +```bash +pyenv install 3.7.6 +pyenv local 3.7.6 # Activate Python 3.7.6 for the current project +``` + +By default, Poetry will try to use the currently activated Python version to create the virtual environment +for the current project automatically. You can also create and activate a virtual environment manually — in this +case, Poetry should pick it up and use it to install the dependencies. For example: + +```bash +python -m venv .venv +source .venv/bin/activate +``` + +You can make sure that the environment is picked up by executing + +```bash +poetry env info +``` + +### Building from source + +To install dependencies and `rasa` itself in editable mode execute + +```bash +make install +``` + +### Running and changing the documentation + +First of all, install all the required dependencies: + +```bash +make install install-docs ``` After the installation has finished, you can run and view the documentation locally using: -``` + +```bash make livedocs ``` -Visit the local version of the docs at http://localhost:8000 in your browser. +It should open a new tab with the local version of the docs in your browser; +if not, visit http://localhost:3000 in your browser. You can now change the docs locally and the web page will automatically reload and apply your changes. ### Running the Tests + In order to run the tests, make sure that you have the development requirements installed: + ```bash -export PIP_USE_PEP517=false -pip3 install -r requirements-dev.txt -pip3 install -e . 
make prepare-tests-ubuntu # Only on Ubuntu and Debian based systems make prepare-tests-macos # Only on macOS ``` Then, run the tests: + ```bash make test ``` +They can also be run at multiple jobs to save some time: + +```bash +JOBS=[n] make test +``` + +Where `[n]` is the number of jobs desired. If omitted, `[n]` will be automatically chosen by pytest. + +### Resolving merge conflicts + +Poetry doesn't include any solution that can help to resolve merge conflicts in +the lock file `poetry.lock` by default. +However, there is a great tool called [poetry-merge-lock](https://poetry-merge-lock.readthedocs.io/en/latest/). +Here is how you can install it: + +```bash +pip install poetry-merge-lock +``` + +Just execute this command to resolve merge conflicts in `poetry.lock` automatically: + +```bash +poetry-merge-lock +``` + ### Steps to release a new version -Releasing a new version is quite simple, as the packages are build and distributed by travis. +Releasing a new version is quite simple, as the packages are build and distributed by GitHub Actions. *Terminology*: * patch release (third version part increases): 1.1.2 -> 1.1.3 @@ -131,20 +211,22 @@ Releasing a new version is quite simple, as the packages are build and distribut * major release (first version part increases): 1.2.0 -> 2.0.0 *Release steps*: -1. Create a new branch and - - * Update [rasa/version.py](https://github.com/RasaHQ/rasa/blob/master/rasa/version.py) to reflect the correct version number - * Edit the [CHANGELOG.rst](https://github.com/RasaHQ/rasa/blob/master/CHANGELOG.rst), create a new section for the release (eg by moving the items from the collected master section) and create a new master logging section - * Edit the [migration guide](https://github.com/RasaHQ/rasa/blob/master/docs/migration-guide.rst) to provide assistance for users updating to the new version -2. Commit the changes and create a PR against master or the release branch (e.g. `1.2.x`) -3. Once your PR is merged, tag a new release (this SHOULD always happen on master or release branches), e.g. using - ``` - git tag 1.2.0 -m "Some helpful line describing the release" - git push origin 1.2.0 --tags - ``` - travis will build this tag and push a package to [pypi](https://pypi.python.org/pypi/rasa) -5. **If this is a minor release**, a new release branch should be created pointing to the same commit as the tag to allow for future patch releases, e.g. +1. Make sure all dependencies are up to date (**especially Rasa SDK**) + - For Rasa SDK that means first creating a [new Rasa SDK release](https://github.com/RasaHQ/rasa-sdk#steps-to-release-a-new-version) (make sure the version numbers between the new Rasa and Rasa SDK releases match) + - Once the tag with the new Rasa SDK release is pushed and the package appears on [pypi](https://pypi.org/project/rasa-sdk/), the dependency in the rasa repository can be resolved (see below). +2. Switch to the branch you want to cut the release from (`master` in case of a major / minor, the current feature branch for patch releases) + - Update the `rasa-sdk` entry in `pyproject.toml` with the new release version and run `poetry update`. This creates a new `poetry.lock` file with all dependencies resolved. + - Commit the changes with `git commit -am "bump rasa-sdk dependency"` but do not push them. They will be automatically picked up by the following step. +3. Run `make release` +4. Create a PR against master or the release branch (e.g. `1.2.x`) +5. 
Once your PR is merged, tag a new release (this SHOULD always happen on master or release branches), e.g. using + ```bash + git tag 1.2.0 -m "next release" + git push origin 1.2.0 ``` + GitHub will build this tag and push a package to [pypi](https://pypi.python.org/pypi/rasa) +6. **If this is a minor release**, a new release branch should be created pointing to the same commit as the tag to allow for future patch releases, e.g. + ```bash git checkout -b 1.2.x git push origin 1.2.x ``` @@ -153,7 +235,7 @@ Releasing a new version is quite simple, as the packages are build and distribut To ensure a standardized code style we use the formatter [black](https://github.com/ambv/black). To ensure our type annotations are correct we use the type checker [pytype](https://github.com/google/pytype). -If your code is not formatted properly or doesn't type check, travis will fail to build. +If your code is not formatted properly or doesn't type check, GitHub will fail to build. #### Formatting @@ -161,7 +243,7 @@ If you want to automatically format your code on every commit, you can use [pre- Just install it via `pip install pre-commit` and execute `pre-commit install` in the root folder. This will add a hook to the repository, which reformats files on every commit. -If you want to set it up manually, install black via `pip install -r requirements-dev.txt`. +If you want to set it up manually, install black via `poetry install`. To reformat files execute ``` make formatter @@ -169,7 +251,7 @@ make formatter #### Type Checking -If you want to check types on the codebase, install `pytype` using `pip install -r requirements-dev.txt`. +If you want to check types on the codebase, install `pytype` using `poetry install`. To check the types execute ``` make types @@ -177,17 +259,16 @@ make types ### Deploying documentation updates -We use `sphinx-versioning` to build docs for tagged versions and for the master branch. -The static site that gets built is pushed to the `docs` branch of this repo, which doesn't contain -any code, only the site. +We use `Docusaurus v2` to build docs for tagged versions and for the master branch. +The static site that gets built is pushed to the `documentation` branch of this repo. -We host the site on netlify. When there is a reason to update the docs (e.g. master has changed or we have -tagged a new version) we trigger a webhook on netlify (see `.travis.yml`). +We host the site on netlify. On master branch builds (see `.github/workflows/documentation.yml`), we push the built docs to +the `documentation` branch. Netlify automatically re-deploys the docs pages whenever there is a change to that branch. ## License Licensed under the Apache License, Version 2.0. -Copyright 2019 Rasa Technologies GmbH. [Copy of the license](LICENSE.txt). +Copyright 2020 Rasa Technologies GmbH. [Copy of the license](LICENSE.txt). A list of the Licenses of the dependencies of the project can be found at the bottom of the diff --git a/alt_requirements/conda-requirements.txt b/alt_requirements/conda-requirements.txt deleted file mode 100644 index 0226a1d382be..000000000000 --- a/alt_requirements/conda-requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -scipy==1.2.1 -scikit-learn==0.20.2 diff --git a/alt_requirements/requirements_bare.txt b/alt_requirements/requirements_bare.txt deleted file mode 100644 index 1f09da7f4352..000000000000 --- a/alt_requirements/requirements_bare.txt +++ /dev/null @@ -1,3 +0,0 @@ -# deprecated, will be removed. 
please use the `requirements.txt` -# in the mainfolder instead --r ../requirements.txt diff --git a/alt_requirements/requirements_dev.txt b/alt_requirements/requirements_dev.txt deleted file mode 100644 index 4b703c324292..000000000000 --- a/alt_requirements/requirements_dev.txt +++ /dev/null @@ -1,3 +0,0 @@ -# deprecated, will be removed. please use the `requirements-dev.txt` -# in the mainfolder instead --r ../requirements-dev.txt diff --git a/alt_requirements/requirements_docs.txt b/alt_requirements/requirements_docs.txt deleted file mode 100644 index 793e19abf788..000000000000 --- a/alt_requirements/requirements_docs.txt +++ /dev/null @@ -1,3 +0,0 @@ -# deprecated, will be removed. please use the `requirements-docs.txt` -# in the mainfolder instead --r ../requirements-docs.txt diff --git a/alt_requirements/requirements_full.txt b/alt_requirements/requirements_full.txt deleted file mode 100644 index a700141c6b13..000000000000 --- a/alt_requirements/requirements_full.txt +++ /dev/null @@ -1,10 +0,0 @@ -# Minimum Install Requirements --r ../requirements.txt - -# Spacy Requirements --r requirements_pretrained_embeddings_spacy.txt - -# MITIE Requirements --r requirements_pretrained_embeddings_mitie.txt - -jieba==0.39 diff --git a/alt_requirements/requirements_pretrained_embeddings_mitie.txt b/alt_requirements/requirements_pretrained_embeddings_mitie.txt deleted file mode 100644 index 21078470c318..000000000000 --- a/alt_requirements/requirements_pretrained_embeddings_mitie.txt +++ /dev/null @@ -1,4 +0,0 @@ -# Minimum Install Requirements --r ../requirements.txt - -git+https://github.com/tmbo/MITIE.git#egg=mitie diff --git a/alt_requirements/requirements_pretrained_embeddings_spacy.txt b/alt_requirements/requirements_pretrained_embeddings_spacy.txt deleted file mode 100644 index 456a7dd568b7..000000000000 --- a/alt_requirements/requirements_pretrained_embeddings_spacy.txt +++ /dev/null @@ -1,4 +0,0 @@ -# Minimum Install Requirements --r ../requirements.txt - -spacy==2.1.4 diff --git a/binder/postBuild b/binder/postBuild old mode 100755 new mode 100644 index 132f51676194..a5ddc2f087ba --- a/binder/postBuild +++ b/binder/postBuild @@ -1 +1 @@ -python -m spacy download en \ No newline at end of file +poetry run python -m spacy download en diff --git a/binder/requirements.txt b/binder/requirements.txt deleted file mode 100644 index ec55bc0303d9..000000000000 --- a/binder/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ --r ../requirements.txt - -service_identity==18.1.0 diff --git a/changelog/.gitignore b/changelog/.gitignore new file mode 100644 index 000000000000..d50008f8a47f --- /dev/null +++ b/changelog/.gitignore @@ -0,0 +1,2 @@ +# Except this file +!.gitignore diff --git a/changelog/4441.doc.md b/changelog/4441.doc.md new file mode 100644 index 000000000000..588c38617a4b --- /dev/null +++ b/changelog/4441.doc.md @@ -0,0 +1 @@ +Added documentation on `ambiguity_threshold` parameter in Fallback Actions page. diff --git a/changelog/4536.improvement.md b/changelog/4536.improvement.md new file mode 100644 index 000000000000..746bc5f23cd7 --- /dev/null +++ b/changelog/4536.improvement.md @@ -0,0 +1 @@ +Add support for Python 3.8. diff --git a/changelog/4605.doc.md b/changelog/4605.doc.md new file mode 100644 index 000000000000..6266a5d58014 --- /dev/null +++ b/changelog/4605.doc.md @@ -0,0 +1 @@ +Remove outdated whitespace tokenizer warning in Testing Your Assistant documentation. 
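As context for the `ambiguity_threshold` entry above (changelog/4441.doc.md): the parameter controls how close the confidences of the two top-ranked intents may be before a fallback is triggered. A minimal, illustrative `FallbackPolicy` configuration is sketched below; the threshold values are examples, not recommendations:

```yaml
# config.yml (illustrative values only)
policies:
  - name: FallbackPolicy
    nlu_threshold: 0.4        # fall back if the top intent confidence is below this
    ambiguity_threshold: 0.1  # fall back if the two top intent confidences are closer than this
    core_threshold: 0.3
    fallback_action_name: "action_default_fallback"
```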
diff --git a/changelog/4745.feature.md b/changelog/4745.feature.md new file mode 100644 index 000000000000..a39694812c29 --- /dev/null +++ b/changelog/4745.feature.md @@ -0,0 +1,4 @@ +Added template name to the metadata of bot utterance events. + +`BotUttered` event contains a `template_name` property in its metadata for any +new bot message. diff --git a/changelog/5038.bugfix.md b/changelog/5038.bugfix.md new file mode 100644 index 000000000000..076d86596051 --- /dev/null +++ b/changelog/5038.bugfix.md @@ -0,0 +1,3 @@ +Fixed a bug in the `CountVectorsFeaturizer` which resulted in the very first +message after loading a model to be processed incorrectly due to the vocabulary +not being loaded yet. diff --git a/changelog/5086.feature.md b/changelog/5086.feature.md new file mode 100644 index 000000000000..8a6d4260e940 --- /dev/null +++ b/changelog/5086.feature.md @@ -0,0 +1,2 @@ +Added a `--num-threads` CLI argument that can be passed to `rasa train` +and will be used to train NLU components. diff --git a/changelog/5135.bugfix.md b/changelog/5135.bugfix.md new file mode 100644 index 000000000000..1e5dcd09bc7a --- /dev/null +++ b/changelog/5135.bugfix.md @@ -0,0 +1,2 @@ +Fixed Rasa shell skipping button messages if buttons are attached to +a message previous to the latest. diff --git a/changelog/5385.bugfix.md b/changelog/5385.bugfix.md new file mode 100644 index 000000000000..3fb43479b4a5 --- /dev/null +++ b/changelog/5385.bugfix.md @@ -0,0 +1 @@ +Stack level for `FutureWarning` updated to level 2. diff --git a/changelog/5453.bugfix.md b/changelog/5453.bugfix.md new file mode 100644 index 000000000000..3468dcd44cae --- /dev/null +++ b/changelog/5453.bugfix.md @@ -0,0 +1,2 @@ +If custom utter message contains no value or integer value, then it fails +returning custom utter message. Fixed by converting the template to type string. \ No newline at end of file diff --git a/changelog/5481.improvement.md b/changelog/5481.improvement.md new file mode 100644 index 000000000000..8a6757007087 --- /dev/null +++ b/changelog/5481.improvement.md @@ -0,0 +1 @@ +`DIETClassifier` now also assigns a confidence value to entity predictions. diff --git a/changelog/5510.feature.md b/changelog/5510.feature.md new file mode 100644 index 000000000000..c06c16ad9285 --- /dev/null +++ b/changelog/5510.feature.md @@ -0,0 +1,46 @@ +You can now define what kind of features should be used by what component +(see [Choosing a Pipeline](./tuning-your-model.mdx)). + +You can set an alias via the option `alias` for every featurizer in your pipeline. +The `alias` can be anything, by default it is set to the full featurizer class name. +You can then specify, for example, on the +[DIETClassifier](./components/intent-classifiers.mdx#diet-classifier) what features from which +featurizers should be used. +If you don't set the option `featurizers` all available features will be used. +This is also the default behavior. +Check components to see what components have the option +`featurizers` available. + +Here is an example pipeline that shows the new option. +We define an alias for all featurizers in the pipeline. +All features will be used in the `DIETClassifier`. +However, the `ResponseSelector` only takes the features from the +`ConveRTFeaturizer` and the `CountVectorsFeaturizer` (word level). 
+ +```yaml +pipeline: +- name: ConveRTTokenizer +- name: ConveRTFeaturizer + alias: "convert" +- name: CountVectorsFeaturizer + alias: "cvf_word" +- name: CountVectorsFeaturizer + alias: "cvf_char" + analyzer: char_wb + min_ngram: 1 + max_ngram: 4 +- name: RegexFeaturizer + alias: "regex" +- name: LexicalSyntacticFeaturizer + alias: "lsf" +- name: DIETClassifier +- name: ResponseSelector + epochs: 50 + featurizers: ["convert", "cvf_word"] +- name: EntitySynonymMapper +``` + +:::caution +This change is model-breaking. Please retrain your models. + +::: diff --git a/changelog/5617.bugfix.md b/changelog/5617.bugfix.md new file mode 100644 index 000000000000..a3cf0560d2eb --- /dev/null +++ b/changelog/5617.bugfix.md @@ -0,0 +1 @@ +Don't create TensorBoard log files during prediction. diff --git a/changelog/5637.improvement.md b/changelog/5637.improvement.md new file mode 100644 index 000000000000..0c9697e1580e --- /dev/null +++ b/changelog/5637.improvement.md @@ -0,0 +1,3 @@ +Added behavior to the `rasa --version` command. It will now also list information +about the operating system, Python version and `rasa-sdk`. This will make it easier +for users to file bug reports. diff --git a/changelog/5638.bugfix.md b/changelog/5638.bugfix.md new file mode 100644 index 000000000000..89130064642f --- /dev/null +++ b/changelog/5638.bugfix.md @@ -0,0 +1 @@ +Fixed DIET breaking with empty spaCy model. diff --git a/changelog/5640.doc.md b/changelog/5640.doc.md new file mode 100644 index 000000000000..fb5b9c80c271 --- /dev/null +++ b/changelog/5640.doc.md @@ -0,0 +1 @@ +Updated Facebook Messenger channel docs with supported attachment information. diff --git a/changelog/5675.docs.md b/changelog/5675.docs.md new file mode 100644 index 000000000000..4199c931191d --- /dev/null +++ b/changelog/5675.docs.md @@ -0,0 +1,2 @@ +Update `rasa shell` documentation to explain how to recreate external +channel session behavior. diff --git a/changelog/5737.bugfix.md b/changelog/5737.bugfix.md new file mode 100644 index 000000000000..042318f77ecb --- /dev/null +++ b/changelog/5737.bugfix.md @@ -0,0 +1,4 @@ +Pinned the library version for the Azure +[Cloud Storage](./model-storage.mdx#server-fetch-from-remote-storage) to 2.1.0 since the +persistor is currently not compatible with later versions of the azure-storage-blob +library. diff --git a/changelog/5743.improvement.md b/changelog/5743.improvement.md new file mode 100644 index 000000000000..f4dbf91b8024 --- /dev/null +++ b/changelog/5743.improvement.md @@ -0,0 +1,4 @@ +Support for additional training metadata. + +Training data messages now support kwargs, and the Rasa JSON data reader +includes all fields when instantiating a training data instance. diff --git a/changelog/5748.improvement.md b/changelog/5748.improvement.md new file mode 100644 index 000000000000..dfae8f1149c1 --- /dev/null +++ b/changelog/5748.improvement.md @@ -0,0 +1,8 @@ +Standardize testing output. The following test output can be produced for intents, +responses, entities and stories: +- report: a detailed report with testing metrics per label (e.g. precision, + recall, accuracy, etc.)
+- errors: a file that contains incorrect predictions +- successes: a file that contains correct predictions +- confusion matrix: plot of confusion matrix +- histogram: plot of confidence distribution (not available for stories) diff --git a/changelog/5755.bugfix.md b/changelog/5755.bugfix.md new file mode 100644 index 000000000000..2b199f0e7dcb --- /dev/null +++ b/changelog/5755.bugfix.md @@ -0,0 +1,2 @@ +Remove `clean_up_entities` from extractors that extract pre-defined entities. +The clean-up method is kept only for entity extractors that extract custom entities. \ No newline at end of file diff --git a/changelog/5756.improvement.md b/changelog/5756.improvement.md new file mode 100644 index 000000000000..e43ab6e3c989 --- /dev/null +++ b/changelog/5756.improvement.md @@ -0,0 +1,10 @@ +To avoid the problem of our entity extractors predicting entity labels for +just a part of the words, we introduced a cleaning method after the prediction +was done. We now avoid the incorrect prediction in the first place: +we no longer tokenize words into sub-words; +instead, we take the mean of the sub-word feature vectors as the feature vector of the word. + +:::caution +This change is model-breaking. Please retrain your models. + +::: diff --git a/changelog/5757.removal.md b/changelog/5757.removal.md new file mode 100644 index 000000000000..7ecacb2fdcaf --- /dev/null +++ b/changelog/5757.removal.md @@ -0,0 +1,3 @@ +Removed previously deprecated packages `rasa_nlu` and `rasa_core`. + +Use imports from `rasa.core` and `rasa.nlu` instead. diff --git a/changelog/5758.removal.md b/changelog/5758.removal.md new file mode 100644 index 000000000000..e2526d6f0620 --- /dev/null +++ b/changelog/5758.removal.md @@ -0,0 +1,14 @@ +Removed previously deprecated classes: +- event brokers (`EventChannel` and `FileProducer`, `KafkaProducer`, + `PikaProducer`, `SQLProducer`) +- intent classifier `EmbeddingIntentClassifier` +- policy `KerasPolicy` + +Removed previously deprecated methods: +- `Agent.handle_channels` +- `TrackerStore.create_tracker_store` + +Removed support for pipeline templates in `config.yml`. + +Removed deprecated training data keys `entity_examples` and `intent_examples` from +the JSON training data format. diff --git a/changelog/5759.improvement.md b/changelog/5759.improvement.md new file mode 100644 index 000000000000..26f45c2475ec --- /dev/null +++ b/changelog/5759.improvement.md @@ -0,0 +1,3 @@ +Move option `case_sensitive` from the tokenizers to the featurizers. +- Remove the option from the `WhitespaceTokenizer` and `ConveRTTokenizer`. +- Add option `case_sensitive` to the `RegexFeaturizer`. diff --git a/changelog/5766.improvement.md b/changelog/5766.improvement.md new file mode 100644 index 000000000000..9edbce26b3da --- /dev/null +++ b/changelog/5766.improvement.md @@ -0,0 +1 @@ +If a user sends a voice message to the bot using Facebook, the user's message is set to the attachment's URL. The same is now also done for the remaining attachment types (image, video, and file). diff --git a/changelog/5784.misc.rst b/changelog/5784.misc.rst new file mode 100644 index 000000000000..14b7844cbddd --- /dev/null +++ b/changelog/5784.misc.rst @@ -0,0 +1,3 @@ +Add testing of Rasa Open Source installations on Windows. + +Update tests to support Windows.
\ No newline at end of file diff --git a/changelog/5788.misc.md b/changelog/5788.misc.md new file mode 100644 index 000000000000..e714f0159949 --- /dev/null +++ b/changelog/5788.misc.md @@ -0,0 +1,2 @@ +Don't set roles and groups to `O` (nothing found) when constructing entities +from model predictions. \ No newline at end of file diff --git a/changelog/5792.bugfix.md b/changelog/5792.bugfix.md new file mode 100644 index 000000000000..aeb84d20b1aa --- /dev/null +++ b/changelog/5792.bugfix.md @@ -0,0 +1,2 @@ +Fixed issue where the `DucklingHTTPExtractor` component would +not work if its `url` contained a trailing slash. diff --git a/changelog/5794.improvement.md b/changelog/5794.improvement.md new file mode 100644 index 000000000000..3879ee2599bf --- /dev/null +++ b/changelog/5794.improvement.md @@ -0,0 +1,3 @@ +Creating a `Domain` using `Domain.from_dict` can no longer alter the input dictionary. +Previously, there could be problems when the input dictionary was re-used for other +things after creating the `Domain` from it. diff --git a/changelog/5805.improvement.md b/changelog/5805.improvement.md new file mode 100644 index 000000000000..2918565498d9 --- /dev/null +++ b/changelog/5805.improvement.md @@ -0,0 +1,4 @@ +The debug-level logs when instantiating an +[SQLTrackerStore](./tracker-stores.mdx#sql-tracker-store) +no longer show the password in plain text. Now, the URL is displayed with the password +hidden, e.g. `postgresql://username:***@localhost:5432`. diff --git a/changelog/5808.bugfix.md b/changelog/5808.bugfix.md new file mode 100644 index 000000000000..0dc69429846c --- /dev/null +++ b/changelog/5808.bugfix.md @@ -0,0 +1 @@ +Changed the variable `CERT_URI` in `hangouts.py` to a string type. diff --git a/changelog/5811.docs.md b/changelog/5811.docs.md new file mode 100644 index 000000000000..0005ae6abadf --- /dev/null +++ b/changelog/5811.docs.md @@ -0,0 +1 @@ +Updated the event brokers documentation to say `url` instead of `host`. diff --git a/changelog/5834.removal.md b/changelog/5834.removal.md new file mode 100644 index 000000000000..e6bbe6ca9b04 --- /dev/null +++ b/changelog/5834.removal.md @@ -0,0 +1 @@ +Removed `restaurantbot` example as it was confusing and not a great way to build a bot. diff --git a/changelog/5837.feature.md b/changelog/5837.feature.md new file mode 100644 index 000000000000..e3ff61a957b4 --- /dev/null +++ b/changelog/5837.feature.md @@ -0,0 +1,2 @@ +Added `--port` command-line argument to the interactive learning mode to allow +changing the port for the Rasa server running in the background. diff --git a/changelog/5850.bugfix.md b/changelog/5850.bugfix.md new file mode 100644 index 000000000000..b0406faa0cb5 --- /dev/null +++ b/changelog/5850.bugfix.md @@ -0,0 +1,3 @@ +Slots will be correctly interpolated for `button` responses. + +Previously this resulted in no interpolation due to a bug. \ No newline at end of file diff --git a/changelog/5855.improvement.md b/changelog/5855.improvement.md new file mode 100644 index 000000000000..d4a6649484c3 --- /dev/null +++ b/changelog/5855.improvement.md @@ -0,0 +1,3 @@ +Shortened the information shown by tqdm during training of ML algorithms based on the log +level. If you train your model in debug mode, all available metrics will be +shown during training; otherwise, the information is shortened. diff --git a/changelog/5905.bugfix.md b/changelog/5905.bugfix.md new file mode 100644 index 000000000000..0bfd7f5e2f67 --- /dev/null +++ b/changelog/5905.bugfix.md @@ -0,0 +1,3 @@ +Remove option `token_pattern` from `CountVectorsFeaturizer`.
+Instead, all tokenizers now have the option `token_pattern`. +If a regular expression is set, the tokenizer will apply the token pattern. diff --git a/changelog/5913.improvement.md b/changelog/5913.improvement.md new file mode 100644 index 000000000000..44d5d117eade --- /dev/null +++ b/changelog/5913.improvement.md @@ -0,0 +1,4 @@ +Ignore conversation test directory `tests/` when importing a project +using `MultiProjectImporter` and `use_e2e` is `False`. +Previously, any story data found in a project subdirectory would be imported +as training data. diff --git a/changelog/5952.doc.md b/changelog/5952.doc.md new file mode 100644 index 000000000000..0792cfff2d97 --- /dev/null +++ b/changelog/5952.doc.md @@ -0,0 +1,2 @@ +Update `rasa init` documentation to include `tests/conversation_tests.md` +in the resulting directory tree. diff --git a/changelog/5957.feature.md b/changelog/5957.feature.md new file mode 100644 index 000000000000..c3a1a0cf1c97 --- /dev/null +++ b/changelog/5957.feature.md @@ -0,0 +1,2 @@ +Add new entity extractor `RegexEntityExtractor`. The entity extractor extracts entities using the lookup tables +and regexes defined in the training data. For more information see [RegexEntityExtractor](./components/entity-extractors.mdx#regexentityextractor). diff --git a/changelog/5964.bugfix.md b/changelog/5964.bugfix.md new file mode 100644 index 000000000000..2154d7496032 --- /dev/null +++ b/changelog/5964.bugfix.md @@ -0,0 +1 @@ +Fixed a bug where custom metadata passed with the utterance always restarted the session. diff --git a/changelog/5996.feature.md b/changelog/5996.feature.md new file mode 100644 index 000000000000..e7b778c2e3a3 --- /dev/null +++ b/changelog/5996.feature.md @@ -0,0 +1,2 @@ +Introduced a new `YAML` format for Core training data and implemented a parser +for it. Rasa Open Source can now read stories in both `Markdown` and `YAML` format. diff --git a/changelog/5998.bugfix.md b/changelog/5998.bugfix.md new file mode 100644 index 000000000000..8a7a3ae60d97 --- /dev/null +++ b/changelog/5998.bugfix.md @@ -0,0 +1 @@ +`WhitespaceTokenizer` does not remove vowel signs in Hindi anymore. diff --git a/changelog/6020.feature.md b/changelog/6020.feature.md new file mode 100644 index 000000000000..f6b13ca8e801 --- /dev/null +++ b/changelog/6020.feature.md @@ -0,0 +1,11 @@ +You can now enable threaded message responses from Rasa through the Slack connector. +This option is enabled using an optional configuration in the `credentials.yml` file: + +```yaml + slack: + slack_token: + slack_channel: + use_threads: True +``` + +Button support has also been added in the Slack connector. diff --git a/changelog/6024.improvement.md b/changelog/6024.improvement.md new file mode 100644 index 000000000000..a5ff657d2f41 --- /dev/null +++ b/changelog/6024.improvement.md @@ -0,0 +1,2 @@ +`rasa data split nlu` now makes sure that there is at least one example per +intent and response in the test data. diff --git a/changelog/6042.bugfix.md b/changelog/6042.bugfix.md new file mode 100644 index 000000000000..2860fcd8f5a4 --- /dev/null +++ b/changelog/6042.bugfix.md @@ -0,0 +1,2 @@ +Convert entity values coming from `DucklingHTTPExtractor` to string +during evaluation to avoid mismatches due to different types. diff --git a/changelog/6044.improvement.md b/changelog/6044.improvement.md new file mode 100644 index 000000000000..0d3705a7c21b --- /dev/null +++ b/changelog/6044.improvement.md @@ -0,0 +1,2 @@ +Do not deepcopy slots when instantiating trackers.
This leads to a significant +speedup when training on domains with a large number of slots. diff --git a/changelog/6045.improvment.md b/changelog/6045.improvment.md new file mode 100644 index 000000000000..cb716f2e5fff --- /dev/null +++ b/changelog/6045.improvment.md @@ -0,0 +1,21 @@ +We updated the way we save and use features in our NLU pipeline. + +The message object now has a dedicated field, called `features`, to store the +features that are generated in the NLU pipeline. We adapted all our featurizers in a +way that sequence and sentence features are stored independently. This allows us to +keep different kinds of features for the sequence and the sentence. For example, the +`LexicalSyntacticFeaturizer` does not produce any sentence features anymore, as our +experiments showed that those did not bring any performance gain, just quite a lot of +additional values to store. + +We also modified the DIET architecture to process the sequence and sentence +features independently at first. The features are concatenated just before +the transformer. + +We also removed the `__CLS__` token again. Our tokenizers will not +add this token anymore. + +:::caution
This change is model-breaking. Please retrain your models. + +::: \ No newline at end of file diff --git a/changelog/6052.improvement.md b/changelog/6052.improvement.md new file mode 100644 index 000000000000..39acf41a643e --- /dev/null +++ b/changelog/6052.improvement.md @@ -0,0 +1 @@ +Add an `endpoint` kwarg to `rasa.jupyter.chat` to enable using a custom action server while chatting with a model in a Jupyter notebook. diff --git a/changelog/6053.bugfix.md b/changelog/6053.bugfix.md new file mode 100644 index 000000000000..f2e062202513 --- /dev/null +++ b/changelog/6053.bugfix.md @@ -0,0 +1,3 @@ +Update `FeatureSignature` to store just the feature dimension instead of the +complete shape. This change fixes the usage of the option `share_hidden_layers` +in the `DIETClassifier`. diff --git a/changelog/6065.feature.md b/changelog/6065.feature.md new file mode 100644 index 000000000000..3e4acd850ad0 --- /dev/null +++ b/changelog/6065.feature.md @@ -0,0 +1,2 @@ +Add support for [rules](./rules.mdx) data and [forms](./forms.mdx) in YAML +format. diff --git a/changelog/6066.feature.md b/changelog/6066.feature.md new file mode 100644 index 000000000000..31df9c4cf124 --- /dev/null +++ b/changelog/6066.feature.md @@ -0,0 +1,5 @@ +The NLU `interpreter` is now passed to the [Policies](./policies.mdx) during training and +inference time. Note that this requires an additional parameter `interpreter` in the +method `predict_action_probabilities` of the `Policy` interface. In case a +custom `Policy` implementation doesn't provide this parameter, Rasa Open Source +will print a warning and omit passing the `interpreter`. diff --git a/changelog/6087.bugfix.md b/changelog/6087.bugfix.md new file mode 100644 index 000000000000..b957f8f9be63 --- /dev/null +++ b/changelog/6087.bugfix.md @@ -0,0 +1,3 @@ +Unescape the `\n, \t, \r, \f, \b` tokens when reading NLU data from Markdown files. + +When converting JSON files into Markdown, the tokens mentioned above are escaped. These tokens need to be unescaped when loading the data from Markdown to ensure that the data is treated in the same way.
\ No newline at end of file diff --git a/changelog/6088.feature.md b/changelog/6088.feature.md new file mode 100644 index 000000000000..011c30b1b4e3 --- /dev/null +++ b/changelog/6088.feature.md @@ -0,0 +1,23 @@ +Added the new dialogue policy RulePolicy which will replace the old “rule-like” +policies [Mapping Policy](./policies.mdx#mapping-policy), +[Fallback Policy](./policies.mdx#fallback-policy), +[Two-Stage Fallback Policy](./policies.mdx#two-stage-fallback-policy), and +[Form Policy](./policies.mdx#form-policy). These policies are now +deprecated and will be removed in the future. Please see the +[rules documentation](./rules.mdx) for more information. + +Added new NLU component [FallbackClassifier](./components/intent-classifiers.mdx#fallbackclassifier) +which predicts an intent `nlu_fallback` in case the confidence is below a given +threshold. The intent `nlu_fallback` may +then be used to write stories / rules to handle the fallback in case of low NLU +confidence. + +```yaml +pipeline: +- ... # Other NLU components +- name: FallbackClassifier + # If the highest ranked intent has a confidence lower than the threshold, then + # the NLU pipeline predicts an intent `nlu_fallback` which can then be used in + # stories / rules to implement an appropriate fallback. + threshold: 0.5 +``` diff --git a/changelog/6120.bugfix.md b/changelog/6120.bugfix.md new file mode 100644 index 000000000000..af7edfa32af0 --- /dev/null +++ b/changelog/6120.bugfix.md @@ -0,0 +1,3 @@ +Fix the way training data is generated in `rasa test nlu` when using the `-P` flag. +Each percentage of the training dataset used to be formed as a part of the last +sampled training dataset and not as a sample from the original training dataset. diff --git a/changelog/6123.improvement.md b/changelog/6123.improvement.md new file mode 100644 index 000000000000..c84799f412f9 --- /dev/null +++ b/changelog/6123.improvement.md @@ -0,0 +1 @@ +Add support for proxy use in [slack](./connectors/slack.mdx) input channel. diff --git a/changelog/6132.feature.md b/changelog/6132.feature.md new file mode 100644 index 000000000000..29ad7ea7294c --- /dev/null +++ b/changelog/6132.feature.md @@ -0,0 +1,5 @@ +Added possibility to split the domain into separate files. All YAML files +under the path specified with `--domain` will be scanned for domain +information (e.g. intents, actions, etc.) and then combined into a single domain. + +The default value for `--domain` is still `domain.yml`. A minimal layout sketch follows after the entries below. diff --git a/changelog/6134.improvement.md b/changelog/6134.improvement.md new file mode 100644 index 000000000000..ec9ee9c1c4f5 --- /dev/null +++ b/changelog/6134.improvement.md @@ -0,0 +1 @@ +Log the number of examples per intent during training. Logging can be enabled using `rasa train --debug`. diff --git a/changelog/6143.bugfix.md b/changelog/6143.bugfix.md new file mode 100644 index 000000000000..a45efbe77f82 --- /dev/null +++ b/changelog/6143.bugfix.md @@ -0,0 +1 @@ +Prevent `WhitespaceTokenizer` from outputting an empty list of tokens. \ No newline at end of file diff --git a/changelog/6160.bugfix.md b/changelog/6160.bugfix.md new file mode 100644 index 000000000000..cfc0b3a2ca0e --- /dev/null +++ b/changelog/6160.bugfix.md @@ -0,0 +1 @@ +Consider entity roles/groups during interactive learning.
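Relating to the split-domain entry above (changelog/6132.feature.md), the layout below is a minimal, purely illustrative sketch of how a domain might be split across several YAML files; the file names and contents are assumptions for illustration, not part of the change itself.

```yaml
# domain/intents.yml (illustrative file name)
intents:
  - greet
  - goodbye
```

```yaml
# domain/responses.yml (illustrative file name)
responses:
  utter_greet:
    - text: "Hello!"
```

With a layout like this, training would point `--domain` at the directory, e.g. `rasa train --domain domain`, and all YAML files found there would be combined into a single domain as described in the entry.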
diff --git a/changelog/6191.bugfix.md b/changelog/6191.bugfix.md new file mode 100644 index 000000000000..5cad532e7124 --- /dev/null +++ b/changelog/6191.bugfix.md @@ -0,0 +1 @@ +If two entities are separated by a comma (or any other symbol), extract them as two separate entities. diff --git a/changelog/6198.bugfix.md b/changelog/6198.bugfix.md new file mode 100644 index 000000000000..7a2f6bd595e1 --- /dev/null +++ b/changelog/6198.bugfix.md @@ -0,0 +1 @@ +Add `EntityExtractor` as a required component for `EntitySynonymMapper` in a pipeline. diff --git a/changelog/6199.misc.md b/changelog/6199.misc.md new file mode 100644 index 000000000000..3424c2e83cfb --- /dev/null +++ b/changelog/6199.misc.md @@ -0,0 +1 @@ +Move `RestInput` from `rasa.core.channels.channel` to `rasa.core.channels.rest`. diff --git a/changelog/6226.improvement.md b/changelog/6226.improvement.md new file mode 100644 index 000000000000..65f0d2bc2d95 --- /dev/null +++ b/changelog/6226.improvement.md @@ -0,0 +1,7 @@ +Added more debugging logs to the [lock store documentation](./lock-stores.mdx) to +simplify debugging in case of connection problems. + +Added a new parameter `socket_timeout` to the `RedisLockStore`. If Redis doesn't +answer requests from Rasa Open Source within `socket_timeout` seconds, an error +is raised. This avoids seemingly infinitely blocking connections and exposes connection +problems early. diff --git a/changelog/6231.bugfix.md b/changelog/6231.bugfix.md new file mode 100644 index 000000000000..b32fe919ff0e --- /dev/null +++ b/changelog/6231.bugfix.md @@ -0,0 +1 @@ +When using the `DynamoTrackerStore`, if there are more than 100 DynamoDB tables, the tracker could attempt to re-create an existing table if that table was not among the first 100 listed by the DynamoDB API. diff --git a/changelog/6237.improvement.md b/changelog/6237.improvement.md new file mode 100644 index 000000000000..d40101ffc2ec --- /dev/null +++ b/changelog/6237.improvement.md @@ -0,0 +1 @@ +Support for other remote storages can be achieved by using an external library. \ No newline at end of file diff --git a/changelog/6280.bugfix.md b/changelog/6280.bugfix.md new file mode 100644 index 000000000000..bec548a24f11 --- /dev/null +++ b/changelog/6280.bugfix.md @@ -0,0 +1 @@ +Fixed `TypeError: expected string or bytes-like object` issue caused by integer, boolean, and null values in templates. diff --git a/changelog/6282.bugfix.md b/changelog/6282.bugfix.md new file mode 100644 index 000000000000..64668858281b --- /dev/null +++ b/changelog/6282.bugfix.md @@ -0,0 +1 @@ +Fixed a deprecation warning that popped up due to changes in numpy. \ No newline at end of file diff --git a/changelog/6291.bugfix.md b/changelog/6291.bugfix.md new file mode 100644 index 000000000000..028318f99e56 --- /dev/null +++ b/changelog/6291.bugfix.md @@ -0,0 +1,3 @@ +Update `rasabaster` to fix an issue with syntax highlighting on "Prototype an Assistant" page. + +Update default stories and rules on "Prototype an Assistant" page. diff --git a/changelog/6323.improvement.md b/changelog/6323.improvement.md new file mode 100644 index 000000000000..ea8deac1c390 --- /dev/null +++ b/changelog/6323.improvement.md @@ -0,0 +1,7 @@ +[Response selector templates](retrieval-actions.mdx) now support all features that +domain utterances do. They now use the YAML format instead of Markdown. +This means you can now use buttons, images, ... in your FAQ or chitchat responses +(assuming they are using the response selector).
+ +As a consequence, training data in Markdown format has to have the file +suffix `.md` from now on to allow proper file type detection. diff --git a/changelog/6340.bugfix.rst b/changelog/6340.bugfix.rst new file mode 100644 index 000000000000..507b48261742 --- /dev/null +++ b/changelog/6340.bugfix.rst @@ -0,0 +1,2 @@ +If two entities are separated by a single space and BILOU tagging is used, +extract them as two separate entities based on their BILOU tags. \ No newline at end of file diff --git a/changelog/6354.feature.md b/changelog/6354.feature.md new file mode 100644 index 000000000000..06574c7682f4 --- /dev/null +++ b/changelog/6354.feature.md @@ -0,0 +1,4 @@ +The Rasa Open Source API endpoint `POST /model/train` now supports training data in YAML +format. Please specify the header `Content-Type: application/yaml` when +training a model using YAML training data. +See the [API documentation](./http-api-spec.mdx) for more information. diff --git a/changelog/6354.removal.md b/changelog/6354.removal.md new file mode 100644 index 000000000000..2cf581ea3c5c --- /dev/null +++ b/changelog/6354.removal.md @@ -0,0 +1,4 @@ +Specifying the parameters `force` and `save_to_default_model_directory` as part of the +JSON payload when training a model using `POST /model/train` is now deprecated. +Please use the query parameters `force_training` and `save_to_default_model_directory` +instead. See the [API documentation](./http-api-spec.mdx) for more information. diff --git a/changelog/6374.feature.md b/changelog/6374.feature.md new file mode 100644 index 000000000000..d87358d535e2 --- /dev/null +++ b/changelog/6374.feature.md @@ -0,0 +1 @@ +Added a YAML schema and a writer for the 2.0 Core training data format. \ No newline at end of file diff --git a/changelog/6404.feature.md b/changelog/6404.feature.md new file mode 100644 index 000000000000..88d180150c57 --- /dev/null +++ b/changelog/6404.feature.md @@ -0,0 +1 @@ +Users can now use the `rasa data convert {nlu|core} -f yaml` command to convert training data from Markdown format to YAML format. \ No newline at end of file diff --git a/changelog/6409.removal.md b/changelog/6409.removal.md new file mode 100644 index 000000000000..35ac7642a428 --- /dev/null +++ b/changelog/6409.removal.md @@ -0,0 +1,6 @@ +The conversation event `form` was renamed to `active_loop`. Rasa Open Source +will continue to be able to read and process old `form` events. Note that +serialized trackers will no longer have the `active_form` field. Instead, the +`active_loop` field will contain the same information. Story representations +in Markdown and YAML will use `active_loop` instead of `form` to represent the +event. diff --git a/changelog/README.md b/changelog/README.md new file mode 100644 index 000000000000..218f6e076057 --- /dev/null +++ b/changelog/README.md @@ -0,0 +1,37 @@ +This directory contains "newsfragments" which are short files that contain a small +**Markdown**-formatted text that will be added to the next `CHANGELOG`. + +The `CHANGELOG` will be read by **users**, so this description should be aimed +at Rasa OSS users instead of describing internal changes which are only relevant +to the developers. + +Make sure to use full sentences in the **past or present tense** and use +punctuation, examples: + + Slots will be correctly interpolated if there are lists in custom response templates. + + Previously this resulted in no interpolation.
+ +Each file should be named like `<issue_number>.<type>.md`, where +`<issue_number>` is an issue / PR number, and `<type>` is one of: + +* `feature`: new user-facing features, like new command-line options and new behavior. +* `improvement`: improvement of existing functionality, usually without requiring user intervention. +* `bugfix`: fixes a reported bug. +* `doc`: documentation improvement, like rewording an entire section or adding missing docs. +* `removal`: feature deprecation or feature removal. +* `misc`: fixing a small typo or internal change, will not be included in the changelog. + +So for example: `123.feature.md`, `456.bugfix.md`. + +If your PR fixes an issue, use that number here. If there is no issue, +then after you submit the PR and get the PR number you can add a +changelog using that instead. + +If you are not sure what issue type to use, don't hesitate to ask in your PR. + +`towncrier` preserves multiple paragraphs and formatting (code blocks, lists, +and so on), but for entries other than `features` it is usually better to stick +to a single paragraph to keep it concise. You can install `towncrier` and then +run `towncrier --draft` if you want to get a preview of how your change will look +in the final release notes. diff --git a/changelog/_template.md.jinja2 b/changelog/_template.md.jinja2 new file mode 100644 index 000000000000..1369ad3fba7a --- /dev/null +++ b/changelog/_template.md.jinja2 @@ -0,0 +1,10 @@ +{# Based on https://github.com/hawkowl/towncrier/blob/master/src/towncrier/templates/default.rst #} +{% for section in sections %}{% if section %}{{section}}{% endif %}{% if sections[section] %}{% for category, val in definitions.items() if category in sections[section] %} + +{{ "### " + definitions[category]['name'] }} +{% if definitions[category]['showcontent'] %}{% for text, values in sections[section][category]|dictsort(by='value') %}{% set issue_joiner = joiner(', ') %}- {% for value in values|sort %}{{ issue_joiner() }}{{ value }}{% endfor %}: {{ text }} +{% endfor %}{% else %}- {{ sections[section][category]['']|sort|join(', ') }}{% endif %}{% if sections[section][category]|length == 0 %} No significant changes. + +{% else %}{% endif %}{% endfor %}{% else %} No significant changes.
+ +{% endif %}{% endfor %} \ No newline at end of file diff --git a/data/configs_for_docs/config_featurizers.yml b/data/configs_for_docs/config_featurizers.yml new file mode 100644 index 000000000000..76efec4bde19 --- /dev/null +++ b/data/configs_for_docs/config_featurizers.yml @@ -0,0 +1,23 @@ +language: "en" + +pipeline: + - name: ConveRTTokenizer + - name: ConveRTFeaturizer + alias: "convert" + - name: RegexFeaturizer + alias: "regex" + - name: LexicalSyntacticFeaturizer + alias: "lexical-syntactic" + - name: CountVectorsFeaturizer + alias: "cvf-word" + - name: CountVectorsFeaturizer + alias: "cvf-char" + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: DIETClassifier + epochs: 100 + - name: EntitySynonymMapper + - name: ResponseSelector + featurizers: ["convert", "cvf-word"] + epochs: 100 diff --git a/data/configs_for_docs/default_config.yml b/data/configs_for_docs/default_config.yml new file mode 100644 index 000000000000..6831910068ce --- /dev/null +++ b/data/configs_for_docs/default_config.yml @@ -0,0 +1,16 @@ +language: "fr" # your two-letter language code + +pipeline: + - name: WhitespaceTokenizer + - name: RegexFeaturizer + - name: LexicalSyntacticFeaturizer + - name: CountVectorsFeaturizer + - name: CountVectorsFeaturizer + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: DIETClassifier + epochs: 100 + - name: EntitySynonymMapper + - name: ResponseSelector + epochs: 100 \ No newline at end of file diff --git a/data/configs_for_docs/default_english_config.yml b/data/configs_for_docs/default_english_config.yml new file mode 100644 index 000000000000..0cc304f04d69 --- /dev/null +++ b/data/configs_for_docs/default_english_config.yml @@ -0,0 +1,17 @@ +language: "en" + +pipeline: + - name: ConveRTTokenizer + - name: ConveRTFeaturizer + - name: RegexFeaturizer + - name: LexicalSyntacticFeaturizer + - name: CountVectorsFeaturizer + - name: CountVectorsFeaturizer + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: DIETClassifier + epochs: 100 + - name: EntitySynonymMapper + - name: ResponseSelector + epochs: 100 diff --git a/data/configs_for_docs/default_spacy_config.yml b/data/configs_for_docs/default_spacy_config.yml new file mode 100644 index 000000000000..757036b54d5b --- /dev/null +++ b/data/configs_for_docs/default_spacy_config.yml @@ -0,0 +1,18 @@ +language: "fr" # your two-letter language code + +pipeline: + - name: SpacyNLP + - name: SpacyTokenizer + - name: SpacyFeaturizer + - name: RegexFeaturizer + - name: LexicalSyntacticFeaturizer + - name: CountVectorsFeaturizer + - name: CountVectorsFeaturizer + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: DIETClassifier + epochs: 100 + - name: EntitySynonymMapper + - name: ResponseSelector + epochs: 100 diff --git a/data/configs_for_docs/example_for_suggested_config.yml b/data/configs_for_docs/example_for_suggested_config.yml new file mode 100644 index 000000000000..ecddcbed95d4 --- /dev/null +++ b/data/configs_for_docs/example_for_suggested_config.yml @@ -0,0 +1,11 @@ +language: en + +pipeline: +# will be selected by the Suggested Config feature + +policies: +- name: MemoizationPolicy +- name: TEDPolicy + max_history: 5 + epochs: 10 +- name: MappingPolicy diff --git a/data/configs_for_docs/example_for_suggested_config_after_train.yml b/data/configs_for_docs/example_for_suggested_config_after_train.yml new file mode 100644 index 000000000000..091502e53515 --- /dev/null +++ b/data/configs_for_docs/example_for_suggested_config_after_train.yml @@ -0,0 +1,27 @@ +language: en + +pipeline: +# # No 
configuration for the NLU pipeline was provided. The following default pipeline was used to train your model. +# # If you'd like to customize it, uncomment and adjust the pipeline. +# # See https://rasa.com/docs/rasa/nlu/choosing-a-pipeline/ for more information. +# - name: ConveRTTokenizer +# - name: ConveRTFeaturizer +# - name: RegexFeaturizer +# - name: LexicalSyntacticFeaturizer +# - name: CountVectorsFeaturizer +# - name: CountVectorsFeaturizer +# analyzer: char_wb +# min_ngram: 1 +# max_ngram: 4 +# - name: DIETClassifier +# epochs: 100 +# - name: EntitySynonymMapper +# - name: ResponseSelector +# epochs: 100 + +policies: +- name: MemoizationPolicy +- name: TEDPolicy + max_history: 5 + epochs: 10 +- name: MappingPolicy diff --git a/data/configs_for_docs/pretrained_embeddings_convert_config.yml b/data/configs_for_docs/pretrained_embeddings_convert_config.yml new file mode 100644 index 000000000000..6b1f95a7c453 --- /dev/null +++ b/data/configs_for_docs/pretrained_embeddings_convert_config.yml @@ -0,0 +1,6 @@ +language: "en" + +pipeline: +- name: "ConveRTTokenizer" +- name: "ConveRTFeaturizer" +- name: "DIETClassifier" diff --git a/sample_configs/config_pretrained_embeddings_mitie.yml b/data/configs_for_docs/pretrained_embeddings_mitie_config_1.yml similarity index 100% rename from sample_configs/config_pretrained_embeddings_mitie.yml rename to data/configs_for_docs/pretrained_embeddings_mitie_config_1.yml diff --git a/sample_configs/config_pretrained_embeddings_mitie_2.yml b/data/configs_for_docs/pretrained_embeddings_mitie_config_2.yml similarity index 100% rename from sample_configs/config_pretrained_embeddings_mitie_2.yml rename to data/configs_for_docs/pretrained_embeddings_mitie_config_2.yml diff --git a/data/configs_for_docs/pretrained_embeddings_spacy_config.yml b/data/configs_for_docs/pretrained_embeddings_spacy_config.yml new file mode 100644 index 000000000000..14aca60c5a69 --- /dev/null +++ b/data/configs_for_docs/pretrained_embeddings_spacy_config.yml @@ -0,0 +1,11 @@ +language: "en" + +pipeline: +- name: "SpacyNLP" +- name: "SpacyTokenizer" +- name: "SpacyFeaturizer" +- name: "RegexFeaturizer" +- name: "CRFEntityExtractor" +- name: "EntitySynonymMapper" + +- name: "SklearnIntentClassifier" diff --git a/data/configs_for_docs/supervised_embeddings_config.yml b/data/configs_for_docs/supervised_embeddings_config.yml new file mode 100644 index 000000000000..7973f3f451ac --- /dev/null +++ b/data/configs_for_docs/supervised_embeddings_config.yml @@ -0,0 +1,13 @@ +language: "en" + +pipeline: +- name: "WhitespaceTokenizer" +- name: "RegexFeaturizer" +- name: "CRFEntityExtractor" +- name: "EntitySynonymMapper" +- name: "CountVectorsFeaturizer" +- name: "CountVectorsFeaturizer" + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 +- name: "DIETClassifier" diff --git a/data/examples/luis/demo-restaurants.json b/data/examples/luis/demo-restaurants_v2.json similarity index 100% rename from data/examples/luis/demo-restaurants.json rename to data/examples/luis/demo-restaurants_v2.json diff --git a/data/examples/luis/demo-restaurants_v4.json b/data/examples/luis/demo-restaurants_v4.json new file mode 100644 index 000000000000..0490e7b3e68a --- /dev/null +++ b/data/examples/luis/demo-restaurants_v4.json @@ -0,0 +1,244 @@ +{ + "luis_schema_version": "4.0.0", + "versionId": "0.1", + "name": "demo-restaurants", + "desc": "", + "culture": "en-us", + "tokenizerVersion": "1.0.0", + "intents": [ + { + "name": "affirm" + }, + { + "name": "goodbye" + }, + { + "name": "greet" + }, + { + "name": 
"inform" + }, + { + "name": "None" + } + ], + "entities": [ + { + "name": "cuisine" + }, + { + "name": "location" + } + ], + "bing_entities": [], + "actions": [], + "composites": [], + "closedLists": [], + "patternAnyEntities": [], + "regex_entities": [], + "prebuiltEntities": [], + "model_features": [], + "regex_features": [], + "patterns": [], + "settings": [], + "utterances": [ + { + "text": "hello", + "intent": "greet", + "entities": [] + }, + { + "text": "hey", + "intent": "greet", + "entities": [] + }, + { + "text": "hi", + "intent": "greet", + "entities": [] + }, + { + "text": "hey there", + "intent": "greet", + "entities": [] + }, + { + "text": "howdy", + "intent": "greet", + "entities": [] + }, + { + "text": "i'm looking for a place to eat", + "intent": "inform", + "entities": [] + }, + { + "text": "i'm looking for a place in the north of town", + "intent": "inform", + "entities": [ + { + "entity": "location", + "startPos": 31, + "endPos": 35 + } + ] + }, + { + "text": "show me chinese restaurants", + "intent": "greet", + "entities": [ + { + "entity": "cuisine", + "startPos": 8, + "endPos": 14 + } + ] + }, + { + "text": "yes", + "intent": "affirm", + "entities": [] + }, + { + "text": "yep", + "intent": "affirm", + "entities": [] + }, + { + "text": "yeah", + "intent": "affirm", + "entities": [] + }, + { + "text": "show me a mexican place in the centre", + "intent": "inform", + "entities": [ + { + "entity": "cuisine", + "startPos": 10, + "endPos": 16 + }, + { + "entity": "location", + "startPos": 31, + "endPos": 36 + } + ] + }, + { + "text": "bye", + "intent": "goodbye", + "entities": [] + }, + { + "text": "goodbye", + "intent": "goodbye", + "entities": [] + }, + { + "text": "good bye", + "intent": "goodbye", + "entities": [] + }, + { + "text": "stop", + "intent": "goodbye", + "entities": [] + }, + { + "text": "end", + "intent": "goodbye", + "entities": [] + }, + { + "text": "i am looking for an indian spot", + "intent": "inform", + "entities": [] + }, + { + "text": "search for restaurants", + "intent": "inform", + "entities": [] + }, + { + "text": "anywhere in the west", + "intent": "inform", + "entities": [ + { + "entity": "location", + "startPos": 16, + "endPos": 19 + } + ] + }, + { + "text": "central indian restaurant", + "intent": "greet", + "entities": [ + { + "entity": "cuisine", + "startPos": 8, + "endPos": 13 + }, + { + "entity": "location", + "startPos": 0, + "endPos": 6 + } + ] + }, + { + "text": "indeed", + "intent": "affirm", + "entities": [] + }, + { + "text": "that's right", + "intent": "affirm", + "entities": [] + }, + { + "text": "ok", + "intent": "affirm", + "entities": [] + }, + { + "text": "great", + "intent": "affirm", + "entities": [] + }, + { + "text": "do you know any good vietnamese places?", + "intent": "inform", + "entities": [ + { + "entity": "cuisine", + "startPos": 21, + "endPos": 30 + } + ] + }, + { + "text": "i want some russian food", + "intent": "inform", + "entities": [ + { + "entity": "cuisine", + "startPos": 12, + "endPos": 18 + } + ] + }, + { + "text": "any indonesian places?", + "intent": "inform", + "entities": [ + { + "entity": "cuisine", + "startPos": 4, + "endPos": 13 + } + ] + } + ] +} diff --git a/data/examples/luis/demo-restaurants_v5.json b/data/examples/luis/demo-restaurants_v5.json new file mode 100644 index 000000000000..034f8be1fd2f --- /dev/null +++ b/data/examples/luis/demo-restaurants_v5.json @@ -0,0 +1,242 @@ +{ + "luis_schema_version": "5.0.0", + "versionId": "0.1", + "name": "demo-restaurants", + "desc": "", + "culture": 
"en-us", + "tokenizerVersion": "1.0.0", + "intents": [ + { + "name": "affirm" + }, + { + "name": "goodbye" + }, + { + "name": "greet" + }, + { + "name": "inform" + }, + { + "name": "None" + } + ], + "entities": [ + { + "name": "cuisine" + }, + { + "name": "location" + } + ], + "composites": [], + "closedLists": [], + "patternAnyEntities": [], + "regex_entities": [], + "prebuiltEntities": [], + "model_features": [], + "regex_features": [], + "patterns": [], + "utterances": [ + { + "text": "hello", + "intent": "greet", + "entities": [] + }, + { + "text": "hey", + "intent": "greet", + "entities": [] + }, + { + "text": "hi", + "intent": "greet", + "entities": [] + }, + { + "text": "hey there", + "intent": "greet", + "entities": [] + }, + { + "text": "howdy", + "intent": "greet", + "entities": [] + }, + { + "text": "i'm looking for a place to eat", + "intent": "inform", + "entities": [] + }, + { + "text": "i'm looking for a place in the north of town", + "intent": "inform", + "entities": [ + { + "entity": "location", + "startPos": 31, + "endPos": 35 + } + ] + }, + { + "text": "show me chinese restaurants", + "intent": "greet", + "entities": [ + { + "entity": "cuisine", + "startPos": 8, + "endPos": 14 + } + ] + }, + { + "text": "yes", + "intent": "affirm", + "entities": [] + }, + { + "text": "yep", + "intent": "affirm", + "entities": [] + }, + { + "text": "yeah", + "intent": "affirm", + "entities": [] + }, + { + "text": "show me a mexican place in the centre", + "intent": "inform", + "entities": [ + { + "entity": "cuisine", + "startPos": 10, + "endPos": 16 + }, + { + "entity": "location", + "startPos": 31, + "endPos": 36 + } + ] + }, + { + "text": "bye", + "intent": "goodbye", + "entities": [] + }, + { + "text": "goodbye", + "intent": "goodbye", + "entities": [] + }, + { + "text": "good bye", + "intent": "goodbye", + "entities": [] + }, + { + "text": "stop", + "intent": "goodbye", + "entities": [] + }, + { + "text": "end", + "intent": "goodbye", + "entities": [] + }, + { + "text": "i am looking for an indian spot", + "intent": "inform", + "entities": [] + }, + { + "text": "search for restaurants", + "intent": "inform", + "entities": [] + }, + { + "text": "anywhere in the west", + "intent": "inform", + "entities": [ + { + "entity": "location", + "startPos": 16, + "endPos": 19 + } + ] + }, + { + "text": "central indian restaurant", + "intent": "greet", + "entities": [ + { + "entity": "cuisine", + "startPos": 8, + "endPos": 13 + }, + { + "entity": "location", + "startPos": 0, + "endPos": 6 + } + ] + }, + { + "text": "indeed", + "intent": "affirm", + "entities": [] + }, + { + "text": "that's right", + "intent": "affirm", + "entities": [] + }, + { + "text": "ok", + "intent": "affirm", + "entities": [] + }, + { + "text": "great", + "intent": "affirm", + "entities": [] + }, + { + "text": "do you know any good vietnamese places?", + "intent": "inform", + "entities": [ + { + "entity": "cuisine", + "startPos": 21, + "endPos": 30 + } + ] + }, + { + "text": "i want some russian food", + "intent": "inform", + "entities": [ + { + "entity": "cuisine", + "startPos": 12, + "endPos": 18 + } + ] + }, + { + "text": "any indonesian places?", + "intent": "inform", + "entities": [ + { + "entity": "cuisine", + "startPos": 4, + "endPos": 13 + } + ] + } + ], + "settings": [] +} diff --git a/data/examples/rasa/demo-rasa-multi-intent.md b/data/examples/rasa/demo-rasa-multi-intent.md new file mode 100644 index 000000000000..d94a81455335 --- /dev/null +++ b/data/examples/rasa/demo-rasa-multi-intent.md @@ -0,0 +1,71 @@ +## 
intent:affirm +- yes +- yep +- yeah +- indeed +- that's right +- ok +- great +- right, thank you +- correct +- great choice +- sounds really good + +## intent:goodbye +- bye +- goodbye +- good bye +- stop +- end +- farewell +- Bye bye +- have a good one + +## intent:greet +- hey +- howdy +- hey there +- hello +- hi +- good morning +- good evening +- dear sir + +## intent:chitchat+ask_name +- What's your name? +- What can I call you? + +## intent:chitchat+ask_weather +- How's the weather? +- Is it too hot outside? + +## intent:restaurant_search +- i'm looking for a place to eat +- I want to grab lunch +- I am searching for a dinner spot +- i'm looking for a place in the [north](location) of town +- show me [chinese](cuisine) restaurants +- show me [chines](cuisine:chinese) restaurants in the [north](location) +- show me a [mexican](cuisine) place in the [centre](location) +- i am looking for an [indian](cuisine) spot called olaolaolaolaolaola +- search for restaurants +- anywhere in the [west](location) +- anywhere near [18328](location) +- I am looking for [asian fusion](cuisine) food +- I am looking a restaurant in [29432](location) +- I am looking for [mexican indian fusion](cuisine) +- [central](location) [indian](cuisine) restaurant + +## synonym:chinese ++ Chines +* Chinese + +## synonym:vegetarian +- vegg +- veggie + +## regex:zipcode +- [0-9]{5} + +## regex:greet +- hey[^\s]* \ No newline at end of file diff --git a/data/examples/rasa/demo-rasa.json b/data/examples/rasa/demo-rasa.json index 4b86741260d6..ae644add933a 100644 --- a/data/examples/rasa/demo-rasa.json +++ b/data/examples/rasa/demo-rasa.json @@ -23,17 +23,17 @@ "common_examples": [ { "text": "hey", - "intent": "greet", + "intent": "greet", "entities": [] }, { "text": "howdy", - "intent": "greet", + "intent": "greet", "entities": [] }, { "text": "hey there", - "intent": "greet", + "intent": "greet", "entities": [] }, { diff --git a/data/rasa_yaml_examples/nlu.yml b/data/rasa_yaml_examples/nlu.yml new file mode 100644 index 000000000000..25d182652b44 --- /dev/null +++ b/data/rasa_yaml_examples/nlu.yml @@ -0,0 +1,10 @@ +nlu: +- intent: estimate_emissions + # Arbitrary metadata + metadata: + author: Some example metadata! + key: value + # Multiline examples, each line is a separate training example. + examples: | + how much CO2 will that use? + how much carbon will a one way flight from [new york]{"entity": "city", "role": "from"} to california produce? diff --git a/data/test/config_embedding_test.yml b/data/test/config_embedding_test.yml index 379e1e2e3ef7..1c9eb116fb09 100644 --- a/data/test/config_embedding_test.yml +++ b/data/test/config_embedding_test.yml @@ -2,5 +2,5 @@ language: en pipeline: - name: "CountVectorsFeaturizer" max_ngram: 3 -- name: "EmbeddingIntentClassifier" - epochs: 10 \ No newline at end of file +- name: "DIETClassifier" + epochs: 10 diff --git a/data/test/demo-rasa-composite-entities.md b/data/test/demo-rasa-composite-entities.md new file mode 100644 index 000000000000..f5e5017f449b --- /dev/null +++ b/data/test/demo-rasa-composite-entities.md @@ -0,0 +1,56 @@ +## intent:affirm +- yes +- yep +- yeah +- indeed +- that's right +- ok +- great +- right, thank you +- correct +- great choice +- sounds really good + +## intent:goodbye +- bye +- goodbye +- good bye +- stop +- end +- farewell +- Bye bye +- have a good one + +## intent:greet +- hey +- howdy +- hey there +- hello +- hi +- good morning +- good evening +- dear sir + +## intent:chitchat +- What's your name? +- What can I call you? 
+- How's the weather? +- Is it too hot outside? + +## intent:book_flight +- i'm looking for a flight +- I want to book a flight +- i'm looking for a flight to [Berlin]{"entity": "location", "role": "to"} +- show me flights from [Amsterdam]{"entity": "location", "role": "from"} +- show me flights to [London]{"entity": "location", "role": "to"} +- i am looking for a flight from [SF]{"entity": "location", "value": "San Fransisco", "role": "from"} to [New York]{"entity": "location", "role": "to"} +- search for flights +- from [Madrid]{"entity": "location", "role": "from"} to [Munich]{"entity": "location", "role": "to"} +- any flight to [Liverpool]{"entity": "location", "role": "to"} + +## intent:order_pizza +- i want a [large]{"entity": "size", "group": "1"} pizza with [tomato]{"entity": "topping", "group": "1"} and a [small]{"entity": "size", "group": "2"} pizza with [bacon]{"entity": "topping", "group": "2"} +- one [large]{"entity": "size", "group": "1"} with [pepperoni]{"entity": "topping", "group": "1"} and a [medium]{"entity": "size", "group": "2"} with [mushrooms]{"entity": "topping", "group": "2"} +- I would like a [medium]{"entity": "size", "group": "1"} standard pizza and a [medium]{"entity": "size", "group": "2"} pizza with [extra cheese]{"entity": "topping", "group": "2"} +- [large]{"entity": "size", "group": "1"} with [onions]{"entity": "topping", "group": "1"} and [small]{"entity": "size", "group": "2"} with [olives]{"entity": "topping", "group": "2"} +- a pizza with [onions]{"entity": "topping", "group": "1"} in [medium]{"entity": "size", "group": "1"} and one with [mushrooms]{"entity": "topping", "group": "2"} in [small]{"entity": "size", "group": "2"} please \ No newline at end of file diff --git a/data/test/duplicate_intents_markdown/demo-rasa-intents-1.md b/data/test/duplicate_intents_markdown/demo-rasa-intents-1.md new file mode 100644 index 000000000000..cadfbff88ac2 --- /dev/null +++ b/data/test/duplicate_intents_markdown/demo-rasa-intents-1.md @@ -0,0 +1,32 @@ +## intent:affirm +- yes +- yep +- yeah +- indeed +- that's right +- ok +- great +- right, thank you +- correct +- great choice +- sounds really good + +## intent:goodbye +- bye +- goodbye +- good bye +- good bye +- good bye +- good bye +- stop +- end + +## intent:greet +- hey +- howdy +- hey there +- hello +- hi +- good morning +- good evening +- dear sir \ No newline at end of file diff --git a/data/test/duplicate_intents_markdown/demo-rasa-intents-2.md b/data/test/duplicate_intents_markdown/demo-rasa-intents-2.md new file mode 100644 index 000000000000..7c960a8cd93b --- /dev/null +++ b/data/test/duplicate_intents_markdown/demo-rasa-intents-2.md @@ -0,0 +1,57 @@ + +## intent:affirm +- yes +- yep +- yeah +- indeed +- that's right +- ok +- great +- right, thank you +- correct +- great choice +- sounds really good + +## intent:goodbye +- farewell +- Bye bye +- have a good one + +## intent:restaurant_search +- i'm looking for a place to eat +- I want to grab lunch +- I am searching for a dinner spot +- i'm looking for a place in the [north](location) of town +- show me [chinese](cuisine) restaurants +- show me [chines](cuisine:chinese) restaurants in the [north](location) +- show me a [mexican](cuisine) place in the [centre](location) +- i am looking for an [indian](cuisine) spot +- search for restaurants +- anywhere in the [west](location) +- anywhere near [18328](location) +- I am looking for [asian fusion](cuisine) food +- I am looking a restaurant in [29432](location) +- I am looking for [mexican indian 
fusion](cuisine) +- [central](location) [indian](cuisine) restaurant + +## intent:chitchat/ask_name +- What's your name? +- What can I call you? + +## intent:chitchat/ask_weather +- How's the weather? +- Is it too hot outside? + +## synonym:chinese ++ Chines +* Chinese + +## synonym:vegetarian +- vegg +- veggie + +## regex:zipcode +- [0-9]{5} + +## regex:greet +- hey[^\s]* \ No newline at end of file diff --git a/data/test/lookup_tables/lookup_table.json b/data/test/lookup_tables/lookup_table.json index 33d974f4dc7f..fefba028797a 100644 --- a/data/test/lookup_tables/lookup_table.json +++ b/data/test/lookup_tables/lookup_table.json @@ -4,10 +4,6 @@ { "name": "plates", "elements": "data/test/lookup_tables/plates.txt" - }, - { - "name": "drinks", - "elements": ["mojito", "lemonade", "sweet berry wine", "tea", "club mate"] } ], "common_examples": [ diff --git a/data/test/lookup_tables/lookup_table.md b/data/test/lookup_tables/lookup_table.md index de336a7ea759..a214cf9cfc69 100644 --- a/data/test/lookup_tables/lookup_table.md +++ b/data/test/lookup_tables/lookup_table.md @@ -6,10 +6,3 @@ ## lookup:plates data/test/lookup_tables/plates.txt - -## lookup:drinks - - mojito - - lemonade - - sweet berry wine - - tea - - club mate \ No newline at end of file diff --git a/data/test/many_intents.md b/data/test/many_intents.md new file mode 100644 index 000000000000..84a0e1f4b246 --- /dev/null +++ b/data/test/many_intents.md @@ -0,0 +1,64 @@ +## intent:handleinsult +- you are an idiot +- You lack understanding. + +## intent:grett +- Hello +- Hi +- Welcome + +## intent:thank +- Thanks +- Thank you + +## intent:telljoke +- Tell me something that you think will make me laugh. +- Entertain me with a joke. + +## intent:signup_newsletter +- I wanna sign up for the newsletter. +- I want to sign up for the newsletter. + +## intent:react_positive +- you are funny +- thats funny + +## intent:react_negative +- i am sad +- bad + +## intent:how_to_get_started +- how do I get started with rasa +- how do I use rasa + +## intent:technical_question +- what is duckling +- where to train intents in rasa? + +## intent:source_code +- how it works? +- where can i find this code + +## intent:pipeline_recommendation +- what pipeline should I start with? +- what is the right pipeline to choose? + +## intent:rasa_cost +- is rasa free +- are you really free + +## intent:nicetomeeyou +- It’s great connecting with you. +- Hi, nice to meet you! + +## intent:nlu_generation_tool_recommendation +- which tools can I use to create nlu data +- how can I get nlu data + +## intent:install_rasa +- I want to install Rasa Stack +- How to install Rasa? 
+ +## intent:ask_which_events +- Which community events do you have +- Where can I meet Rasas \ No newline at end of file diff --git a/data/test/markdown_single_sections/empty_section.md b/data/test/markdown_single_sections/empty_section.md new file mode 100644 index 000000000000..6f021f6ffa2b --- /dev/null +++ b/data/test/markdown_single_sections/empty_section.md @@ -0,0 +1,8 @@ +## regex:greet +- hey[^\s]* + +## synonym:animal + +## lookup:chinese +- Chines +- Chinese \ No newline at end of file diff --git a/data/test/markdown_single_sections/incorrect_nlu_format.md b/data/test/markdown_single_sections/incorrect_nlu_format.md new file mode 100644 index 000000000000..11ab5e68f7cf --- /dev/null +++ b/data/test/markdown_single_sections/incorrect_nlu_format.md @@ -0,0 +1,4 @@ +## deny +- non, merci +- non merci +- non \ No newline at end of file diff --git a/data/test/markdown_single_sections/not_existing_section.md b/data/test/markdown_single_sections/not_existing_section.md new file mode 100644 index 000000000000..53d242c691d4 --- /dev/null +++ b/data/test/markdown_single_sections/not_existing_section.md @@ -0,0 +1,8 @@ +## synonym:animal + +## entity:human +- person + +## lookup:chinese +- Chines +- Chinese \ No newline at end of file diff --git a/data/test/markdown_single_sections/section_with_delimiter.md b/data/test/markdown_single_sections/section_with_delimiter.md new file mode 100644 index 000000000000..d02705bd5d44 --- /dev/null +++ b/data/test/markdown_single_sections/section_with_delimiter.md @@ -0,0 +1,2 @@ +## synonym:10:00 +- 10:00 am diff --git a/sample_configs/config_crf_custom_features.yml b/data/test_config/config_crf_custom_features.yml similarity index 100% rename from sample_configs/config_crf_custom_features.yml rename to data/test_config/config_crf_custom_features.yml diff --git a/data/test_config/config_crf_no_pattern_feature.yml b/data/test_config/config_crf_no_pattern_feature.yml new file mode 100644 index 000000000000..cac2cf1a96a2 --- /dev/null +++ b/data/test_config/config_crf_no_pattern_feature.yml @@ -0,0 +1,7 @@ +language: en +pipeline: + - name: "WhitespaceTokenizer" + - name: "RegexFeaturizer" + - name: "CRFEntityExtractor" + features: [['low', 'title', 'upper'],['bias', 'low', 'digit'],['low', 'title', 'upper']] + - name: "EntitySynonymMapper" \ No newline at end of file diff --git a/data/test_config/config_crf_no_regex.yml b/data/test_config/config_crf_no_regex.yml new file mode 100644 index 000000000000..3d916ede50d2 --- /dev/null +++ b/data/test_config/config_crf_no_regex.yml @@ -0,0 +1,5 @@ +language: en +pipeline: + - name: "WhitespaceTokenizer" + - name: "CRFEntityExtractor" + - name: "EntitySynonymMapper" \ No newline at end of file diff --git a/data/test_config/config_crf_no_synonyms.yml b/data/test_config/config_crf_no_synonyms.yml new file mode 100644 index 000000000000..543bc85a966c --- /dev/null +++ b/data/test_config/config_crf_no_synonyms.yml @@ -0,0 +1,4 @@ +language: en +pipeline: + - name: "WhitespaceTokenizer" + - name: "CRFEntityExtractor" \ No newline at end of file diff --git a/sample_configs/config_defaults.yml b/data/test_config/config_defaults.yml similarity index 100% rename from sample_configs/config_defaults.yml rename to data/test_config/config_defaults.yml diff --git a/data/test_config/config_embedding_intent_response_selector.yml b/data/test_config/config_embedding_intent_response_selector.yml new file mode 100644 index 000000000000..b2be5582ade5 --- /dev/null +++ 
b/data/test_config/config_embedding_intent_response_selector.yml @@ -0,0 +1,9 @@ +language: "en" + +pipeline: + - name: "WhitespaceTokenizer" + - name: "CountVectorsFeaturizer" + - name: "DIETClassifier" + epochs: 2 + - name: "ResponseSelector" + epochs: 2 diff --git a/data/test_config/config_empty.yml b/data/test_config/config_empty.yml new file mode 100644 index 000000000000..cfdf8c427e3d --- /dev/null +++ b/data/test_config/config_empty.yml @@ -0,0 +1,3 @@ +language: en +pipeline: +policies: diff --git a/data/test_config/config_empty_after_dumping.yml b/data/test_config/config_empty_after_dumping.yml new file mode 100644 index 000000000000..2c7694c1f0fc --- /dev/null +++ b/data/test_config/config_empty_after_dumping.yml @@ -0,0 +1,28 @@ +language: en +pipeline: +# # No configuration for the NLU pipeline was provided. The following default pipeline was used to train your model. +# # If you'd like to customize it, uncomment and adjust the pipeline. +# # See https://rasa.com/docs/rasa/nlu/choosing-a-pipeline/ for more information. +# - name: ConveRTTokenizer +# - name: ConveRTFeaturizer +# - name: RegexFeaturizer +# - name: LexicalSyntacticFeaturizer +# - name: CountVectorsFeaturizer +# - name: CountVectorsFeaturizer +# analyzer: char_wb +# min_ngram: 1 +# max_ngram: 4 +# - name: DIETClassifier +# epochs: 100 +# - name: EntitySynonymMapper +# - name: ResponseSelector +# epochs: 100 +policies: +# # No configuration for policies was provided. The following default policies were used to train your model. +# # If you'd like to customize them, uncomment and adjust the policies. +# # See https://rasa.com/docs/rasa/core/policies/ for more information. +# - name: MemoizationPolicy +# - name: TEDPolicy +# max_history: 5 +# epochs: 100 +# - name: RulePolicy diff --git a/data/test_config/config_empty_after_dumping_windows.yml b/data/test_config/config_empty_after_dumping_windows.yml new file mode 100644 index 000000000000..e9a69abd05f5 --- /dev/null +++ b/data/test_config/config_empty_after_dumping_windows.yml @@ -0,0 +1,27 @@ +language: en +pipeline: +# # No configuration for the NLU pipeline was provided. The following default pipeline was used to train your model. +# # If you'd like to customize it, uncomment and adjust the pipeline. +# # See https://rasa.com/docs/rasa/nlu/choosing-a-pipeline/ for more information. +# - name: WhitespaceTokenizer +# - name: RegexFeaturizer +# - name: LexicalSyntacticFeaturizer +# - name: CountVectorsFeaturizer +# - name: CountVectorsFeaturizer +# analyzer: char_wb +# min_ngram: 1 +# max_ngram: 4 +# - name: DIETClassifier +# epochs: 100 +# - name: EntitySynonymMapper +# - name: ResponseSelector +# epochs: 100 +policies: +# # No configuration for policies was provided. The following default policies were used to train your model. +# # If you'd like to customize them, uncomment and adjust the policies. +# # See https://rasa.com/docs/rasa/core/policies/ for more information. 
+# - name: MemoizationPolicy +# - name: TEDPolicy +# max_history: 5 +# epochs: 100 +# - name: RulePolicy diff --git a/data/test_config/config_language_only.yml b/data/test_config/config_language_only.yml new file mode 100644 index 000000000000..d867310af68b --- /dev/null +++ b/data/test_config/config_language_only.yml @@ -0,0 +1 @@ +language: en diff --git a/data/test_config/config_pipeline_empty.yml b/data/test_config/config_pipeline_empty.yml new file mode 100644 index 000000000000..cf67ddb35ac7 --- /dev/null +++ b/data/test_config/config_pipeline_empty.yml @@ -0,0 +1,17 @@ +language: en + +pipeline: + +policies: + - name: "KerasPolicy" + featurizer: + - name: MaxHistoryTrackerFeaturizer + max_history: 5 + state_featurizer: + - name: BinarySingleStateFeaturizer + - name: "MemoizationPolicy" + max_history: 5 + - name: "FallbackPolicy" + nlu_threshold: 0.4 + core_threshold: 0.3 + fallback_action_name: "my_fallback_action" diff --git a/data/test_config/config_pipeline_missing.yml b/data/test_config/config_pipeline_missing.yml new file mode 100644 index 000000000000..86e126400d16 --- /dev/null +++ b/data/test_config/config_pipeline_missing.yml @@ -0,0 +1,15 @@ +language: en + +policies: + - name: "KerasPolicy" + featurizer: + - name: MaxHistoryTrackerFeaturizer + max_history: 5 + state_featurizer: + - name: BinarySingleStateFeaturizer + - name: "MemoizationPolicy" + max_history: 5 + - name: "FallbackPolicy" + nlu_threshold: 0.4 + core_threshold: 0.3 + fallback_action_name: "my_fallback_action" diff --git a/data/test_config/config_policies_empty.yml b/data/test_config/config_policies_empty.yml new file mode 100644 index 000000000000..7c201969d7a4 --- /dev/null +++ b/data/test_config/config_policies_empty.yml @@ -0,0 +1,19 @@ +language: en + +pipeline: + - name: ConveRTTokenizer + - name: ConveRTFeaturizer + - name: RegexFeaturizer + - name: LexicalSyntacticFeaturizer + - name: CountVectorsFeaturizer + - name: CountVectorsFeaturizer + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: DIETClassifier + epochs: 100 + - name: EntitySynonymMapper + - name: ResponseSelector + epochs: 100 + +policies: diff --git a/data/test_config/config_policies_missing.yml b/data/test_config/config_policies_missing.yml new file mode 100644 index 000000000000..8cf8f6d7954d --- /dev/null +++ b/data/test_config/config_policies_missing.yml @@ -0,0 +1,17 @@ +language: en + +pipeline: + - name: ConveRTTokenizer + - name: ConveRTFeaturizer + - name: RegexFeaturizer + - name: LexicalSyntacticFeaturizer + - name: CountVectorsFeaturizer + - name: CountVectorsFeaturizer + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: DIETClassifier + epochs: 100 + - name: EntitySynonymMapper + - name: ResponseSelector + epochs: 100 diff --git a/data/test_config/config_pretrained_embeddings_convert.yml b/data/test_config/config_pretrained_embeddings_convert.yml new file mode 100644 index 000000000000..7f5fb5f8e34c --- /dev/null +++ b/data/test_config/config_pretrained_embeddings_convert.yml @@ -0,0 +1,6 @@ +language: "en" + +pipeline: + - name: ConveRTTokenizer + - name: ConveRTFeaturizer + - name: DIETClassifier diff --git a/data/test_config/config_pretrained_embeddings_mitie.yml b/data/test_config/config_pretrained_embeddings_mitie.yml new file mode 100644 index 000000000000..d1b8b86dd953 --- /dev/null +++ b/data/test_config/config_pretrained_embeddings_mitie.yml @@ -0,0 +1,11 @@ +language: "en" + +pipeline: +- name: "MitieNLP" + model: "data/total_word_feature_extractor.dat" +- name: "MitieTokenizer" +- name: 
"MitieEntityExtractor" +- name: "EntitySynonymMapper" +- name: "RegexFeaturizer" +- name: "MitieFeaturizer" +- name: "SklearnIntentClassifier" diff --git a/data/test_config/config_pretrained_embeddings_mitie_2.yml b/data/test_config/config_pretrained_embeddings_mitie_2.yml new file mode 100644 index 000000000000..356eb898e812 --- /dev/null +++ b/data/test_config/config_pretrained_embeddings_mitie_2.yml @@ -0,0 +1,10 @@ +language: "en" + +pipeline: +- name: "MitieNLP" + model: "data/total_word_feature_extractor.dat" +- name: "MitieTokenizer" +- name: "MitieEntityExtractor" +- name: "EntitySynonymMapper" +- name: "RegexFeaturizer" +- name: "MitieIntentClassifier" diff --git a/sample_configs/config_pretrained_embeddings_mitie_zh.yml b/data/test_config/config_pretrained_embeddings_mitie_zh.yml similarity index 100% rename from sample_configs/config_pretrained_embeddings_mitie_zh.yml rename to data/test_config/config_pretrained_embeddings_mitie_zh.yml diff --git a/data/test_config/config_pretrained_embeddings_spacy.yml b/data/test_config/config_pretrained_embeddings_spacy.yml new file mode 100644 index 000000000000..c5380326dafe --- /dev/null +++ b/data/test_config/config_pretrained_embeddings_spacy.yml @@ -0,0 +1,10 @@ +language: "en" + +pipeline: + - name: SpacyNLP + - name: SpacyTokenizer + - name: SpacyFeaturizer + - name: RegexFeaturizer + - name: CRFEntityExtractor + - name: EntitySynonymMapper + - name: SklearnIntentClassifier diff --git a/data/test_config/config_pretrained_embeddings_spacy_de.yml b/data/test_config/config_pretrained_embeddings_spacy_de.yml new file mode 100644 index 000000000000..c3e337132ece --- /dev/null +++ b/data/test_config/config_pretrained_embeddings_spacy_de.yml @@ -0,0 +1,10 @@ +language: "de" + +pipeline: + - name: SpacyNLP + - name: SpacyTokenizer + - name: SpacyFeaturizer + - name: RegexFeaturizer + - name: CRFEntityExtractor + - name: EntitySynonymMapper + - name: SklearnIntentClassifier diff --git a/sample_configs/config_pretrained_embeddings_spacy_duckling.yml b/data/test_config/config_pretrained_embeddings_spacy_duckling.yml similarity index 100% rename from sample_configs/config_pretrained_embeddings_spacy_duckling.yml rename to data/test_config/config_pretrained_embeddings_spacy_duckling.yml diff --git a/data/test_config/config_spacy_entity_extractor.yml b/data/test_config/config_spacy_entity_extractor.yml new file mode 100644 index 000000000000..26b4fc84922d --- /dev/null +++ b/data/test_config/config_spacy_entity_extractor.yml @@ -0,0 +1,8 @@ +language: en +pipeline: + - name: "SpacyNLP" + - name: "SpacyTokenizer" + - name: "SpacyFeaturizer" + - name: "RegexFeaturizer" + - name: "SpacyEntityExtractor" + - name: "EntitySynonymMapper" \ No newline at end of file diff --git a/data/test_config/config_supervised_embeddings.yml b/data/test_config/config_supervised_embeddings.yml new file mode 100644 index 000000000000..c3bd3a2b51a6 --- /dev/null +++ b/data/test_config/config_supervised_embeddings.yml @@ -0,0 +1,13 @@ +language: "en" + +pipeline: + - name: WhitespaceTokenizer + - name: RegexFeaturizer + - name: CRFEntityExtractor + - name: EntitySynonymMapper + - name: CountVectorsFeaturizer + - name: CountVectorsFeaturizer + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: DIETClassifier diff --git a/sample_configs/config_supervised_embeddings_duckling.yml b/data/test_config/config_supervised_embeddings_duckling.yml similarity index 76% rename from sample_configs/config_supervised_embeddings_duckling.yml rename to 
data/test_config/config_supervised_embeddings_duckling.yml index c1771ea5addc..791b93dd89be 100644 --- a/sample_configs/config_supervised_embeddings_duckling.yml +++ b/data/test_config/config_supervised_embeddings_duckling.yml @@ -2,6 +2,7 @@ language: "en" pipeline: - name: "CountVectorsFeaturizer" -- name: "EmbeddingIntentClassifier" +- name: "DIETClassifier" + epochs: 2 - name: "DucklingHTTPExtractor" url: "http://duckling:8000" diff --git a/sample_configs/config_train_server_json.yml b/data/test_config/config_train_server_json.yml similarity index 55% rename from sample_configs/config_train_server_json.yml rename to data/test_config/config_train_server_json.yml index d2c5b6ed132f..257986275cc0 100644 --- a/sample_configs/config_train_server_json.yml +++ b/data/test_config/config_train_server_json.yml @@ -1,6 +1,13 @@ language: "en" -pipeline: "pretrained_embeddings_spacy" +pipeline: + - name: SpacyNLP + - name: SpacyTokenizer + - name: SpacyFeaturizer + - name: RegexFeaturizer + - name: CRFEntityExtractor + - name: EntitySynonymMapper + - name: SklearnIntentClassifier # data contains the same json, as described in the training data section data: { diff --git a/data/test_config/config_train_server_md.yml b/data/test_config/config_train_server_md.yml new file mode 100644 index 000000000000..d440f4ea1a31 --- /dev/null +++ b/data/test_config/config_train_server_md.yml @@ -0,0 +1,20 @@ +language: "en" + +pipeline: + - name: SpacyNLP + - name: SpacyTokenizer + - name: SpacyFeaturizer + - name: RegexFeaturizer + - name: CRFEntityExtractor + - name: EntitySynonymMapper + - name: SklearnIntentClassifier + +# data contains the same md, as described in the training data section +data: | + ## intent:affirm + - yes + - yep + + ## intent:goodbye + - bye + - goodbye diff --git a/data/test_config/config_with_comments.yml b/data/test_config/config_with_comments.yml new file mode 100644 index 000000000000..195ee51fee4b --- /dev/null +++ b/data/test_config/config_with_comments.yml @@ -0,0 +1,25 @@ +# here is some comment +language: en + +# another comment +pipeline: + - name: ConveRTTokenizer + - name: ConveRTFeaturizer + - name: RegexFeaturizer + - name: LexicalSyntacticFeaturizer + - name: CountVectorsFeaturizer + - name: CountVectorsFeaturizer + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: DIETClassifier + epochs: 100 + - name: EntitySynonymMapper + - name: ResponseSelector + epochs: 100 +# all the comments +# so many +policies: # even here +# this one will disappear + +# comments everywhere diff --git a/data/test_config/config_with_comments_after_dumping.yml b/data/test_config/config_with_comments_after_dumping.yml new file mode 100644 index 000000000000..f499b734317b --- /dev/null +++ b/data/test_config/config_with_comments_after_dumping.yml @@ -0,0 +1,32 @@ +# here is some comment +language: en + +# another comment +pipeline: + - name: ConveRTTokenizer + - name: ConveRTFeaturizer + - name: RegexFeaturizer + - name: LexicalSyntacticFeaturizer + - name: CountVectorsFeaturizer + - name: CountVectorsFeaturizer + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: DIETClassifier + epochs: 100 + - name: EntitySynonymMapper + - name: ResponseSelector + epochs: 100 +# all the comments +# so many +policies: # even here +# # No configuration for policies was provided. The following default policies were used to train your model. +# # If you'd like to customize them, uncomment and adjust the policies. +# # See https://rasa.com/docs/rasa/core/policies/ for more information. 
+# - name: MemoizationPolicy +# - name: TEDPolicy +# max_history: 5 +# epochs: 100 +# - name: RulePolicy + +# comments everywhere diff --git a/data/test_config/embedding_random_seed.yaml b/data/test_config/embedding_random_seed.yaml index c2bd5bb86918..53ee82926eb3 100644 --- a/data/test_config/embedding_random_seed.yaml +++ b/data/test_config/embedding_random_seed.yaml @@ -1,3 +1,4 @@ policies: -- name: EmbeddingPolicy +- name: TEDPolicy random_seed: 42 + epochs: 2 diff --git a/data/test_config/keyword_classifier_config.yml b/data/test_config/keyword_classifier_config.yml new file mode 100644 index 000000000000..c6a6eca5ef61 --- /dev/null +++ b/data/test_config/keyword_classifier_config.yml @@ -0,0 +1,2 @@ +pipeline: + - name: "KeywordIntentClassifier" diff --git a/data/test_config/max_hist_config.yml b/data/test_config/max_hist_config.yml index 04acbe381034..3e86d53b91f1 100644 --- a/data/test_config/max_hist_config.yml +++ b/data/test_config/max_hist_config.yml @@ -1,5 +1,5 @@ policies: - name: MemoizationPolicy max_history: 5 - - name: KerasPolicy + - name: TEDPolicy max_history: 5 diff --git a/data/test_config/no_max_hist_config.yml b/data/test_config/no_max_hist_config.yml index 5ea9e2c00521..3d478a200115 100644 --- a/data/test_config/no_max_hist_config.yml +++ b/data/test_config/no_max_hist_config.yml @@ -1,3 +1,3 @@ policies: - name: MemoizationPolicy - - name: KerasPolicy + - name: TEDPolicy diff --git a/data/test_config/stack_config.yml b/data/test_config/stack_config.yml index 76e392eb11e2..6f778556ba8f 100644 --- a/data/test_config/stack_config.yml +++ b/data/test_config/stack_config.yml @@ -1,7 +1,8 @@ # Configuration for Rasa NLU. # https://rasa.com/docs/rasa/nlu/components/ language: en -pipeline: keyword +pipeline: + - name: KeywordIntentClassifier # Configuration for Rasa Core. 
# https://rasa.com/docs/rasa/core/policies/ diff --git a/data/test_config/keras_random_seed.yaml b/data/test_config/ted_random_seed.yaml similarity index 58% rename from data/test_config/keras_random_seed.yaml rename to data/test_config/ted_random_seed.yaml index f91967d3251a..0bcf2dba2827 100644 --- a/data/test_config/keras_random_seed.yaml +++ b/data/test_config/ted_random_seed.yaml @@ -1,4 +1,5 @@ policies: -- name: KerasPolicy +- name: TEDPolicy random_seed: 42 validation_split: 0 + max_history: 5 diff --git a/data/test_dialogues/formbot.json b/data/test_dialogues/formbot.json index 80201d1d8bbb..d793f1aa322d 100644 --- a/data/test_dialogues/formbot.json +++ b/data/test_dialogues/formbot.json @@ -173,7 +173,7 @@ "unpredictable":false }, { - "py/object":"rasa.core.events.Form", + "py/object":"rasa.core.events.ActiveLoop", "name":null, "timestamp":1551884214.9510589 }, @@ -187,7 +187,7 @@ "py/object":"rasa.core.events.ActionExecuted", "action_name":"action_listen", "confidence":0.7680902069097734, - "policy":"policy_0_KerasPolicy", + "policy":"policy_0_TEDPolicy", "timestamp":1551884216.705635, "unpredictable":false } diff --git a/data/test_dialogues/restaurantbot.json b/data/test_dialogues/restaurantbot.json deleted file mode 100644 index 29fe52ef864f..000000000000 --- a/data/test_dialogues/restaurantbot.json +++ /dev/null @@ -1,584 +0,0 @@ -{ - "py/object":"rasa.core.conversation.Dialogue", - "name":"restaurantbot", - "events":[ - { - "py/object":"rasa.core.events.ActionExecuted", - "action_name":"action_listen", - "confidence":null, - "policy":null, - "timestamp":1551884318.808505, - "unpredictable":false - }, - { - "py/object":"rasa.core.events.UserUttered", - "entities":[ - - ], - "input_channel":null, - "intent":{ - "confidence":0.9934019069423592, - "name":"greet" - }, - "message_id":null, - "parse_data":{ - "entities":[ - - ], - "intent":{ - "confidence":0.9934019069423592, - "name":"greet" - }, - "intent_ranking":[ - { - "confidence":0.9934019069423592, - "name":"greet" - }, - { - "confidence":0.0023415728200814737, - "name":"thankyou" - }, - { - "confidence":0.0016331428022959985, - "name":"affirm" - }, - { - "confidence":0.0014654355039414742, - "name":"request_info" - }, - { - "confidence":0.0007807232364648854, - "name":"deny" - }, - { - "confidence":0.00037721869485698555, - "name":"inform" - } - ], - "text":"hi" - }, - "text":"hi", - "timestamp":1551884474.021491 - }, - { - "py/object":"rasa.core.events.ActionExecuted", - "action_name":"utter_ask_howcanhelp", - "confidence":1.0, - "policy":"policy_2_MemoizationPolicy", - "timestamp":1551884482.0585139, - "unpredictable":false - }, - { - "py/object":"rasa.core.events.BotUttered", - "data":{ - "attachment":null, - "buttons":null, - "elements":null - }, - "text":"how can I help you?", - "timestamp":1551884482.058523 - }, - { - "py/object":"rasa.core.events.ActionExecuted", - "action_name":"action_listen", - "confidence":1.0, - "policy":"policy_2_MemoizationPolicy", - "timestamp":1551884484.927216, - "unpredictable":false - }, - { - "py/object":"rasa.core.events.UserUttered", - "entities":[ - { - "end":50, - "entity":"cuisine", - "start":43, - "value":"italian" - }, - { - "end":70, - "entity":"location", - "start":65, - "value":"paris" - } - ], - "input_channel":null, - "intent":{ - "confidence":0.8721412706843699, - "name":"inform" - }, - "message_id":null, - "parse_data":{ - "entities":[ - { - "end":50, - "entity":"cuisine", - "start":43, - "value":"italian" - }, - { - "end":70, - "entity":"location", - "start":65, - 
"value":"paris" - } - ], - "intent":{ - "confidence":0.8721412706843699, - "name":"inform" - }, - "text":"Glad you asked, I've been searching for an italian restaurant in paris for the last 3 days" - }, - "text":"Glad you asked, I've been searching for an italian restaurant in paris for the last 3 days", - "timestamp":1551884531.914144 - }, - { - "py/object":"rasa.core.events.SlotSet", - "key":"cuisine", - "timestamp":1551884531.9141529, - "value":"italian" - }, - { - "py/object":"rasa.core.events.SlotSet", - "key":"location", - "timestamp":1551884531.9141572, - "value":"paris" - }, - { - "py/object":"rasa.core.events.ActionExecuted", - "action_name":"utter_on_it", - "confidence":1.0, - "policy":"policy_2_MemoizationPolicy", - "timestamp":1551884534.021868, - "unpredictable":false - }, - { - "py/object":"rasa.core.events.BotUttered", - "data":{ - "attachment":null, - "buttons":null, - "elements":null - }, - "text":"I'm on it", - "timestamp":1551884534.021875 - }, - { - "py/object":"rasa.core.events.ActionExecuted", - "action_name":"utter_ask_numpeople", - "confidence":1.0, - "policy":"policy_2_MemoizationPolicy", - "timestamp":1551884538.19069, - "unpredictable":false - }, - { - "py/object":"rasa.core.events.BotUttered", - "data":{ - "attachment":null, - "buttons":null, - "elements":null - }, - "text":"for how many people?", - "timestamp":1551884538.190698 - }, - { - "py/object":"rasa.core.events.ActionExecuted", - "action_name":"action_listen", - "confidence":1.0, - "policy":"policy_2_MemoizationPolicy", - "timestamp":1551884541.604692, - "unpredictable":false - }, - { - "py/object":"rasa.core.events.UserUttered", - "entities":[ - { - "end":20, - "entity":"people", - "start":16, - "value":"four" - } - ], - "input_channel":null, - "intent":{ - "confidence":0.767022076375302, - "name":"inform" - }, - "message_id":null, - "parse_data":{ - "entities":[ - { - "end":20, - "entity":"people", - "start":16, - "value":"four" - } - ], - "intent":{ - "confidence":0.767022076375302, - "name":"inform" - }, - "text":"I'm coming with four people" - }, - "text":"I'm coming with four people", - "timestamp":1551884558.849935 - }, - { - "py/object":"rasa.core.events.SlotSet", - "key":"people", - "timestamp":1551884558.849946, - "value":"four" - }, - { - "py/object":"rasa.core.events.ActionExecuted", - "action_name":"utter_ask_price", - "confidence":1.0, - "policy":"policy_2_MemoizationPolicy", - "timestamp":1551884561.016474, - "unpredictable":false - }, - { - "py/object":"rasa.core.events.BotUttered", - "data":{ - "attachment":null, - "buttons":[ - { - "payload":"cheap", - "title":"cheap" - }, - { - "payload":"expensive", - "title":"expensive" - } - ], - "elements":null - }, - "text":"in which price range?", - "timestamp":1551884561.016482 - }, - { - "py/object":"rasa.core.events.ActionExecuted", - "action_name":"action_listen", - "confidence":1.0, - "policy":"policy_2_MemoizationPolicy", - "timestamp":1551884563.646207, - "unpredictable":false - }, - { - "py/object":"rasa.core.events.UserUttered", - "entities":[ - { - "confidence":0.839291340764181, - "end":11, - "entity":"price", - "extractor":"CRFEntityExtractor", - "processors":[ - "EntitySynonymMapper" - ], - "start":6, - "value":"lo" - } - ], - "input_channel":null, - "intent":{ - "confidence":0.5050531724307868, - "name":"affirm" - }, - "message_id":null, - "parse_data":{ - "entities":[ - { - "confidence":0.839291340764181, - "end":11, - "entity":"price", - "extractor":"CRFEntityExtractor", - "processors":[ - "EntitySynonymMapper" - ], - "start":6, - 
"value":"lo" - } - ], - "intent":{ - "confidence":0.5050531724307868, - "name":"affirm" - }, - "intent_ranking":[ - { - "confidence":0.5050531724307868, - "name":"affirm" - }, - { - "confidence":0.42281451260184255, - "name":"inform" - }, - { - "confidence":0.04421589956251055, - "name":"deny" - }, - { - "confidence":0.015174350052667275, - "name":"thankyou" - }, - { - "confidence":0.010163478737908526, - "name":"greet" - }, - { - "confidence":0.00257858661428383, - "name":"request_info" - } - ], - "text":"I'm a cheap guy" - }, - "text":"I'm a cheap guy", - "timestamp":1551884568.883223 - }, - { - "py/object":"rasa.core.events.SlotSet", - "key":"price", - "timestamp":1551884568.883248, - "value":"lo" - }, - { - "py/object":"rasa.core.events.ActionExecuted", - "action_name":"utter_ask_moreupdates", - "confidence":0.9925360233665671, - "policy":"policy_0_KerasPolicy", - "timestamp":1551884573.494926, - "unpredictable":false - }, - { - "py/object":"rasa.core.events.BotUttered", - "data":{ - "attachment":null, - "buttons":null, - "elements":null - }, - "text":"if you'd like to modify anything else, please tell me what. This is what I currently have: paris (price: lo, cuisine: italian) for four people.", - "timestamp":1551884573.494934 - }, - { - "py/object":"rasa.core.events.ActionExecuted", - "action_name":"action_listen", - "confidence":0.9998248227762471, - "policy":"policy_0_KerasPolicy", - "timestamp":1551884576.519327, - "unpredictable":false - }, - { - "py/object":"rasa.core.events.UserUttered", - "entities":[ - - ], - "input_channel":null, - "intent":{ - "confidence":0.007820649756508936, - "name":"deny" - }, - "message_id":null, - "parse_data":{ - "entities":[ - - ], - "intent":{ - "confidence":0.007820649756508936, - "name":"deny" - }, - "text":"Nope" - }, - "text":"Nope", - "timestamp":1551884585.142624 - }, - { - "py/object":"rasa.core.events.ActionExecuted", - "action_name":"utter_ack_dosearch", - "confidence":null, - "policy":null, - "timestamp":1551884596.275981, - "unpredictable":false - }, - { - "py/object":"rasa.core.events.BotUttered", - "data":{ - "attachment":null, - "buttons":null, - "elements":null - }, - "text":"ok let me see what I can find", - "timestamp":1551884596.275989 - }, - { - "py/object":"rasa.core.events.ActionExecuted", - "action_name":"action_search_restaurants", - "confidence":0.9959966060902363, - "policy":"policy_0_KerasPolicy", - "timestamp":1551884599.403338, - "unpredictable":false - }, - { - "py/object":"rasa.core.events.ActionExecuted", - "action_name":"action_suggest", - "confidence":0.9632275734738791, - "policy":"policy_0_KerasPolicy", - "timestamp":1551884602.426846, - "unpredictable":false - }, - { - "py/object":"rasa.core.events.ActionExecuted", - "action_name":"action_listen", - "confidence":0.9950680237822371, - "policy":"policy_0_KerasPolicy", - "timestamp":1551884605.8911119, - "unpredictable":false - }, - { - "py/object":"rasa.core.events.UserUttered", - "entities":[ - - ], - "input_channel":null, - "intent":{ - "confidence":0.7645159564856417, - "name":"affirm" - }, - "message_id":null, - "parse_data":{ - "entities":[ - - ], - "intent":{ - "confidence":0.7645159564856417, - "name":"affirm" - }, - "intent_ranking":[ - { - "confidence":0.7645159564856417, - "name":"affirm" - }, - { - "confidence":0.12226169938305291, - "name":"greet" - }, - { - "confidence":0.05743747849472287, - "name":"thankyou" - }, - { - "confidence":0.037039845161177185, - "name":"inform" - }, - { - "confidence":0.013406223916224952, - "name":"deny" - }, - { - 
"confidence":0.005338796559180861, - "name":"request_info" - } - ], - "text":"Yep, perfect" - }, - "text":"Yep, perfect", - "timestamp":1551884610.116241 - }, - { - "py/object":"rasa.core.events.ActionExecuted", - "action_name":"utter_ack_makereservation", - "confidence":1.0, - "policy":"policy_2_MemoizationPolicy", - "timestamp":1551884614.104856, - "unpredictable":false - }, - { - "py/object":"rasa.core.events.BotUttered", - "data":{ - "attachment":null, - "buttons":[ - { - "payload":"thank you", - "title":"thank you" - } - ], - "elements":null - }, - "text":"ok making a reservation for restaurant (price=lo cuisine=italian) in location=paris for count=four? ", - "timestamp":1551884614.104864 - }, - { - "py/object":"rasa.core.events.ActionExecuted", - "action_name":"action_listen", - "confidence":1.0, - "policy":"policy_2_MemoizationPolicy", - "timestamp":1551884617.239556, - "unpredictable":false - }, - { - "py/object":"rasa.core.events.UserUttered", - "entities":[ - - ], - "input_channel":null, - "intent":{ - "confidence":0.6540265957854002, - "name":"thankyou" - }, - "message_id":null, - "parse_data":{ - "entities":[ - - ], - "intent":{ - "confidence":0.6540265957854002, - "name":"thankyou" - }, - "intent_ranking":[ - { - "confidence":0.6540265957854002, - "name":"thankyou" - }, - { - "confidence":0.22113942715759172, - "name":"greet" - }, - { - "confidence":0.09427645899747812, - "name":"affirm" - }, - { - "confidence":0.022935123102150762, - "name":"deny" - }, - { - "confidence":0.0039277032314805225, - "name":"inform" - }, - { - "confidence":0.003694691725898955, - "name":"request_info" - } - ], - "text":"Thanks pal" - }, - "text":"Thanks pal", - "timestamp":1551884620.3249972 - }, - { - "py/object":"rasa.core.events.ActionExecuted", - "action_name":"utter_goodbye", - "confidence":1.0, - "policy":"policy_2_MemoizationPolicy", - "timestamp":1551884623.90435, - "unpredictable":false - }, - { - "py/object":"rasa.core.events.BotUttered", - "data":{ - "attachment":null, - "buttons":null, - "elements":null - }, - "text":"goodbye :(", - "timestamp":1551884623.904358 - } - ] -} \ No newline at end of file diff --git a/data/test_domains/default.yml b/data/test_domains/default.yml index 29bdc698528a..82da02d3b117 100644 --- a/data/test_domains/default.yml +++ b/data/test_domains/default.yml @@ -12,15 +12,10 @@ slots: entities: - name -templates: +responses: utter_greet: - text: hey there! utter_goodbye: - text: goodbye :( utter_default: - text: default message - -actions: - - utter_default - - utter_greet - - utter_goodbye diff --git a/data/test_domains/default_deprecated_templates.yml b/data/test_domains/default_deprecated_templates.yml new file mode 100644 index 000000000000..40f18dc2c977 --- /dev/null +++ b/data/test_domains/default_deprecated_templates.yml @@ -0,0 +1,26 @@ +intents: + - greet: {use_entities: [name]} + - default: {ignore_entities : [unrelated_recognized_entity]} + - goodbye: {use_entities: null} + - thank: {use_entities: False} + - ask: {use_entities: True} + - why: {use_entities: []} + - pure_intent + +entities: + - name + - unrelated_recognized_entity + - other + +templates: + utter_greet: + - hey there! 
+ utter_goodbye: + - goodbye :( + utter_default: + - default message + +actions: + - utter_default + - utter_greet + - utter_goodbye diff --git a/data/test_domains/default_retrieval_intents.yml b/data/test_domains/default_retrieval_intents.yml new file mode 100644 index 000000000000..4d41d889716c --- /dev/null +++ b/data/test_domains/default_retrieval_intents.yml @@ -0,0 +1,35 @@ +intents: + - greet + - goodbye + - affirm + - deny + - mood_great + - mood_unhappy + - bot_challenge + - chitchat + - chitchat/ask_name + - chitchat/ask_weather + +responses: + utter_greet: + - text: Hey! How are you? + utter_cheer_up: + - text: 'Here is something to cheer you up:' + image: https://i.imgur.com/nGF1K8f.jpg + utter_did_that_help: + - text: Did that help you? + utter_happy: + - text: Great, carry on! + utter_goodbye: + - text: Bye + utter_iamabot: + - text: I am a bot, powered by Rasa. + +actions: + - respond_chitchat + - utter_greet + - utter_cheer_up + - utter_did_that_help + - utter_happy + - utter_goodbye + - utter_iamabot diff --git a/data/test_domains/default_unfeaturized_entities.yml b/data/test_domains/default_unfeaturized_entities.yml index debefea33b09..2d0a4672efbd 100644 --- a/data/test_domains/default_unfeaturized_entities.yml +++ b/data/test_domains/default_unfeaturized_entities.yml @@ -12,15 +12,10 @@ entities: - unrelated_recognized_entity - other -templates: +responses: utter_greet: - hey there! utter_goodbye: - goodbye :( utter_default: - default message - -actions: - - utter_default - - utter_greet - - utter_goodbye diff --git a/data/test_domains/default_with_mapping.yml b/data/test_domains/default_with_mapping.yml index 42ca0cae7d7f..df408b123a6c 100644 --- a/data/test_domains/default_with_mapping.yml +++ b/data/test_domains/default_with_mapping.yml @@ -14,15 +14,10 @@ slots: entities: - name -templates: +responses: utter_greet: - text: hey there! utter_goodbye: - text: goodbye :( utter_default: - text: default message - -actions: - - utter_default - - utter_greet - - utter_goodbye diff --git a/data/test_domains/default_with_slots.yml b/data/test_domains/default_with_slots.yml index 62f0474c4fdf..3083ae158c06 100644 --- a/data/test_domains/default_with_slots.yml +++ b/data/test_domains/default_with_slots.yml @@ -18,7 +18,7 @@ slots: name: type: text -templates: +responses: utter_greet: - text: "hey there {name}!" # {name} will be filled by slot (same name) or by custom action utter_channel: @@ -26,12 +26,7 @@ templates: - text: "you're talking to me on slack!" # if you define channel-specific utterances, the bot will pick channel: "slack" # from those when talking on that specific channel utter_goodbye: - - text: "goodbye 😢" # multiple templates - bot will randomly pick one of them + - text: "goodbye 😢" # multiple responses - bot will randomly pick one of them - text: "bye bye 😢" utter_default: # utterance sent by action_default_fallback - text: "sorry, I didn't get that, can you rephrase it?" 
- -actions: - - utter_default - - utter_greet - - utter_goodbye diff --git a/data/test_domains/default_with_slots_and_no_actions.yml b/data/test_domains/default_with_slots_and_no_actions.yml new file mode 100644 index 000000000000..3bf7fc9aa31e --- /dev/null +++ b/data/test_domains/default_with_slots_and_no_actions.yml @@ -0,0 +1,32 @@ +# all hashtags are comments :) +intents: + - greet + - default + - goodbye + - affirm + - thank_you + - change_bank_details + - simple + - hello + - why + - next_intent + +entities: + - name + +slots: + name: + type: text + +responses: + utter_greet: + - text: "hey there {name}!" # {name} will be filled by slot (same name) or by custom action + utter_channel: + - text: "this is a default channel" + - text: "you're talking to me on slack!" # if you define channel-specific utterances, the bot will pick + channel: "slack" # from those when talking on that specific channel + utter_goodbye: + - text: "goodbye 😢" # multiple templates - bot will randomly pick one of them + - text: "bye bye 😢" + utter_default: # utterance sent by action_default_fallback + - text: "sorry, I didn't get that, can you rephrase it?" diff --git a/data/test_domains/duplicate_actions.yml b/data/test_domains/duplicate_actions.yml index fbd0ef7941b5..cf346c77a8a8 100644 --- a/data/test_domains/duplicate_actions.yml +++ b/data/test_domains/duplicate_actions.yml @@ -12,7 +12,7 @@ slots: entities: - name -templates: +responses: utter_greet: - text: hey there! utter_goodbye: diff --git a/data/test_domains/duplicate_entities.yml b/data/test_domains/duplicate_entities.yml index 4ecf0e41df93..2a5e482ba7cd 100644 --- a/data/test_domains/duplicate_entities.yml +++ b/data/test_domains/duplicate_entities.yml @@ -15,7 +15,7 @@ entities: - name - name -templates: +responses: utter_greet: - text: hey there! utter_goodbye: @@ -23,7 +23,3 @@ templates: utter_default: - text: default message -actions: - - utter_default - - utter_greet - - utter_goodbye diff --git a/data/test_domains/duplicate_intents.yml b/data/test_domains/duplicate_intents.yml index a60f4c935dba..4341f589a3b5 100644 --- a/data/test_domains/duplicate_intents.yml +++ b/data/test_domains/duplicate_intents.yml @@ -14,7 +14,7 @@ slots: entities: - name -templates: +responses: utter_greet: - text: hey there! utter_goodbye: @@ -22,7 +22,6 @@ templates: utter_default: - text: default message -actions: - - utter_default - - utter_greet - - utter_goodbye +session_config: + session_expiration_time: 60 + carry_over_slots_to_new_session: true diff --git a/data/test_domains/duplicate_templates.yml b/data/test_domains/duplicate_templates.yml index 176e94e1152e..3d0fbe10cb5b 100644 --- a/data/test_domains/duplicate_templates.yml +++ b/data/test_domains/duplicate_templates.yml @@ -12,7 +12,7 @@ slots: entities: - name -templates: +responses: utter_greet: - text: hey there! utter_goodbye: @@ -21,8 +21,3 @@ templates: - text: default message utter_greet: - text: hey there! - -actions: - - utter_default - - utter_greet - - utter_goodbye diff --git a/data/test_domains/form.yml b/data/test_domains/form.yml index 955267c09ef6..d2ce5b2bc5ea 100644 --- a/data/test_domains/form.yml +++ b/data/test_domains/form.yml @@ -17,19 +17,15 @@ slots: entities: - name -templates: +responses: utter_greet: - text: hey there! utter_goodbye: - text: goodbye :( utter_default: - text: default message - -actions: - - utter_default - - utter_greet - - utter_goodbye - - utter_ask_continue + utter_ask_continue: + - text: should I continue? 
forms: - some_form \ No newline at end of file diff --git a/data/test_domains/invalid_format.yml b/data/test_domains/invalid_format.yml index d5edda36a0ff..3f846162e0d3 100644 --- a/data/test_domains/invalid_format.yml +++ b/data/test_domains/invalid_format.yml @@ -12,7 +12,7 @@ slots: entities - name -templates: +responses: utter_greet: - text: hey there! utter_goodbye: diff --git a/data/test_domains/missing_text_for_templates.yml b/data/test_domains/missing_text_for_templates.yml index 9b01aa7f26e0..0d478179bdc7 100644 --- a/data/test_domains/missing_text_for_templates.yml +++ b/data/test_domains/missing_text_for_templates.yml @@ -3,15 +3,10 @@ intents: - default - goodbye -templates: +responses: utter_greet: - hey there! utter_goodbye: - goodbye :( utter_default: - default message - -actions: - - utter_default - - utter_greet - - utter_goodbye diff --git a/data/test_domains/people_form.yml b/data/test_domains/people_form.yml index 3dae366640f5..09bb66a92777 100644 --- a/data/test_domains/people_form.yml +++ b/data/test_domains/people_form.yml @@ -9,7 +9,7 @@ slots: requested_slot: type: unfeaturized -templates: +responses: utter_ask_person_name: - text: "what's the name of the person you're looking for?" diff --git a/data/test_domains/query_form.yml b/data/test_domains/query_form.yml index 82044058de13..986052840b46 100644 --- a/data/test_domains/query_form.yml +++ b/data/test_domains/query_form.yml @@ -10,7 +10,7 @@ slots: requested_slot: type: unfeaturized -templates: +responses: utter_ask_username: - text: "what is your name?" utter_ask_query: diff --git a/data/test_domains/restaurant_form.yml b/data/test_domains/restaurant_form.yml index 720f70be8d5d..1cb031b601f5 100644 --- a/data/test_domains/restaurant_form.yml +++ b/data/test_domains/restaurant_form.yml @@ -18,7 +18,7 @@ slots: search_results: type: unfeaturized -templates: +responses: utter_ask_people: - text: "for how many people?" utter_ask_cuisine: diff --git a/data/test_domains/travel_form.yml b/data/test_domains/travel_form.yml index d5e841e49215..41862a37a1e7 100644 --- a/data/test_domains/travel_form.yml +++ b/data/test_domains/travel_form.yml @@ -13,7 +13,7 @@ slots: requested_slot: type: unfeaturized -templates: +responses: utter_ask_GPE_origin: - text: "where are you leaving from?" 
utter_ask_GPE_destination: diff --git a/data/test_endpoints/event_brokers/pika_endpoint.yml b/data/test_endpoints/event_brokers/pika_endpoint.yml index 9c23e6c3dffc..24f9ac34df60 100644 --- a/data/test_endpoints/event_brokers/pika_endpoint.yml +++ b/data/test_endpoints/event_brokers/pika_endpoint.yml @@ -1,6 +1,10 @@ event_broker: + type: pika url: localhost username: username password: password - queue: queue - type: pika + queues: + - queue-1 +# you may supply more than one queue to publish to +# - queue-2 +# - queue-3 diff --git a/data/test_endpoints/example_endpoints.yml b/data/test_endpoints/example_endpoints.yml index fe27fc94d1f4..455e5d58b2f9 100644 --- a/data/test_endpoints/example_endpoints.yml +++ b/data/test_endpoints/example_endpoints.yml @@ -22,4 +22,3 @@ tracker_store: #db: rasa #user: username #password: password - \ No newline at end of file diff --git a/data/test_evaluations/end_to_end_story.md b/data/test_evaluations/end_to_end_story.md index 0600e9a9da41..c2933a3211b9 100644 --- a/data/test_evaluations/end_to_end_story.md +++ b/data/test_evaluations/end_to_end_story.md @@ -1,27 +1,17 @@ ## simple_story_with_only_start -* greet: Hello - - utter_ask_howcanhelp +* greet: /greet + - utter_greet ## simple_story_with_multiple_turns -* greet: good morning - - utter_ask_howcanhelp -* inform: im looking for a [moderately](price:moderate) priced restaurant in the [east](location) part of town - - utter_on_it - - utter_ask_cuisine -* inform: [french](cuisine) food - - utter_ask_numpeople +* greet: /greet + - utter_greet +* default: /default + - utter_default + * goodbye: /goodbye + - utter_goodbye ## story_with_multiple_entities_correction_and_search -* greet: hello - - utter_ask_howcanhelp -* inform: im looking for a [cheap](price:lo) restaurant which has [french](cuisine) food and is located in [bombay](location) - - utter_on_it - - utter_ask_numpeople -* inform: for [six](people:6) please - - utter_ask_moreupdates -* inform: actually i need a [moderately](price:moderate) priced restaurant - - utter_ask_moreupdates -* deny: no - - utter_ack_dosearch - - action_search_restaurants - - action_suggest \ No newline at end of file +* greet: /greet{"name": "Max"} + - utter_greet +* default: /default + - utter_default diff --git a/data/test_evaluations/end_to_end_trips_circuit_breaker.md b/data/test_evaluations/end_to_end_trips_circuit_breaker.md new file mode 100644 index 000000000000..eafcf5252aa5 --- /dev/null +++ b/data/test_evaluations/end_to_end_trips_circuit_breaker.md @@ -0,0 +1,13 @@ +## story_trips_circuit_breaker +* greet: /greet + - utter_greet + - utter_greet + - utter_greet + - utter_greet + - utter_greet + - utter_greet + - utter_greet + - utter_greet + - utter_greet + - utter_greet + - utter_greet diff --git a/data/test_evaluations/form-end-to-end-stories.md b/data/test_evaluations/form-end-to-end-stories.md new file mode 100644 index 000000000000..a90a3dfa62df --- /dev/null +++ b/data/test_evaluations/form-end-to-end-stories.md @@ -0,0 +1,40 @@ +## Happy path +* greet: /greet + - utter_greet +* request_restaurant: /request_restaurant + - restaurant_form + - form{"name": "restaurant_form"} + - form{"name": null} + - utter_submit + - utter_slots_values +* thankyou: /thankyou + - utter_noworries + +## Happy path with form prefix +* greet: /greet + - utter_greet +* request_restaurant: /request_restaurant + - restaurant_form + - form{"name": "restaurant_form"} +* form: /inform{"cuisine": "afghan"} + - form: restaurant_form + - form{"name": null} + - utter_submit + - 
utter_slots_values +* thankyou: /thankyou + - utter_noworries + +## unhappy path +* greet: /greet + - utter_greet +* request_restaurant: /request_restaurant + - restaurant_form + - form{"name": "restaurant_form"} +* chitchat: /chitchat + - utter_chitchat + - restaurant_form + - form{"name": null} + - utter_submit + - utter_slots_values +* thankyou: /thankyou + - utter_noworries diff --git a/data/test_evaluations/stories_trip_circuit_breaker.md b/data/test_evaluations/stories_trip_circuit_breaker.md new file mode 100644 index 000000000000..c9d3c6d820f0 --- /dev/null +++ b/data/test_evaluations/stories_trip_circuit_breaker.md @@ -0,0 +1,13 @@ +## story_trips_circuit_breaker +* greet + - utter_greet + - utter_greet + - utter_greet + - utter_greet + - utter_greet + - utter_greet + - utter_greet + - utter_greet + - utter_greet + - utter_greet + - utter_greet diff --git a/data/test_mixed_yaml_md_stories/stories_part_1.yml b/data/test_mixed_yaml_md_stories/stories_part_1.yml new file mode 100644 index 000000000000..233fd9d4980f --- /dev/null +++ b/data/test_mixed_yaml_md_stories/stories_part_1.yml @@ -0,0 +1,20 @@ +stories: +- story: simple_story_without_checkpoint + steps: + - intent: simple + - action: utter_default + - action: utter_greet + +- story: simple_story_with_only_start + steps: + - checkpoint: check_greet # checkpoints at the start define entry points + - intent: simple + - action: utter_default + +- story: simple_story_with_only_end + steps: + - intent: hello + - action: utter_greet + - slot_was_set: + - name: peter + - checkpoint: check_greet # checkpoint defining the end of this turn diff --git a/data/test_mixed_yaml_md_stories/stories_part_2.md b/data/test_mixed_yaml_md_stories/stories_part_2.md new file mode 100644 index 000000000000..b0bdf502194b --- /dev/null +++ b/data/test_mixed_yaml_md_stories/stories_part_2.md @@ -0,0 +1,24 @@ +## simple_story_with_multiple_turns +* affirm OR thank_you + - utter_default +* goodbye + - utter_goodbye +> check_goodbye + +## why does the user want to leave? +> check_goodbye +* why + - utter_default +> check_greet + +## show_it_all +> check_greet +> check_hello + +* next_intent + - utter_greet + +> check_intermediate + +* change_bank_details + - utter_default diff --git a/data/test_multi_domain/config.yml b/data/test_multi_domain/config.yml index e7c4d7616564..61e0b5b7133b 100644 --- a/data/test_multi_domain/config.yml +++ b/data/test_multi_domain/config.yml @@ -1,10 +1,17 @@ language: en -pipeline: "pretrained_embeddings_spacy" +pipeline: + - name: SpacyNLP + - name: SpacyTokenizer + - name: SpacyFeaturizer + - name: RegexFeaturizer + - name: CRFEntityExtractor + - name: EntitySynonymMapper + - name: SklearnIntentClassifier policies: - name: MemoizationPolicy - - name: KerasPolicy + - name: TEDPolicy importers: - name: MultiProjectImporter diff --git a/data/test_multi_domain/data/GreetBot/domain.yml b/data/test_multi_domain/data/GreetBot/domain.yml index 593b0904775e..c683c9c83edb 100644 --- a/data/test_multi_domain/data/GreetBot/domain.yml +++ b/data/test_multi_domain/data/GreetBot/domain.yml @@ -2,11 +2,7 @@ intents: - greet - goodbye -actions: -- utter_greet -- utter_goodbye - -templates: +responses: utter_greet: - text: "Hey! How are you?" 
buttons: diff --git a/data/test_multi_domain/data/MoodBot/config.yml b/data/test_multi_domain/data/MoodBot/config.yml index c7a01c602509..9280fe205096 100644 --- a/data/test_multi_domain/data/MoodBot/config.yml +++ b/data/test_multi_domain/data/MoodBot/config.yml @@ -4,7 +4,7 @@ pipeline: "pretrained_embeddings_spacy" policies: - name: MemoizationPolicy - - name: KerasPolicy + - name: TEDPolicy imports: - ../GreetBot \ No newline at end of file diff --git a/data/test_multi_domain/data/MoodBot/domain.yml b/data/test_multi_domain/data/MoodBot/domain.yml index 2627e7d252e3..ff849acf812d 100644 --- a/data/test_multi_domain/data/MoodBot/domain.yml +++ b/data/test_multi_domain/data/MoodBot/domain.yml @@ -4,12 +4,7 @@ intents: - mood_great - mood_unhappy -actions: -- utter_did_that_help -- utter_happy -- utter_cheer_up - -templates: +responses: utter_cheer_up: - text: "Here is something to cheer you up:" image: "https://i.imgur.com/nGF1K8f.jpg" diff --git a/data/test_multi_domain/domain.yml b/data/test_multi_domain/domain.yml index f0915841e2ee..93a3518feac3 100644 --- a/data/test_multi_domain/domain.yml +++ b/data/test_multi_domain/domain.yml @@ -1,9 +1,6 @@ intents: - goodbye -actions: -- utter_goodbye - -templates: +responses: utter_goodbye: - text: "Bye" diff --git a/data/test_multifile_yaml_stories/stories_part_1.yml b/data/test_multifile_yaml_stories/stories_part_1.yml new file mode 100644 index 000000000000..15bff95aee89 --- /dev/null +++ b/data/test_multifile_yaml_stories/stories_part_1.yml @@ -0,0 +1,20 @@ +stories: +- story: simple_story_without_checkpoint + steps: + - intent: simple + - action: utter_default + - action: utter_greet + +- story: simple_story_with_only_start + steps: + - checkpoint: check_greet # checkpoints at the start define entry points + - intent: simple + - action: utter_default + +- story: simple_story_with_only_end + steps: + - intent: hello + - action: utter_greet + - slot_was_set: + - name: peter + - checkpoint: check_greet # checkpoint defining the end of this turn \ No newline at end of file diff --git a/data/test_multifile_yaml_stories/stories_part_2.yml b/data/test_multifile_yaml_stories/stories_part_2.yml new file mode 100644 index 000000000000..f8f7a3f9f555 --- /dev/null +++ b/data/test_multifile_yaml_stories/stories_part_2.yml @@ -0,0 +1,27 @@ +stories: +- story: simple_story_with_multiple_turns + steps: + - or: + - intent: affirm + - intent: thank_you + - action: utter_default + - intent: goodbye + - action: utter_goodbye + - checkpoint: check_goodbye + +- story: why does the user want to leave? + steps: + - checkpoint: check_goodbye + - intent: why + - action: utter_default + - checkpoint: check_greet + +- story: show_it_all + steps: + - checkpoint: check_greet + - checkpoint: check_hello # allows multiple entry points + - intent: next_intent + - action: utter_greet # actions taken by the bot + - checkpoint: check_intermediate # allows intermediate checkpoints + - intent: change_bank_details + - action: utter_default # allows to end without checkpoints diff --git a/rasa/cli/initial_project/data/nlu.md b/data/test_nlu/default_retrieval_intents.md similarity index 80% rename from rasa/cli/initial_project/data/nlu.md rename to data/test_nlu/default_retrieval_intents.md index 186620d584f9..56f50f2cbd8d 100644 --- a/rasa/cli/initial_project/data/nlu.md +++ b/data/test_nlu/default_retrieval_intents.md @@ -54,3 +54,12 @@ - are you a human? - am I talking to a bot? - am I talking to a human? + +## intent:chitchat/ask_name +- what's your name +- who are you? 
+- what are you called? + +## intent:chitchat/ask_weather +- how's weather? +- is it sunny where you are? diff --git a/data/test_responses/default.md b/data/test_responses/default.md new file mode 100644 index 000000000000..aeae1f9c9ac1 --- /dev/null +++ b/data/test_responses/default.md @@ -0,0 +1,7 @@ +## ask name +* chitchat/ask_name + - my name is Sara, Rasa's documentation bot! + +## ask weather +* chitchat/ask_weather + - it's always sunny where I live diff --git a/data/test_stories/rules_without_stories.md b/data/test_stories/rules_without_stories.md new file mode 100644 index 000000000000..c85e11f73080 --- /dev/null +++ b/data/test_stories/rules_without_stories.md @@ -0,0 +1,23 @@ +>> rule 1 + - form{"name": "loop_q_form"} + - slot{"requested_slot": "some_slot"} + - ... +* inform{"some_slot":"bla"} + - loop_q_form + +>> rule 2 + - form{"name": "loop_q_form"} + - slot{"requested_slot": "some_slot"} + - ... +* explain + - utter_explain_some_slot + - loop_q_form + - form{"name": "loop_q_form"} + +>> rule 3 + - form{"name": "loop_q_form"} + - ... + - loop_q_form + - form{"name": null} + - slot{"requested_slot": null} + - action_stop_q_form diff --git a/data/test_stories/stories_conflicting_1.md b/data/test_stories/stories_conflicting_1.md new file mode 100644 index 000000000000..d772f46ee33a --- /dev/null +++ b/data/test_stories/stories_conflicting_1.md @@ -0,0 +1,15 @@ +## story 1 +* greet + - utter_greet +* greet + - utter_greet +* greet + - utter_greet + +## story 2 +* default + - utter_greet +* greet + - utter_greet +* greet + - utter_default diff --git a/data/test_stories/stories_conflicting_2.md b/data/test_stories/stories_conflicting_2.md new file mode 100644 index 000000000000..001b7087c700 --- /dev/null +++ b/data/test_stories/stories_conflicting_2.md @@ -0,0 +1,14 @@ +## greetings +* greet + - utter_greet +> check_greet + +## happy path +> check_greet +* default + - utter_default + +## problem +> check_greet +* default + - utter_goodbye diff --git a/data/test_stories/stories_conflicting_3.md b/data/test_stories/stories_conflicting_3.md new file mode 100644 index 000000000000..2218f6cea164 --- /dev/null +++ b/data/test_stories/stories_conflicting_3.md @@ -0,0 +1,14 @@ +## greetings +* greet + - utter_greet +> check_greet + +## happy path +> check_greet +* default OR greet + - utter_default + +## problem +> check_greet +* greet + - utter_goodbye diff --git a/data/test_stories/stories_conflicting_4.md b/data/test_stories/stories_conflicting_4.md new file mode 100644 index 000000000000..372c38ff6d15 --- /dev/null +++ b/data/test_stories/stories_conflicting_4.md @@ -0,0 +1,17 @@ +## story 1 +* greet + - utter_greet +* greet + - slot{"cuisine": "German"} + - utter_greet +* greet + - utter_greet + +## story 2 +* greet + - utter_greet +* greet + - slot{"cuisine": "German"} + - utter_greet +* greet + - utter_default diff --git a/data/test_stories/stories_conflicting_5.md b/data/test_stories/stories_conflicting_5.md new file mode 100644 index 000000000000..6865c9db9b4f --- /dev/null +++ b/data/test_stories/stories_conflicting_5.md @@ -0,0 +1,16 @@ +## story 1 +* greet + - utter_greet +* greet + - utter_greet + - slot{"cuisine": "German"} +* greet + - utter_greet + +## story 2 +* greet + - utter_greet +* greet + - utter_greet +* greet + - utter_default diff --git a/data/test_stories/stories_conflicting_6.md b/data/test_stories/stories_conflicting_6.md new file mode 100644 index 000000000000..f58dc258078e --- /dev/null +++ b/data/test_stories/stories_conflicting_6.md @@ -0,0 +1,22 @@ +## 
story 1 +* greet + - utter_greet + +## story 2 +* greet + - utter_default + +## story 3 +* greet + - utter_default +* greet + +## story 4 +* greet + - utter_default +* default + +## story 5 +* greet + - utter_default +* goodbye diff --git a/rasa/cli/initial_project/data/stories.md b/data/test_stories/stories_retrieval_intents.md similarity index 90% rename from rasa/cli/initial_project/data/stories.md rename to data/test_stories/stories_retrieval_intents.md index 306c3bbbbe36..a447a4e4cf1e 100644 --- a/rasa/cli/initial_project/data/stories.md +++ b/data/test_stories/stories_retrieval_intents.md @@ -29,3 +29,7 @@ ## bot challenge * bot_challenge - utter_iamabot + +## chitchat +* chitchat + - respond_chitchat \ No newline at end of file diff --git a/data/test_stories/stories_with_rules.md b/data/test_stories/stories_with_rules.md new file mode 100644 index 000000000000..bf9ce2e940c1 --- /dev/null +++ b/data/test_stories/stories_with_rules.md @@ -0,0 +1,48 @@ +>> rule 1 + - form{"name": "loop_q_form"} + - slot{"requested_slot": "some_slot"} + - ... +* inform{"some_slot":"bla"} + - loop_q_form + +>> rule 2 + - form{"name": "loop_q_form"} + - slot{"requested_slot": "some_slot"} + - ... +* explain + - utter_explain_some_slot + - loop_q_form + - form{"name": "loop_q_form"} + +## ML story 1 +* greet + - utter_greet +* request_restaurant + - restaurant_form + - form{"name": "restaurant_form"} + - form{"name": null} + - utter_slots_values +* thankyou + - utter_noworries + +>> rule 3 + - form{"name": "loop_q_form"} + - ... + - loop_q_form + - form{"name": null} + - slot{"requested_slot": null} + - action_stop_q_form + +## ML story 2 +* greet + - utter_greet +* request_restaurant + - restaurant_form + - form{"name": "restaurant_form"} +* chitchat + - utter_chitchat + - restaurant_form + - form{"name": null} + - utter_slots_values +* thankyou + - utter_noworries diff --git a/data/test_tokenizers/naughty_strings.json b/data/test_tokenizers/naughty_strings.json new file mode 100644 index 000000000000..56290bfa8391 --- /dev/null +++ b/data/test_tokenizers/naughty_strings.json @@ -0,0 +1,517 @@ +[ + "", + "undefined", + "undef", + "null", + "NULL", + "(null)", + "nil", + "NIL", + "true", + "false", + "True", + "False", + "TRUE", + "FALSE", + "None", + "hasOwnProperty", + "then", + "\\", + "\\\\", + "0", + "1", + "1.00", + "$1.00", + "1/2", + "1E2", + "1E02", + "1E+02", + "-1", + "-1.00", + "-$1.00", + "-1/2", + "-1E2", + "-1E02", + "-1E+02", + "1/0", + "0/0", + "-2147483648/-1", + "-9223372036854775808/-1", + "-0", + "-0.0", + "+0", + "+0.0", + "0.00", + "0..0", + ".", + "0.0.0", + "0,00", + "0,,0", + ",", + "0,0,0", + "0.0/0", + "1.0/0.0", + "0.0/0.0", + "1,0/0,0", + "0,0/0,0", + "--1", + "-", + "-.", + "-,", + "999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999", + "NaN", + "Infinity", + "-Infinity", + "INF", + "1#INF", + "-1#IND", + "1#QNAN", + "1#SNAN", + "1#IND", + "0x0", + "0xffffffff", + "0xffffffffffffffff", + "0xabad1dea", + "123456789012345678901234567890123456789", + "1,000.00", + "1 000.00", + "1'000.00", + "1,000,000.00", + "1 000 000.00", + "1'000'000.00", + "1.000,00", + "1 000,00", + "1'000,00", + "1.000.000,00", + "1 000 000,00", + "1'000'000,00", + "01000", + "08", + "09", + "2.2250738585072011e-308", + ",./;'[]\\-=", + "<>?:\"{}|_+", + "!@#$%^&*()`~", + "\u0001\u0002\u0003\u0004\u0005\u0006\u0007\b\u000e\u000f\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f", + "€‚ƒ„†‡ˆ‰Š‹ŒŽ‘’“”•–—˜™š›œžŸ", 
+ "\t\u000b\f …             ​

   ", + "­؀؁؂؃؄؅؜۝܏᠎​‌‍‎‏‪‫‬‭‮⁠⁡⁢⁣⁤⁦⁧⁨⁩𑂽𛲠𛲡𛲢𛲣𝅳𝅴𝅵𝅶𝅷𝅸𝅹𝅺󠀁󠀠󠀡󠀢󠀣󠀤󠀥󠀦󠀧󠀨󠀩󠀪󠀫󠀬󠀭󠀮󠀯󠀰󠀱󠀲󠀳󠀴󠀵󠀶󠀷󠀸󠀹󠀺󠀻󠀼󠀽󠀾󠀿󠁀󠁁󠁂󠁃󠁄󠁅󠁆󠁇󠁈󠁉󠁊󠁋󠁌󠁍󠁎󠁏󠁐󠁑󠁒󠁓󠁔󠁕󠁖󠁗󠁘󠁙󠁚󠁛󠁜󠁝󠁞󠁟󠁠󠁡󠁢󠁣󠁤󠁥󠁦󠁧󠁨󠁩󠁪󠁫󠁬󠁭󠁮󠁯󠁰󠁱󠁲󠁳󠁴󠁵󠁶󠁷󠁸󠁹󠁺󠁻󠁼󠁽󠁾󠁿", + "", + "￾", + "Ω≈ç√∫˜µ≤≥÷", + "åß∂ƒ©˙∆˚¬…æ", + "œ∑´®†¥¨ˆøπ“‘", + "¡™£¢∞§¶•ªº–≠", + "¸˛Ç◊ı˜Â¯˘¿", + "ÅÍÎÏ˝ÓÔÒÚÆ☃", + "Œ„´‰ˇÁ¨ˆØ∏”’", + "`⁄€‹›fifl‡°·‚—±", + "⅛⅜⅝⅞", + "ЁЂЃЄЅІЇЈЉЊЋЌЍЎЏАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюя", + "٠١٢٣٤٥٦٧٨٩", + "⁰⁴⁵", + "₀₁₂", + "⁰⁴⁵₀₁₂", + "ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็ ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็ ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็", + "'", + "\"", + "''", + "\"\"", + "'\"'", + "\"''''\"'\"", + "\"'\"'\"''''\"", + "", + "", + "", + "", + "田中さんにあげて下さい", + "パーティーへ行かないか", + "和製漢語", + "部落格", + "사회과학원 어학연구소", + "찦차를 타고 온 펲시맨과 쑛다리 똠방각하", + "社會科學院語學研究所", + "울란바토르", + "𠜎𠜱𠝹𠱓𠱸𠲖𠳏", + "𐐜 𐐔𐐇𐐝𐐀𐐡𐐇𐐓 𐐙𐐊𐐡𐐝𐐓/𐐝𐐇𐐗𐐊𐐤𐐔 𐐒𐐋𐐗 𐐒𐐌 𐐜 𐐡𐐀𐐖𐐇𐐤𐐓𐐝 𐐱𐑂 𐑄 𐐔𐐇𐐝𐐀𐐡𐐇𐐓 𐐏𐐆𐐅𐐤𐐆𐐚𐐊𐐡𐐝𐐆𐐓𐐆", + "表ポあA鷗ŒéB逍Üߪąñ丂㐀𠀀", + "Ⱥ", + "Ⱦ", + "ヽ༼ຈل͜ຈ༽ノ ヽ༼ຈل͜ຈ༽ノ", + "(。◕ ∀ ◕。)", + "`ィ(´∀`∩", + "__ロ(,_,*)", + "・( ̄∀ ̄)・:*:", + "゚・✿ヾ╲(。◕‿◕。)╱✿・゚", + ",。・:*:・゜’( ☻ ω ☻ )。・:*:・゜’", + "(╯°□°)╯︵ ┻━┻)", + "(ノಥ益ಥ)ノ ┻━┻", + "┬─┬ノ( º _ ºノ)", + "( ͡° ͜ʖ ͡°)", + "¯\\_(ツ)_/¯", + "😍", + "👩🏽", + "👨‍🦰 👨🏿‍🦰 👨‍🦱 👨🏿‍🦱 🦹🏿‍♂️", + "👾 🙇 💁 🙅 🙆 🙋 🙎 🙍", + "🐵 🙈 🙉 🙊", + "❤️ 💔 💌 💕 💞 💓 💗 💖 💘 💝 💟 💜 💛 💚 💙", + "✋🏿 💪🏿 👐🏿 🙌🏿 👏🏿 🙏🏿", + "👨‍👩‍👦 👨‍👩‍👧‍👦 👨‍👨‍👦 👩‍👩‍👧 👨‍👦 👨‍👧‍👦 👩‍👦 👩‍👧‍👦", + "🚾 🆒 🆓 🆕 🆖 🆗 🆙 🏧", + "0️⃣ 1️⃣ 2️⃣ 3️⃣ 4️⃣ 5️⃣ 6️⃣ 7️⃣ 8️⃣ 9️⃣ 🔟", + "🇺🇸🇷🇺🇸 🇦🇫🇦🇲🇸", + "🇺🇸🇷🇺🇸🇦🇫🇦🇲", + "🇺🇸🇷🇺🇸🇦", + "123", + "١٢٣", + "ثم نفس سقطت وبالتحديد،, جزيرتي باستخدام أن دنو. إذ هنا؟ الستار وتنصيب كان. أهّل ايطاليا، بريطانيا-فرنسا قد أخذ. سليمان، إتفاقية بين ما, يذكر الحدود أي بعد, معاملة بولندا، الإطلاق عل إيو.", + "בְּרֵאשִׁית, בָּרָא אֱלֹהִים, אֵת הַשָּׁמַיִם, וְאֵת הָאָרֶץ", + "הָיְתָהtestالصفحات التّحول", + "﷽", + "ﷺ", + "مُنَاقَشَةُ سُبُلِ اِسْتِخْدَامِ اللُّغَةِ فِي النُّظُمِ الْقَائِمَةِ وَفِيم يَخُصَّ التَّطْبِيقَاتُ الْحاسُوبِيَّةُ، ", + "᚛ᚄᚓᚐᚋᚒᚄ ᚑᚄᚂᚑᚏᚅ᚜‪‪‪", + "‪‪᚛                 ᚜‪", + "‪‪test‪", + "‫test‫", + "
test
", + "test⁠test‫", + "⁦test⁧", + "Ṱ̺̺̕o͞ ̷i̲̬͇̪͙n̝̗͕v̟̜̘̦͟o̶̙̰̠kè͚̮̺̪̹̱̤ ̖t̝͕̳̣̻̪͞h̼͓̲̦̳̘̲e͇̣̰̦̬͎ ̢̼̻̱̘h͚͎͙̜̣̲ͅi̦̲̣̰̤v̻͍e̺̭̳̪̰-m̢iͅn̖̺̞̲̯̰d̵̼̟͙̩̼̘̳ ̞̥̱̳̭r̛̗̘e͙p͠r̼̞̻̭̗e̺̠̣͟s̘͇̳͍̝͉e͉̥̯̞̲͚̬͜ǹ̬͎͎̟̖͇̤t͍̬̤͓̼̭͘ͅi̪̱n͠g̴͉ ͏͉ͅc̬̟h͡a̫̻̯͘o̫̟̖͍̙̝͉s̗̦̲.̨̹͈̣", + "̡͓̞ͅI̗̘̦͝n͇͇͙v̮̫ok̲̫̙͈i̖͙̭̹̠̞n̡̻̮̣̺g̲͈͙̭͙̬͎ ̰t͔̦h̞̲e̢̤ ͍̬̲͖f̴̘͕̣è͖ẹ̥̩l͖͔͚i͓͚̦͠n͖͍̗͓̳̮g͍ ̨o͚̪͡f̘̣̬ ̖̘͖̟͙̮c҉͔̫͖͓͇͖ͅh̵̤̣͚͔á̗̼͕ͅo̼̣̥s̱͈̺̖̦̻͢.̛̖̞̠̫̰", + "̗̺͖̹̯͓Ṯ̤͍̥͇͈h̲́e͏͓̼̗̙̼̣͔ ͇̜̱̠͓͍ͅN͕͠e̗̱z̘̝̜̺͙p̤̺̹͍̯͚e̠̻̠͜r̨̤͍̺̖͔̖̖d̠̟̭̬̝͟i̦͖̩͓͔̤a̠̗̬͉̙n͚͜ ̻̞̰͚ͅh̵͉i̳̞v̢͇ḙ͎͟-҉̭̩̼͔m̤̭̫i͕͇̝̦n̗͙ḍ̟ ̯̲͕͞ǫ̟̯̰̲͙̻̝f ̪̰̰̗̖̭̘͘c̦͍̲̞͍̩̙ḥ͚a̮͎̟̙͜ơ̩̹͎s̤.̝̝ ҉Z̡̖̜͖̰̣͉̜a͖̰͙̬͡l̲̫̳͍̩g̡̟̼̱͚̞̬ͅo̗͜.̟", + "̦H̬̤̗̤͝e͜ ̜̥̝̻͍̟́w̕h̖̯͓o̝͙̖͎̱̮ ҉̺̙̞̟͈W̷̼̭a̺̪͍į͈͕̭͙̯̜t̶̼̮s̘͙͖̕ ̠̫̠B̻͍͙͉̳ͅe̵h̵̬͇̫͙i̹͓̳̳̮͎̫̕n͟d̴̪̜̖ ̰͉̩͇͙̲͞ͅT͖̼͓̪͢h͏͓̮̻e̬̝̟ͅ ̤̹̝W͙̞̝͔͇͝ͅa͏͓͔̹̼̣l̴͔̰̤̟͔ḽ̫.͕", + "Z̮̞̠͙͔ͅḀ̗̞͈̻̗Ḷ͙͎̯̹̞͓G̻O̭̗̮", + "˙ɐnbᴉlɐ ɐuƃɐɯ ǝɹolop ʇǝ ǝɹoqɐl ʇn ʇunpᴉpᴉɔuᴉ ɹodɯǝʇ poɯsnᴉǝ op pǝs 'ʇᴉlǝ ƃuᴉɔsᴉdᴉpɐ ɹnʇǝʇɔǝsuoɔ 'ʇǝɯɐ ʇᴉs ɹolop ɯnsdᴉ ɯǝɹo˥", + "00˙Ɩ$-", + "The quick brown fox jumps over the lazy dog", + "𝐓𝐡𝐞 𝐪𝐮𝐢𝐜𝐤 𝐛𝐫𝐨𝐰𝐧 𝐟𝐨𝐱 𝐣𝐮𝐦𝐩𝐬 𝐨𝐯𝐞𝐫 𝐭𝐡𝐞 𝐥𝐚𝐳𝐲 𝐝𝐨𝐠", + "𝕿𝖍𝖊 𝖖𝖚𝖎𝖈𝖐 𝖇𝖗𝖔𝖜𝖓 𝖋𝖔𝖝 𝖏𝖚𝖒𝖕𝖘 𝖔𝖛𝖊𝖗 𝖙𝖍𝖊 𝖑𝖆𝖟𝖞 𝖉𝖔𝖌", + "𝑻𝒉𝒆 𝒒𝒖𝒊𝒄𝒌 𝒃𝒓𝒐𝒘𝒏 𝒇𝒐𝒙 𝒋𝒖𝒎𝒑𝒔 𝒐𝒗𝒆𝒓 𝒕𝒉𝒆 𝒍𝒂𝒛𝒚 𝒅𝒐𝒈", + "𝓣𝓱𝓮 𝓺𝓾𝓲𝓬𝓴 𝓫𝓻𝓸𝔀𝓷 𝓯𝓸𝔁 𝓳𝓾𝓶𝓹𝓼 𝓸𝓿𝓮𝓻 𝓽𝓱𝓮 𝓵𝓪𝔃𝔂 𝓭𝓸𝓰", + "𝕋𝕙𝕖 𝕢𝕦𝕚𝕔𝕜 𝕓𝕣𝕠𝕨𝕟 𝕗𝕠𝕩 𝕛𝕦𝕞𝕡𝕤 𝕠𝕧𝕖𝕣 𝕥𝕙𝕖 𝕝𝕒𝕫𝕪 𝕕𝕠𝕘", + "𝚃𝚑𝚎 𝚚𝚞𝚒𝚌𝚔 𝚋𝚛𝚘𝚠𝚗 𝚏𝚘𝚡 𝚓𝚞𝚖𝚙𝚜 𝚘𝚟𝚎𝚛 𝚝𝚑𝚎 𝚕𝚊𝚣𝚢 𝚍𝚘𝚐", + "⒯⒣⒠ ⒬⒰⒤⒞⒦ ⒝⒭⒪⒲⒩ ⒡⒪⒳ ⒥⒰⒨⒫⒮ ⒪⒱⒠⒭ ⒯⒣⒠ ⒧⒜⒵⒴ ⒟⒪⒢", + "", + "<script>alert('123');</script>", + "", + "", + "\">", + "'>", + ">", + "", + "< / script >< script >alert(123)< / script >", + " onfocus=JaVaSCript:alert(123) autofocus", + "\" onfocus=JaVaSCript:alert(123) autofocus", + "' onfocus=JaVaSCript:alert(123) autofocus", + "<script>alert(123)</script>", + "ript>alert(123)ript>", + "-->", + "\";alert(123);t=\"", + "';alert(123);t='", + "JavaSCript:alert(123)", + ";alert(123);", + "src=JaVaSCript:prompt(132)", + "\"><\\x3Cscript>javascript:alert(1)", + "'`\"><\\x00script>javascript:alert(1)", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "ABC
DEF", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "test", + "`\"'>", + "`\"'>", + "`\"'>", + "`\"'>", + "`\"'>", + "`\"'>", + "`\"'>", + "`\"'>", + "`\"'>", + "`\"'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "\"`'>", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "XXX", + "javascript:alert(1)\"` `>", + "", + "", + "<a href=http://foo.bar/#x=`y></a><img alt=\"`><img src=x:x onerror=javascript:alert(1)></a>\">", + "<!--[if]><script>javascript:alert(1)</script -->", + "<!--[if<img src=x onerror=javascript:alert(1)//]> -->", + "<script src=\"/\\%(jscript)s\"></script>", + "<script src=\"\\\\%(jscript)s\"></script>", + "<IMG \"\"\"><SCRIPT>alert(\"XSS\")</SCRIPT>\">", + "<IMG SRC=javascript:alert(String.fromCharCode(88,83,83))>", + "<IMG SRC=# onmouseover=\"alert('xxs')\">", + "<IMG SRC= onmouseover=\"alert('xxs')\">", + "<IMG onmouseover=\"alert('xxs')\">", + "<IMG SRC=javascript:alert('XSS')>", + "<IMG SRC=javascript:alert('XSS')>", + "<IMG SRC=javascript:alert('XSS')>", + "<IMG SRC=\"jav ascript:alert('XSS');\">", + "<IMG SRC=\"jav ascript:alert('XSS');\">", + "<IMG SRC=\"jav ascript:alert('XSS');\">", + "<IMG SRC=\"jav ascript:alert('XSS');\">", + "perl -e 'print \"<IMG SRC=java\\0script:alert(\\\"XSS\\\")>\";' > out", + "<IMG SRC=\"  javascript:alert('XSS');\">", + "<SCRIPT/XSS SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT>", + "<BODY onload!#$%&()*~+-_.,:;?@[/|\\]^`=alert(\"XSS\")>", + "<SCRIPT/SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT>", + "<<SCRIPT>alert(\"XSS\");//<</SCRIPT>", + "<SCRIPT SRC=http://ha.ckers.org/xss.js?< B >", + "<SCRIPT SRC=//ha.ckers.org/.j>", + "<IMG SRC=\"javascript:alert('XSS')\"", + "<iframe src=http://ha.ckers.org/scriptlet.html <", + "\\\";alert('XSS');//", + "<u oncopy=alert()> Copy me</u>", + "<i onwheel=alert(1)> Scroll over me </i>", + "<plaintext>", + "http://a/%%30%30", + "</textarea><script>alert(123)</script>", + "1;DROP TABLE users", + "1'; DROP TABLE users-- 1", + "' OR 1=1 -- 1", + "' OR '1'='1", + "'; EXEC sp_MSForEachTable 'DROP TABLE ?'; --", + " ", + "%", + "_", + "-", + "--", + "--version", + "--help", + "$USER", + "/dev/null; touch /tmp/blns.fail ; echo", + "`touch /tmp/blns.fail`", + "$(touch /tmp/blns.fail)", + "@{[system \"touch /tmp/blns.fail\"]}", + "eval(\"puts 'hello world'\")", + "System(\"ls -al /\")", + "`ls -al /`", + "Kernel.exec(\"ls -al /\")", + "Kernel.exit(1)", + "%x('ls -al /')", + "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><!DOCTYPE foo [ <!ELEMENT foo ANY ><!ENTITY xxe SYSTEM \"file:///etc/passwd\" 
>]><foo>&xxe;</foo>", + "$HOME", + "$ENV{'HOME'}", + "%d", + "%s%s%s%s%s", + "{0}", + "%*.*s", + "%@", + "%n", + "File:///", + "../../../../../../../../../../../etc/passwd%00", + "../../../../../../../../../../../etc/hosts", + "() { 0; }; touch /tmp/blns.shellshock1.fail;", + "() { _; } >_[$($())] { touch /tmp/blns.shellshock2.fail; }", + "<<< %s(un='%s') = %u", + "+++ATH0", + "CON", + "PRN", + "AUX", + "CLOCK$", + "NUL", + "A:", + "ZZ:", + "COM1", + "LPT1", + "LPT2", + "LPT3", + "COM2", + "COM3", + "COM4", + "DCC SEND STARTKEYLOGGER 0 0 0", + "Scunthorpe General Hospital", + "Penistone Community Church", + "Lightwater Country Park", + "Jimmy Clitheroe", + "Horniman Museum", + "shitake mushrooms", + "RomansInSussex.co.uk", + "http://www.cum.qc.ca/", + "Craig Cockburn, Software Specialist", + "Linda Callahan", + "Dr. Herman I. Libshitz", + "magna cum laude", + "Super Bowl XXX", + "medieval erection of parapets", + "evaluate", + "mocha", + "expression", + "Arsenal canal", + "classic", + "Tyson Gay", + "Dick Van Dyke", + "basement", + "If you're reading this, you've been in a coma for almost 20 years now. We're trying a new technique. We don't know where this message will end up in your dream, but we hope it works. Please wake up, we miss you.", + "Roses are \u001b[0;31mred\u001b[0m, violets are \u001b[0;34mblue. Hope you enjoy terminal hue", + "But now...\u001b[20Cfor my greatest trick...\u001b[8m", + "The quic\b\b\b\b\b\bk brown fo\u0007\u0007\u0007\u0007\u0007\u0007\u0007\u0007\u0007\u0007\u0007x... [Beeeep]", + "Powerلُلُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ冗", + "🏳0🌈️", + "జ్ఞ‌ా", + "گچپژ", + "{% print 'x' * 64 * 1024**3 %}", + "{{ \"\".__class__.__mro__[2].__subclasses__()[40](\"/etc/passwd\").read() }}" +] \ No newline at end of file diff --git a/data/test_trackers/tracker_moodbot.json b/data/test_trackers/tracker_moodbot.json index cf36fde2fe36..4103cedf0f66 100644 --- a/data/test_trackers/tracker_moodbot.json +++ b/data/test_trackers/tracker_moodbot.json @@ -23,7 +23,7 @@ }, "sender_id": "mysender", "latest_action_name": "action_listen", - "active_form": {}, + "active_loop": {}, "paused": false, "latest_event_time": 1517821726.211042, "followup_action": null, @@ -39,6 +39,7 @@ }, { "timestamp": 1517821726.200036, + "metadata": {}, "parse_data": { "entities": [], "intent": { @@ -63,7 +64,7 @@ }, "event": "user", "text": "/greet", - "input_channel": null, "message_id": null, "metadata": null + "input_channel": null, "message_id": null, "metadata": {} }, { "timestamp": 1517821726.200373, @@ -81,6 +82,7 @@ }, { "timestamp": 1517821726.209836, + "metadata": {}, "parse_data": { "entities": [], "intent": { @@ -105,13 +107,13 @@ }, "event": "user", "text": "/mood_great", - "input_channel": null, "message_id": null, "metadata": null + "input_channel": null, "message_id": null, "metadata": {} }, { "timestamp": 1517821726.209908, "event": "action", "name": "utter_happy", - "policy": "policy_1_KerasPolicy", + "policy": "policy_1_TEDPolicy", "confidence": 0.8 }, { diff --git a/data/test_wrong_yaml_stories/intent_with_leading_slash.yml b/data/test_wrong_yaml_stories/intent_with_leading_slash.yml new file mode 100644 index 000000000000..5b9a51f940e5 --- /dev/null +++ b/data/test_wrong_yaml_stories/intent_with_leading_slash.yml @@ -0,0 +1,6 @@ +stories: +- story: simple_story_without_checkpoint + steps: + - intent: /simple + - action: utter_default + - action: utter_greet diff --git a/data/test_wrong_yaml_stories/wrong_yaml.yml b/data/test_wrong_yaml_stories/wrong_yaml.yml new file mode 100644 index 
000000000000..26951bc3f040 --- /dev/null +++ b/data/test_wrong_yaml_stories/wrong_yaml.yml @@ -0,0 +1 @@ +[dasdassd, diff --git a/data/test_yaml_stories/rules_without_stories.yml b/data/test_yaml_stories/rules_without_stories.yml new file mode 100644 index 000000000000..71b41d0b1f0a --- /dev/null +++ b/data/test_yaml_stories/rules_without_stories.yml @@ -0,0 +1,36 @@ +rules: +- rule: Rule with condition + condition: + - active_loop: loop_q_form + - slot_was_set: + - requested_slot: some_slot + steps: + - intent: inform + entities: + - some_slot: bla + - action: loop_q_form + +- rule: Rule without condition + steps: + - intent: explain + - action: utter_explain_some_slot + - action: loop_q_form + - active_loop: loop_q_form + +- rule: Rule which explicitly waits for user input when finished + steps: + - intent: explain + - action: utter_explain_some_slot + wait_for_user_input: True + +- rule: Rule after which another action should be predicted + steps: + - intent: explain + - action: utter_explain_some_slot + wait_for_user_input: False + +- rule: Rule which only applies to conversation start + conversation_start: True + steps: + - intent: explain + - action: utter_explain_some_slot diff --git a/data/test_yaml_stories/simple_story_with_only_end.yml b/data/test_yaml_stories/simple_story_with_only_end.yml new file mode 100644 index 000000000000..08e33164c76e --- /dev/null +++ b/data/test_yaml_stories/simple_story_with_only_end.yml @@ -0,0 +1,7 @@ +stories: +- story: simple_story_with_only_end + steps: + - intent: hello + - action: utter_greet + - slot_was_set: + - name: peter diff --git a/data/test_yaml_stories/stories.yml b/data/test_yaml_stories/stories.yml new file mode 100644 index 000000000000..695b82782055 --- /dev/null +++ b/data/test_yaml_stories/stories.yml @@ -0,0 +1,47 @@ +stories: +- story: simple_story_without_checkpoint + steps: + - intent: simple + - action: utter_default + - action: utter_greet + +- story: simple_story_with_only_start + steps: + - checkpoint: check_greet # checkpoints at the start define entry points + - intent: simple + - action: utter_default + +- story: simple_story_with_only_end + steps: + - intent: hello + - action: utter_greet + - slot_was_set: + - name: peter + - checkpoint: check_greet # checkpoint defining the end of this turn + +- story: simple_story_with_multiple_turns + steps: + - or: + - intent: affirm + - intent: thank_you + - action: utter_default + - intent: goodbye + - action: utter_goodbye + - checkpoint: check_goodbye + +- story: why does the user want to leave? 
+ steps: + - checkpoint: check_goodbye + - intent: why + - action: utter_default + - checkpoint: check_greet + +- story: show_it_all + steps: + - checkpoint: check_greet + - checkpoint: check_hello # allows multiple entry points + - intent: next_intent + - action: utter_greet # actions taken by the bot + - checkpoint: check_intermediate # allows intermediate checkpoints + - intent: change_bank_details + - action: utter_default # allows to end without checkpoints diff --git a/data/test_yaml_stories/stories_and_rules.yml b/data/test_yaml_stories/stories_and_rules.yml new file mode 100644 index 000000000000..8a21d9b68191 --- /dev/null +++ b/data/test_yaml_stories/stories_and_rules.yml @@ -0,0 +1,49 @@ +rules: +- rule: rule 1 + condition: + - active_loop: loop_q_form + - slot_was_set: + - requested_slot: some_slot + steps: + - intent: inform + entities: + - some_slot: bla + - action: loop_q_form + +- rule: rule 2 + condition: + - active_loop: loop_q_form + - slot_was_set: + - requested_slot: some_slot + steps: + - intent: explain + - action: utter_explain_some_slot + - action: loop_q_form + - active_loop: loop_q_form + +- rule: rule 3 + steps: + - action: loop_q_form + - active_loop: null + - action: stop_q_form + +stories: +- story: simple_story_without_checkpoint + steps: + - intent: simple + - action: utter_default + - action: utter_greet + +- story: simple_story_with_only_start + steps: + - checkpoint: check_greet # checkpoints at the start define entry points + - intent: simple + - action: utter_default + +- story: simple_story_with_only_end + steps: + - intent: hello + - action: utter_greet + - slot_was_set: + - name: peter + - checkpoint: check_greet # checkpoint defining the end of this turn diff --git a/data/test_yaml_stories/stories_checkpoint_after_or.yml b/data/test_yaml_stories/stories_checkpoint_after_or.yml new file mode 100644 index 000000000000..68b661d2c424 --- /dev/null +++ b/data/test_yaml_stories/stories_checkpoint_after_or.yml @@ -0,0 +1,14 @@ +stories: +- story: story with checkpoint after or + steps: + - or: + - intent: affirm + - intent: thank_you + - checkpoint: check_after_or + +- story: story to continue checkpoint + steps: + - checkpoint: check_after_or + - action: utter_default + - intent: goodbye + - action: utter_goodbye diff --git a/data/test_yaml_stories/stories_defaultdomain.yml b/data/test_yaml_stories/stories_defaultdomain.yml new file mode 100644 index 000000000000..609613f3a5dc --- /dev/null +++ b/data/test_yaml_stories/stories_defaultdomain.yml @@ -0,0 +1,25 @@ +stories: +- story: simple_story_with_only_start + steps: + - checkpoint: check_greet # checkpoints at the start define entry points + - intent: default + - action: utter_default + +- story: simple_story_with_only_end + steps: + - or: + - intent: greet + - intent: greet + entities: + - name: Peter + - action: utter_greet + - checkpoint: check_greet # checkpoint defining the end of this turn + +- story: simple_story_with_multiple_turns + steps: + - intent: greet + - action: utter_greet + - intent: default + - action: utter_default + - intent: goodbye + - action: utter_goodbye diff --git a/data/test_yaml_stories/stories_restart.yml b/data/test_yaml_stories/stories_restart.yml new file mode 100644 index 000000000000..c48e49cfe89b --- /dev/null +++ b/data/test_yaml_stories/stories_restart.yml @@ -0,0 +1,8 @@ +stories: +- story: simple_story_with_restart + steps: + - intent: greet + - action: utter_greet + - intent: goodbye + - action: action_restart + - action: restart diff --git 
a/data/test_yaml_stories/stories_unused_checkpoints.yml b/data/test_yaml_stories/stories_unused_checkpoints.yml new file mode 100644 index 000000000000..5260347ea08f --- /dev/null +++ b/data/test_yaml_stories/stories_unused_checkpoints.yml @@ -0,0 +1,23 @@ +stories: +- story: simple_story_with_end_checkpoint_1 + steps: + - intent: simple + - action: utter_default + - action: utter_greet + - checkpoint: check_end_1 + +- story: simple_story_with_end_checkpoint_2 + steps: + - intent: hello + - action: utter_greet + - slot_was_set: + - name: peter + - checkpoint: check_end_2 + +- story: simple_story_with_start + steps: + - checkpoint: check_start + - intent: hello + - action: utter_greet + - slot_was_set: + - name: peter diff --git a/data/test_yaml_stories/stories_with_cycle.yml b/data/test_yaml_stories/stories_with_cycle.yml new file mode 100644 index 000000000000..1063d6693b52 --- /dev/null +++ b/data/test_yaml_stories/stories_with_cycle.yml @@ -0,0 +1,38 @@ +stories: +- story: utter greet + steps: + - intent: greet + - action: utter_greet + - checkpoint: get_name + +- story: user no name + steps: + - checkpoint: get_name + - intent: default + entities: + - name: null + - checkpoint: process_name + +- story: user sends name + steps: + - checkpoint: get_name + - intent: default + entities: + - name: "Josh" + - checkpoint: process_name + +- story: goodbye + steps: + - checkpoint: process_name + slot_was_set: + - name: "Josh" + - action: utter_goodbye + - action: action_restart + +- story: utter default + steps: + - checkpoint: process_name + slot_was_set: + - name: null + - action: utter_default + - checkpoint: get_name diff --git a/data/test_yaml_stories/story_with_or_and_entities.yml b/data/test_yaml_stories/story_with_or_and_entities.yml new file mode 100644 index 000000000000..e4e286f50157 --- /dev/null +++ b/data/test_yaml_stories/story_with_or_and_entities.yml @@ -0,0 +1,10 @@ +stories: +- story: story with or and entities + steps: + - or: + - intent: greet + - intent: greet + entities: + # slot with the same name was autofilled + - name: peter + - action: utter_greet diff --git a/data/test_yaml_stories/story_with_or_and_entities_with_no_value.yml b/data/test_yaml_stories/story_with_or_and_entities_with_no_value.yml new file mode 100644 index 000000000000..20ddfeab4cc4 --- /dev/null +++ b/data/test_yaml_stories/story_with_or_and_entities_with_no_value.yml @@ -0,0 +1,8 @@ +stories: +- story: story with or and entities with no value + steps: + - intent: greet + entities: + # slot with the same name was NOT autofilled + - name + - action: utter_greet diff --git a/data/test_yaml_stories/story_with_slot_was_set.yml b/data/test_yaml_stories/story_with_slot_was_set.yml new file mode 100644 index 000000000000..a641b4350441 --- /dev/null +++ b/data/test_yaml_stories/story_with_slot_was_set.yml @@ -0,0 +1,5 @@ +stories: +- story: story with slot + steps: + - slot_was_set: + - name \ No newline at end of file diff --git a/docker/Dockerfile_full b/docker/Dockerfile_full index 280db163b2d7..d628cab2a019 100644 --- a/docker/Dockerfile_full +++ b/docker/Dockerfile_full @@ -1,14 +1,14 @@ -FROM python:3.6-slim as builder -# if this installation process changes, the enterprise container needs to be -# updated as well -WORKDIR /build -COPY . . 
-RUN python setup.py sdist bdist_wheel -RUN find dist -maxdepth 1 -mindepth 1 -name '*.tar.gz' -print0 | xargs -0 -I {} mv {} rasa.tar.gz +FROM python:3.7-slim as base -FROM python:3.6-slim +RUN apt-get update -qq \ + && apt-get install -y --no-install-recommends \ + # required by psycopg2 at build and runtime + libpq-dev \ + # required for health check + curl \ + && apt-get autoremove -y -SHELL ["/bin/bash", "-c"] +FROM base as builder RUN apt-get update -qq && \ apt-get install -y --no-install-recommends \ @@ -22,43 +22,64 @@ RUN apt-get update -qq && \ libssl-dev \ libffi6 \ libffi-dev \ - libpng-dev \ - libpq-dev \ - curl && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \ - mkdir /install && \ - mkdir /app + libpng-dev + +# install poetry +# keep this in sync with the version in pyproject.toml and Dockerfile +ENV POETRY_VERSION 1.0.5 +RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python +ENV PATH "/root/.poetry/bin:/opt/venv/bin:${PATH}" -WORKDIR /install +# copy files +COPY . /build/ +COPY docker/configs/config_pretrained_embeddings_spacy_en_duckling.yml /build/config.yml -# Copy as early as possible so we can cache ... -COPY alt_requirements/ ./alt_requirements -COPY requirements.txt . +# download mitie model +RUN wget -P /build/data/ https://s3-eu-west-1.amazonaws.com/mitie/total_word_feature_extractor.dat -RUN pip install -r alt_requirements/requirements_full.txt +# change working directory +WORKDIR /build -COPY --from=builder /build/rasa.tar.gz . -RUN pip install ./rasa.tar.gz[sql,spacy,mitie] +# install dependencies +RUN python -m venv /opt/venv && \ + . /opt/venv/bin/activate && \ + pip install --no-cache-dir -U 'pip<20' && \ + poetry install --extras full --no-dev --no-root --no-interaction && \ + make install-mitie && \ + poetry build -f wheel -n && \ + pip install --no-deps dist/*.whl && \ + rm -rf dist *.egg-info -RUN apt-get update -qq \ - && apt-get install -y --no-install-recommends wget \ - && wget -P /app/data/ https://s3-eu-west-1.amazonaws.com/mitie/total_word_feature_extractor.dat \ - && apt-get remove -y wget \ - && apt-get autoremove -y +# make sure we use the virtualenv +ENV PATH="/opt/venv/bin:$PATH" + +# spacy link +RUN python -m spacy download en_core_web_md && \ + python -m spacy download de_core_news_sm && \ + python -m spacy link en_core_web_md en && \ + python -m spacy link de_core_news_sm de -RUN pip install https://github.com/explosion/spacy-models/releases/download/en_core_web_md-2.1.0/en_core_web_md-2.1.0.tar.gz#egg=en_core_web_md==2.1.0 --no-cache-dir > /dev/null \ - && python -m spacy link en_core_web_md en \ - && pip install https://github.com/explosion/spacy-models/releases/download/de_core_news_sm-2.1.0/de_core_news_sm-2.1.0.tar.gz#egg=de_core_news_sm==2.1.0 --no-cache-dir > /dev/null \ - && python -m spacy link de_core_news_sm de +# start a new build stage +FROM base as runner -COPY sample_configs/config_pretrained_embeddings_spacy_duckling.yml /app/config.yml +# copy everything from /opt +COPY --from=builder /opt/venv /opt/venv -VOLUME ["/app"] +# make sure we use the virtualenv +ENV PATH="/opt/venv/bin:$PATH" + +# update permissions & change user to not run as root WORKDIR /app +RUN chgrp -R 0 /app && chmod -R g=u /app +USER 1001 -EXPOSE 5005 +# Create a volume for temporary data +VOLUME /tmp -ENTRYPOINT ["rasa"] +# change shell +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +# the entry point +EXPOSE 5005 +ENTRYPOINT ["rasa"] CMD ["--help"] diff --git 
a/docker/Dockerfile_pretrained_embeddings_mitie_en b/docker/Dockerfile_pretrained_embeddings_mitie_en index 18d929da8be1..346517570ac6 100644 --- a/docker/Dockerfile_pretrained_embeddings_mitie_en +++ b/docker/Dockerfile_pretrained_embeddings_mitie_en @@ -1,14 +1,14 @@ -FROM python:3.6-slim as builder -# if this installation process changes, the enterprise container needs to be -# updated as well -WORKDIR /build -COPY . . -RUN python setup.py sdist bdist_wheel -RUN find dist -maxdepth 1 -mindepth 1 -name '*.tar.gz' -print0 | xargs -0 -I {} mv {} rasa.tar.gz +FROM python:3.7-slim as base -FROM python:3.6-slim +RUN apt-get update -qq \ + && apt-get install -y --no-install-recommends \ + # required by psycopg2 at build and runtime + libpq-dev \ + # required for health check + curl \ + && apt-get autoremove -y -SHELL ["/bin/bash", "-c"] +FROM base as builder RUN apt-get update -qq && \ apt-get install -y --no-install-recommends \ @@ -22,39 +22,55 @@ RUN apt-get update -qq && \ libssl-dev \ libffi6 \ libffi-dev \ - libpng-dev \ - libpq-dev \ - curl && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \ - mkdir /install && \ - mkdir /app + libpng-dev -WORKDIR /install +# install poetry +# keep this in sync with the version in pyproject.toml and Dockerfile +ENV POETRY_VERSION 1.0.5 +RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python +ENV PATH "/root/.poetry/bin:/opt/venv/bin:${PATH}" -# Copy as early as possible so we can cache ... -COPY alt_requirements/ ./alt_requirements -COPY requirements.txt . +# copy files +COPY . /build/ +COPY docker/configs/config_pretrained_embeddings_mitie.yml /build/config.yml -RUN pip install -r alt_requirements/requirements_pretrained_embeddings_mitie.txt +# download mitie model +RUN wget -P /build/data/ https://s3-eu-west-1.amazonaws.com/mitie/total_word_feature_extractor.dat -COPY --from=builder /build/rasa.tar.gz . -RUN pip install ./rasa.tar.gz[sql,mitie] +# change working directory +WORKDIR /build -RUN apt-get update -qq \ - && apt-get install -y --no-install-recommends wget \ - && wget -P /app/data/ https://s3-eu-west-1.amazonaws.com/mitie/total_word_feature_extractor.dat \ - && apt-get remove -y wget \ - && apt-get autoremove -y +# install dependencies +RUN python -m venv /opt/venv && \ + . 
/opt/venv/bin/activate && \ + pip install --no-cache-dir -U 'pip<20' && \ + poetry install --no-dev --no-root --no-interaction && \ + make install-mitie && \ + poetry build -f wheel -n && \ + pip install --no-deps dist/*.whl && \ + rm -rf dist *.egg-info + +# start a new build stage +FROM base as runner +# copy everything from /opt +COPY --from=builder /opt/venv /opt/venv -COPY sample_configs/config_pretrained_embeddings_mitie.yml /app/config.yml +# make sure we use the virtualenv +ENV PATH="/opt/venv/bin:$PATH" -VOLUME ["/app"] +# update permissions & change user to not run as root WORKDIR /app +RUN chgrp -R 0 /app && chmod -R g=u /app +USER 1001 -EXPOSE 5005 +# create a volume for temporary data +VOLUME /tmp -ENTRYPOINT ["rasa"] +# change shell +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +# the entry point +EXPOSE 5005 +ENTRYPOINT ["rasa"] CMD ["--help"] diff --git a/docker/Dockerfile_pretrained_embeddings_spacy_de b/docker/Dockerfile_pretrained_embeddings_spacy_de index 451093e04f6d..9259df21d295 100644 --- a/docker/Dockerfile_pretrained_embeddings_spacy_de +++ b/docker/Dockerfile_pretrained_embeddings_spacy_de @@ -1,14 +1,14 @@ -FROM python:3.6-slim as builder -# if this installation process changes, the enterprise container needs to be -# updated as well -WORKDIR /build -COPY . . -RUN python setup.py sdist bdist_wheel -RUN find dist -maxdepth 1 -mindepth 1 -name '*.tar.gz' -print0 | xargs -0 -I {} mv {} rasa.tar.gz +FROM python:3.7-slim as base -FROM python:3.6-slim +RUN apt-get update -qq \ + && apt-get install -y --no-install-recommends \ + # required by psycopg2 at build and runtime + libpq-dev \ + # required for health check + curl \ + && apt-get autoremove -y -SHELL ["/bin/bash", "-c"] +FROM base as builder RUN apt-get update -qq && \ apt-get install -y --no-install-recommends \ @@ -22,35 +22,58 @@ RUN apt-get update -qq && \ libssl-dev \ libffi6 \ libffi-dev \ - libpng-dev \ - libpq-dev \ - curl && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \ - mkdir /install && \ - mkdir /app + libpng-dev + +# install poetry +# keep this in sync with the version in pyproject.toml and Dockerfile +ENV POETRY_VERSION 1.0.5 +RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python +ENV PATH "/root/.poetry/bin:/opt/venv/bin:${PATH}" + +# copy files +COPY . /build/ +COPY docker/configs/config_pretrained_embeddings_spacy_de.yml /build/config.yml + +# change working directory +WORKDIR /build -WORKDIR /install +# install dependencies +RUN python -m venv /opt/venv && \ + . /opt/venv/bin/activate && \ + pip install --no-cache-dir -U 'pip<20' && \ + poetry install --extras spacy --no-dev --no-root --no-interaction && \ + poetry build -f wheel -n && \ + pip install --no-deps dist/*.whl && \ + rm -rf dist *.egg-info -# Copy as early as possible so we can cache ... -COPY alt_requirements/ ./alt_requirements -COPY requirements.txt . +# make sure we use the virtualenv +ENV PATH="/opt/venv/bin:$PATH" -RUN pip install -r alt_requirements/requirements_pretrained_embeddings_spacy.txt +# spacy link +RUN python -m spacy download de_core_news_sm && \ + python -m spacy link de_core_news_sm de -COPY --from=builder /build/rasa.tar.gz . 
-RUN pip install ./rasa.tar.gz[sql,spacy] +# start a new build stage +FROM base as runner -RUN pip install https://github.com/explosion/spacy-models/releases/download/de_core_news_sm-2.1.0/de_core_news_sm-2.1.0.tar.gz#egg=de_core_news_sm==2.1.0 --no-cache-dir > /dev/null \ - && python -m spacy link de_core_news_sm de +# copy everything from /opt +COPY --from=builder /opt/venv /opt/venv -COPY sample_configs/config_pretrained_embeddings_spacy_de.yml /app/config.yml +# make sure we use the virtualenv +ENV PATH="/opt/venv/bin:$PATH" -VOLUME ["/app"] +# update permissions & change user to not run as root WORKDIR /app +RUN chgrp -R 0 /app && chmod -R g=u /app +USER 1001 -EXPOSE 5005 +# Create a volume for temporary data +VOLUME /tmp -ENTRYPOINT ["rasa"] +# change shell +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +# the entry point +EXPOSE 5005 +ENTRYPOINT ["rasa"] CMD ["--help"] diff --git a/docker/Dockerfile_pretrained_embeddings_spacy_en b/docker/Dockerfile_pretrained_embeddings_spacy_en index 3e3a6000200b..4e2202c41450 100644 --- a/docker/Dockerfile_pretrained_embeddings_spacy_en +++ b/docker/Dockerfile_pretrained_embeddings_spacy_en @@ -1,14 +1,14 @@ -FROM python:3.6-slim as builder -# if this installation process changes, the enterprise container needs to be -# updated as well -WORKDIR /build -COPY . . -RUN python setup.py sdist bdist_wheel -RUN find dist -maxdepth 1 -mindepth 1 -name '*.tar.gz' -print0 | xargs -0 -I {} mv {} rasa.tar.gz +FROM python:3.7-slim as base -FROM python:3.6-slim +RUN apt-get update -qq \ + && apt-get install -y --no-install-recommends \ + # required by psycopg2 at build and runtime + libpq-dev \ + # required for health check + curl \ + && apt-get autoremove -y -SHELL ["/bin/bash", "-c"] +FROM base as builder RUN apt-get update -qq && \ apt-get install -y --no-install-recommends \ @@ -22,35 +22,58 @@ RUN apt-get update -qq && \ libssl-dev \ libffi6 \ libffi-dev \ - libpng-dev \ - libpq-dev \ - curl && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \ - mkdir /install && \ - mkdir /app + libpng-dev + +# install poetry +# keep this in sync with the version in pyproject.toml and Dockerfile +ENV POETRY_VERSION 1.0.5 +RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python +ENV PATH "/root/.poetry/bin:/opt/venv/bin:${PATH}" + +# copy files +COPY . /build/ +COPY docker/configs/config_pretrained_embeddings_spacy_en.yml /build/config.yml + +# change working directory +WORKDIR /build -WORKDIR /install +# install dependencies +RUN python -m venv /opt/venv && \ + . /opt/venv/bin/activate && \ + pip install --no-cache-dir -U 'pip<20' && \ + poetry install --extras spacy --no-dev --no-root --no-interaction && \ + poetry build -f wheel -n && \ + pip install --no-deps dist/*.whl && \ + rm -rf dist *.egg-info -# Copy as early as possible so we can cache ... -COPY alt_requirements/ ./alt_requirements -COPY requirements.txt . +# make sure we use the virtualenv +ENV PATH="/opt/venv/bin:$PATH" -RUN pip install -r alt_requirements/requirements_pretrained_embeddings_spacy.txt +# spacy link +RUN python -m spacy download en_core_web_md && \ + python -m spacy link en_core_web_md en -COPY --from=builder /build/rasa.tar.gz . 
-RUN pip install ./rasa.tar.gz[sql,spacy] +# start a new build stage +FROM base as runner -RUN pip install https://github.com/explosion/spacy-models/releases/download/en_core_web_md-2.1.0/en_core_web_md-2.1.0.tar.gz#egg=en_core_web_md==2.1.0 --no-cache-dir > /dev/null \ - && python -m spacy link en_core_web_md en +# copy everything from /opt +COPY --from=builder /opt/venv /opt/venv -COPY sample_configs/config_pretrained_embeddings_spacy.yml /app/config.yml +# make sure we use the virtualenv +ENV PATH="/opt/venv/bin:$PATH" -VOLUME ["/app"] +# update permissions & change user to not run as root WORKDIR /app +RUN chgrp -R 0 /app && chmod -R g=u /app +USER 1001 -EXPOSE 5005 +# Create a volume for temporary data +VOLUME /tmp -ENTRYPOINT ["rasa"] +# change shell +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +# the entry point +EXPOSE 5005 +ENTRYPOINT ["rasa"] CMD ["--help"] diff --git a/docker/configs/config_pretrained_embeddings_mitie.yml b/docker/configs/config_pretrained_embeddings_mitie.yml new file mode 100644 index 000000000000..1ff89972039a --- /dev/null +++ b/docker/configs/config_pretrained_embeddings_mitie.yml @@ -0,0 +1,11 @@ +language: "en" + +pipeline: + - name: MitieNLP + model: "data/total_word_feature_extractor.dat" + - name: MitieTokenizer + - name: MitieEntityExtractor + - name: EntitySynonymMapper + - name: RegexFeaturizer + - name: MitieFeaturizer + - name: SklearnIntentClassifier diff --git a/docker/configs/config_pretrained_embeddings_spacy_de.yml b/docker/configs/config_pretrained_embeddings_spacy_de.yml new file mode 100644 index 000000000000..c5068fe6377e --- /dev/null +++ b/docker/configs/config_pretrained_embeddings_spacy_de.yml @@ -0,0 +1,16 @@ +language: "de" + +pipeline: + - name: SpacyNLP + - name: SpacyTokenizer + - name: SpacyFeaturizer + - name: RegexFeaturizer + - name: LexicalSyntacticFeaturizer + - name: CountVectorsFeaturizer + - name: CountVectorsFeaturizer + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: DIETClassifier + - name: EntitySynonymMapper + - name: ResponseSelector diff --git a/docker/configs/config_pretrained_embeddings_spacy_en.yml b/docker/configs/config_pretrained_embeddings_spacy_en.yml new file mode 100644 index 000000000000..b6591e42bc97 --- /dev/null +++ b/docker/configs/config_pretrained_embeddings_spacy_en.yml @@ -0,0 +1,16 @@ +language: "en" + +pipeline: + - name: SpacyNLP + - name: SpacyTokenizer + - name: SpacyFeaturizer + - name: RegexFeaturizer + - name: LexicalSyntacticFeaturizer + - name: CountVectorsFeaturizer + - name: CountVectorsFeaturizer + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: DIETClassifier + - name: EntitySynonymMapper + - name: ResponseSelector diff --git a/docker/configs/config_pretrained_embeddings_spacy_en_duckling.yml b/docker/configs/config_pretrained_embeddings_spacy_en_duckling.yml new file mode 100644 index 000000000000..cd314e4485be --- /dev/null +++ b/docker/configs/config_pretrained_embeddings_spacy_en_duckling.yml @@ -0,0 +1,18 @@ +language: "en" + +pipeline: + - name: SpacyNLP + - name: SpacyTokenizer + - name: SpacyFeaturizer + - name: RegexFeaturizer + - name: LexicalSyntacticFeaturizer + - name: CountVectorsFeaturizer + - name: CountVectorsFeaturizer + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: DIETClassifier + - name: EntitySynonymMapper + - name: ResponseSelector + - name: DucklingHTTPExtractor + url: "http://duckling:8000" diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index eef05eb17ba0..efc735cd18ca 100644 --- 
a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -7,22 +7,9 @@ services: ports: - "5005:5005" volumes: - - "./rasa-app-data/models:/app/models" - - "./rasa-app-data/config:/app/config" - - "./rasa-app-data/project:/app/project" - - "./rasa-app-data/projects:/app/projects" - - "./rasa-app-data/logs:/app/logs" - - "./rasa-app-data/data:/app/data" + - "./rasa-app-data/:/app/" command: - - start - - -d - - ./model - - -c - - rest - - -u - - current/nlu - - --endpoints - - config/endpoints.yml + - run action_server: image: rasa/rasa-sdk:latest diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index c85910f8ec65..000000000000 --- a/docs/Makefile +++ /dev/null @@ -1,235 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -SPHINXABUILD = sphinx-autobuild -PAPER = -BUILDDIR = _build - -# User-friendly check for sphinx-build -ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) - $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don\'t have Sphinx installed, grab it from http://sphinx-doc.org/) -endif - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . - -.PHONY: help -help: - @echo "Please use \`make <target>' where <target> is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " applehelp to make an Apple Help Book" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " epub3 to make an epub3" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " xml to make Docutils-native XML files" - @echo " pseudoxml to make pseudoxml-XML files for display purposes" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - @echo " coverage to run coverage check of the documentation (if enabled)" - @echo " dummy to check syntax errors of document sources" - -.PHONY: clean -clean: - rm -rf $(BUILDDIR)/* - -.PHONY: html -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 
- -.PHONY: dirhtml -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -.PHONY: singlehtml -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -.PHONY: pickle -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -.PHONY: json -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -.PHONY: htmlhelp -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -.PHONY: qthelp -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/rasa_nlu.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/rasa_nlu.qhc" - -.PHONY: applehelp -applehelp: - $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp - @echo - @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." - @echo "N.B. You won't be able to view it unless you put it in" \ - "~/Library/Documentation/Help or install it in your application" \ - "bundle." - -.PHONY: devhelp -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/rasa_nlu" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/rasa_nlu" - @echo "# devhelp" - -.PHONY: epub -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -.PHONY: epub3 -epub3: - $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 - @echo - @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." - -.PHONY: latex -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -.PHONY: latexpdf -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -.PHONY: latexpdfja -latexpdfja: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through platex and dvipdfmx..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -.PHONY: text -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -.PHONY: man -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -.PHONY: texinfo -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 
- @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -.PHONY: info -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -.PHONY: gettext -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -.PHONY: changes -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -.PHONY: linkcheck -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -.PHONY: doctest -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." - -.PHONY: coverage -coverage: - $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage - @echo "Testing of coverage in the sources finished, look at the " \ - "results in $(BUILDDIR)/coverage/python.txt." - -.PHONY: xml -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The XML files are in $(BUILDDIR)/xml." - -.PHONY: pseudoxml -pseudoxml: - $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml - @echo - @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." - -.PHONY: dummy -dummy: - $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy - @echo - @echo "Build finished. Dummy builder generates no files." - -.PHONY: livehtml -livehtml: - $(SPHINXABUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 000000000000..19af9b6b404d --- /dev/null +++ b/docs/README.md @@ -0,0 +1,29 @@ +# Docs + +The docs are built using [Docusaurus 2](https://v2.docusaurus.io/). + +### Installation + +``` +$ yarn +``` + +### Local Development + +``` +$ yarn start +``` + +This command starts a local development server and open up a browser window. Most changes are reflected live without having to restart the server. + +### Build + +``` +$ yarn build +``` + +This command generates static content into the `build` directory and can be served using any static contents hosting service. + +### Deployment + +Deployment is handled by Netlify: it is setup for listening to changes on the `documentation` branch. 
diff --git a/docs/_static/css/custom.css b/docs/_static/css/custom.css deleted file mode 100644 index 347969b1811a..000000000000 --- a/docs/_static/css/custom.css +++ /dev/null @@ -1,8 +0,0 @@ -dl.glossary dt { - margin-top: 20px; - margin-bottom: 0px; -} - -dl.glossary dd { - margin-top: 2px; -} diff --git a/docs/_static/images/intents-user-goals-dialogue-elements.png b/docs/_static/images/intents-user-goals-dialogue-elements.png deleted file mode 100644 index 7db87c347721..000000000000 Binary files a/docs/_static/images/intents-user-goals-dialogue-elements.png and /dev/null differ diff --git a/docs/_static/images/interactive_learning_graph.gif b/docs/_static/images/interactive_learning_graph.gif deleted file mode 100644 index e0c4df4f2fe0..000000000000 Binary files a/docs/_static/images/interactive_learning_graph.gif and /dev/null differ diff --git a/docs/_static/images/mood_bot.png b/docs/_static/images/mood_bot.png deleted file mode 100644 index 3100011bb7cb..000000000000 Binary files a/docs/_static/images/mood_bot.png and /dev/null differ diff --git a/docs/api/action-server.rst b/docs/api/action-server.rst deleted file mode 100644 index e0b4a0be9b51..000000000000 --- a/docs/api/action-server.rst +++ /dev/null @@ -1,14 +0,0 @@ -:desc: Check out the API docs for open source chatbot framework Rasa's - action server, which allows you to define your own custom actions. - -:pagetype: full - -.. _action-server: - -Action Server -============= - -.. raw:: html - - <redoc spec-url='../../_static/spec/action-server.yml'></redoc> - <script src="https://cdn.jsdelivr.net/npm/redoc@next/bundles/redoc.standalone.js"> </script> diff --git a/docs/api/agent.rst b/docs/api/agent.rst deleted file mode 100644 index a1a658f7273a..000000000000 --- a/docs/api/agent.rst +++ /dev/null @@ -1,15 +0,0 @@ -:desc: The Agent class provides a central interface for performing crucial - operations like training, handling messages, loading a model, and - action prediction. - -.. _agent: - -Agent -===== - -.. edit-link:: - :url: https://github.com/RasaHQ/rasa/edit/master/rasa/core/agent.py - :text: SUGGEST DOCSTRING EDITS - -.. autoclass:: rasa.core.agent.Agent - :members: diff --git a/docs/api/custom-nlu-components.rst b/docs/api/custom-nlu-components.rst deleted file mode 100644 index 8990171f75de..000000000000 --- a/docs/api/custom-nlu-components.rst +++ /dev/null @@ -1,60 +0,0 @@ -:desc: Create custom components to create additional features like sentiment - analysis to integrate with open source bot framework Rasa. - -.. _custom-nlu-components: - -Custom NLU Components -===================== - -.. edit-link:: - -You can create a custom component to perform a specific task which NLU doesn't currently offer (for example, sentiment analysis). -Below is the specification of the :class:`rasa.nlu.components.Component` class with the methods you'll need to implement. - -.. note:: - There is a detailed tutorial on building custom components `here - <https://blog.rasa.com/enhancing-rasa-nlu-with-custom-components/>`_. - - -You can add a custom component to your pipeline by adding the module path. -So if you have a module called ``sentiment`` -containing a ``SentimentAnalyzer`` class: - - .. code-block:: yaml - - pipeline: - - name: "sentiment.SentimentAnalyzer" - - -Also be sure to read the section on the :ref:`section_component_lifecycle`. - -To get started, you can use this skeleton that contains the most important -methods that you should implement: - -.. 
literalinclude:: ../../tests/nlu/example_component.py - :language: python - :linenos: - - -Component -^^^^^^^^^ - -.. autoclass:: rasa.nlu.components.Component - - .. automethod:: required_packages - - .. automethod:: create - - .. automethod:: provide_context - - .. automethod:: train - - .. automethod:: process - - .. automethod:: persist - - .. automethod:: prepare_partial_processing - - .. automethod:: partially_process - - .. automethod:: can_handle_language diff --git a/docs/api/event-brokers.rst b/docs/api/event-brokers.rst deleted file mode 100644 index c3e3aad3dd74..000000000000 --- a/docs/api/event-brokers.rst +++ /dev/null @@ -1,281 +0,0 @@ -:desc: Find out how open source chatbot framework Rasa allows - you to stream events to a message broker. - -.. _event-brokers: - -Event Brokers -============= - -.. edit-link:: - -Rasa Core allows you to stream events to a message broker. The event broker -emits events into the event queue. It becomes part of the ``TrackerStore`` -which you use when starting an ``Agent`` or launch ``rasa.core.run``. - -All events are streamed to the broker as serialised dictionaries every time -the tracker updates it state. An example event emitted from the ``default`` -tracker looks like this: - -.. code-block:: json - - { - "sender_id": "default", - "timestamp": 1528402837.617099, - "event": "bot", - "text": "what your bot said", - "data": "some data" - } - -The ``event`` field takes the event's ``type_name`` (for more on event -types, check out the :ref:`events` docs). - -Rasa enables three possible broker types: - -- `Pika Event Broker`_ -- `Kafka Event Broker`_ -- `SQL Event Broker`_ - -Pika Event Broker ------------------ - -The example implementation we're going to show you here uses `Pika <https://pika.readthedocs.io>`_ , -the Python client library for `RabbitMQ <https://www.rabbitmq.com>`_. - -Adding a Pika Event Broker Using the Endpoint Configuration -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -You can use an endpoint configuration file to instruct Rasa Core to stream -all events to your event broker. To do so, add the following section to your -endpoint configuration, e.g. ``endpoints.yml``: - -.. literalinclude:: ../../data/test_endpoints/event_brokers/pika_endpoint.yml - -Then instruct Rasa Core to use the endpoint configuration and Pika producer by adding -``--endpoints <path to your endpoint configuration`` as following example: - -.. code-block:: shell - - rasa run -m models --endpoints endpoints.yml - -Adding a Pika Event Broker in Python -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Here is how you add it using Python code: - -.. code-block:: python - - from rasa.core.event_brokers.pika_producer import PikaProducer - from rasa_platform.core.tracker_store import InMemoryTrackerStore - - pika_broker = PikaProducer('localhost', - 'username', - 'password', - queue='rasa_core_events') - - tracker_store = InMemoryTrackerStore(db=db, event_broker=pika_broker) - - -Implementing a Pika Event Consumer -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -You need to have a RabbitMQ server running, as well as another application -that consumes the events. This consumer to needs to implement Pika's -``start_consuming()`` method with a ``callback`` action. Here's a simple -example: - -.. code-block:: python - - import json - import pika - - - def _callback(self, ch, method, properties, body): - # Do something useful with your incoming message body here, e.g. 
- # saving it to a database - print('Received event {}'.format(json.loads(body))) - - if __name__ == '__main__': - - # RabbitMQ credentials with username and password - credentials = pika.PlainCredentials('username', 'password') - - # Pika connection to the RabbitMQ host - typically 'rabbit' in a - # docker environment, or 'localhost' in a local environment - connection = pika.BlockingConnection( - pika.ConnectionParameters('rabbit', credentials=credentials)) - - # start consumption of channel - channel = connection.channel() - channel.basic_consume(_callback, - queue='rasa_core_events', - no_ack=True) - channel.start_consuming() - -Kafka Event Broker ------------------- - -It is possible to use `Kafka <https://kafka.apache.org/>`_ as main broker for your events. In this example -we are going to use the `python-kafka <https://kafka-python.readthedocs.io/en/master/usage.html>`_ -library, a Kafka client written in Python. - -Adding a Kafka Event Broker Using the Endpoint Configuration -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -As for the other brokers, you can use an endpoint configuration file to instruct Rasa Core to stream -all events to this event broker. To do it, add the following section to your -endpoint configuration. - -Pass the ``endpoints.yml`` file as argument with ``--endpoints <path to your endpoint configuration>`` -when running Rasa, as following example: - -.. code-block:: shell - - rasa run -m models --endpoints endpoints.yml - -Using ``SASL_PLAINTEXT`` protocol the endpoints file must have the following entries: - -.. literalinclude:: ../../data/test_endpoints/event_brokers/kafka_plaintext_endpoint.yml - -In the case of using SSL protocol the endpoints file must looks like: - -.. literalinclude:: ../../data/test_endpoints/event_brokers/kafka_ssl_endpoint.yml - -Adding a Kafka Broker in Python -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The code below shows an example on how to instantiate a Kafka producer in you script. - -.. code-block:: python - - from rasa.core.event_brokers.kafka_producer import KafkaProducer - from rasa.core.tracker_store import InMemoryTrackerStore - - kafka_broker = KafkaProducer(host='localhost:9092', - topic='rasa_core_events') - - tracker_store = InMemoryTrackerStore(event_broker=kafka_broker) - - -The host variable can be either a list of brokers adresses or a single one. -If only one broker address is available, the client will connect to it and -request the cluster Metadata. -Therefore, the remain brokers in the cluster can be discovered -automatically through the data served by the first connected broker. - -To pass more than one broker address as argument, they must be passed in a -list of strings. e.g.: - -.. code-block:: python - - kafka_broker = KafkaProducer(host=['kafka_broker_1:9092', - 'kafka_broker_2:2030', - 'kafka_broker_3:9092'], - topic='rasa_core_events') - -Authentication and authorization -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Rasa Core's Kafka producer accepts two types of security protocols - ``SASL_PLAINTEXT`` and ``SSL``. - -For development environment, or if the brokers servers and clients are located -into the same machine, you can use simple authentication with ``SASL_PLAINTEXT``. -By using this protocol, the credentials and messages exchanged between the clients and servers -will be sent in plaintext. Thus, this is not the most secure approach, but since it's simple -to configure, it is useful for simple cluster configurations. 
-``SASL_PLAINTEXT`` protocol requires the setup of the ``username`` and ``password`` -previously configured in the broker server. - -.. code-block:: python - - kafka_broker = KafkaProducer(host='kafka_broker:9092', - sasl_plain_username='kafka_username', - sasl_plain_password='kafka_password', - security_protocol='SASL_PLAINTEXT', - topic='rasa_core_events') - - -If the clients or the brokers in the kafka cluster are located in different -machines, it's important to use ssl protocal to assure encryption of data and client -authentication. After generating valid certificates for the brokers and the -clients, the path to the certificate and key generated for the producer must -be provided as arguments, as well as the CA's root certificate. - -.. code-block:: python - - kafka_broker = KafkaProducer(host='kafka_broker:9092', - ssl_cafile='CARoot.pem', - ssl_certfile='certificate.pem', - ssl_keyfile='key.pem', - ssl_check_hostname=True, - security_protocol='SSL', - topic='rasa_core_events') - -If the ``ssl_check_hostname`` parameter is enabled, the clients will verify -if the broker's hostname matches the certificate. It's used on client's connections -and inter-broker connections to prevent man-in-the-middle attacks. - - -Implementing a Kafka Event Consumer -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The parameters used to create a Kafka consumer is the same used on the producer creation, -according to the security protocol being used. The following implementation shows an example: - -.. code-block:: python - - from kafka import KafkaConsumer - from json import loads - - consumer = KafkaConsumer('rasa_core_events', - bootstrap_servers=['localhost:29093'], - value_deserializer=lambda m: json.loads(m.decode('utf-8')), - security_protocol='SSL', - ssl_check_hostname=False, - ssl_cafile='CARoot.pem', - ssl_certfile='certificate.pem', - ssl_keyfile='key.pem') - - for message in consumer: - print(message.value) - -SQL Event Broker ----------------- - -It is possible to use an SQL database as an event broker. Connections to databases are established using -`SQLAlchemy <https://www.sqlalchemy.org/>`_, a Python library which can interact with many -different types of SQL databases, such as `SQLite <https://sqlite.org/index.html>`_, -`PostgreSQL <https://www.postgresql.org/>`_ and more. The default Rasa installation allows connections to SQLite -and PostgreSQL databases, to see other options, please see the -`SQLAlchemy documentation on SQL dialects <https://docs.sqlalchemy.org/en/13/dialects/index.html>`_. - - -Adding a SQL Event Broker Using the Endpoint Configuration -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -You can use the endpoint configuration file to instruct Rasa to save -all events to your SQL event broker. To do so, add a ``event_broker`` section to your -endpoint configuration, e.g. ``endpoints.yml``. For example, a valid SQLite configuration -could look like the following: - -.. code-block:: yaml - - event_broker: - type: SQL - dialect: sqlite - db: events.db - -PostgreSQL databases can be used as well: - -.. code-block:: yaml - - event_broker: - type: SQL - host: 127.0.0.1 - port: 5432 - dialect: postgresql - username: myuser - password: mypassword - db: mydatabase - -With this configuration applied, Rasa will create a table called ``events`` on the database, -where all events will be added. 
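The removed page ends by noting that, once an SQL event broker is configured, Rasa writes every emitted event into an ``events`` table. As a quick sanity check for that setup, below is a minimal sketch that reads the table back using only Python's standard library. It assumes the SQLite example above (``db: events.db``) and the ``events`` table name stated in the text; the column layout (for example a ``data`` column holding the serialised event) is an assumption, so the query stays generic.

.. code-block:: python

    import json
    import sqlite3

    # Connect to the SQLite database from the example endpoint configuration
    # ("db: events.db"); swap in your own path if it differs.
    connection = sqlite3.connect("events.db")
    connection.row_factory = sqlite3.Row

    # The docs only guarantee a table called "events"; select everything and
    # inspect the rows rather than relying on specific column names.
    for row in connection.execute("SELECT * FROM events"):
        record = dict(row)
        payload = record.get("data")  # assumed column with the JSON event body
        try:
            print(json.loads(payload))
        except (TypeError, ValueError):
            # Fall back to the raw row if the "data" assumption does not hold.
            print(record)

    connection.close()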
diff --git a/docs/api/events.rst b/docs/api/events.rst deleted file mode 100644 index f3cb0258b18e..000000000000 --- a/docs/api/events.rst +++ /dev/null @@ -1,273 +0,0 @@ -:desc: Use events in open source library Rasa Core to support functionalities - like resetting slots, scheduling reminder or pausing a conversation. - -.. _events: - -Events -====== - -.. edit-link:: - -Conversations in Rasa are represented as a sequence of events. -This page lists the event types defined in Rasa Core. - -.. note:: - If you are using the Rasa SDK to write custom actions in python, - you need to import the events from ``rasa_sdk.events``, not from - ``rasa.core.events``. If you are writing actions in another language, - your events should be formatted like the JSON objects on this page. - - - -.. contents:: - :local: - -General Purpose Events ----------------------- - -Set a Slot -~~~~~~~~~~ - -:Short: Event to set a slot on a tracker -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER SetSlot - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.SlotSet - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: SlotSet.apply_to - - -Restart a conversation -~~~~~~~~~~~~~~~~~~~~~~ - -:Short: Resets anything logged on the tracker. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER Restarted - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.Restarted - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: Restarted.apply_to - - -Reset all Slots -~~~~~~~~~~~~~~~ - -:Short: Resets all the slots of a conversation. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER AllSlotsReset - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.AllSlotsReset - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: AllSlotsReset.apply_to - - -Schedule a reminder -~~~~~~~~~~~~~~~~~~~ - -:Short: Schedule an action to be executed in the future. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :lines: 1- - :start-after: # DOCS MARKER ReminderScheduled - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.ReminderScheduled - -:Effect: - When added to a tracker, core will schedule the action to be - run in the future. - -Pause a conversation -~~~~~~~~~~~~~~~~~~~~ - -:Short: Stops the bot from responding to messages. Action prediction - will be halted until resumed. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER ConversationPaused - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.ConversationPaused - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: ConversationPaused.apply_to - - -Resume a conversation -~~~~~~~~~~~~~~~~~~~~~ - -:Short: Resumes a previously paused conversation. The bot will start - predicting actions again. -:JSON: - .. 
literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER ConversationResumed - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.ConversationResumed - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: ConversationResumed.apply_to - - -Force a followup action -~~~~~~~~~~~~~~~~~~~~~~~ - -:Short: Instead of predicting the next action, force the next action - to be a fixed one. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER FollowupAction - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.FollowupAction - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: FollowupAction.apply_to - - -Automatically tracked events ----------------------------- - - -User sent message -~~~~~~~~~~~~~~~~~ - -:Short: Message a user sent to the bot. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :lines: 1- - :start-after: # DOCS MARKER UserUttered - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.UserUttered - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: UserUttered.apply_to - - -Bot responded message -~~~~~~~~~~~~~~~~~~~~~ - -:Short: Message a bot sent to the user. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER BotUttered - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.BotUttered - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: BotUttered.apply_to - - -Undo a user message -~~~~~~~~~~~~~~~~~~~ - -:Short: Undoes all side effects that happened after the last user message - (including the ``user`` event of the message). -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER UserUtteranceReverted - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.UserUtteranceReverted - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: UserUtteranceReverted.apply_to - - -Undo an action -~~~~~~~~~~~~~~ - -:Short: Undoes all side effects that happened after the last action - (including the ``action`` event of the action). -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER ActionReverted - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.ActionReverted - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: ActionReverted.apply_to - - -Log an executed action -~~~~~~~~~~~~~~~~~~~~~~ - -:Short: Logs an action the bot executed to the conversation. Events that - action created are logged separately. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER ActionExecuted - :dedent: 4 - :end-before: # DOCS END -:Class: - .. 
autoclass:: rasa.core.events.ActionExecuted - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: ActionExecuted.apply_to diff --git a/docs/api/featurization.rst b/docs/api/featurization.rst deleted file mode 100644 index 90021cb68b07..000000000000 --- a/docs/api/featurization.rst +++ /dev/null @@ -1,119 +0,0 @@ -:desc: Find out how to apply machine learning algorithms to conversational AI - using vector representations of conversations with Rasa. - -.. _featurization: - -Featurization -============== - -.. edit-link:: - -In order to apply machine learning algorithms to conversational AI, we need -to build up vector representations of conversations. - -Each story corresponds to a tracker which consists of the states of the -conversation just before each action was taken. - - -State Featurizers -^^^^^^^^^^^^^^^^^ -Every event in a trackers history creates a new state (e.g. running a bot -action, receiving a user message, setting slots). Featurizing a single state -of the tracker has a couple steps: - -1. **Tracker provides a bag of active features**: - - features indicating intents and entities, if this is the first - state in a turn, e.g. it's the first action we will take after - parsing the user's message. (e.g. - ``[intent_restaurant_search, entity_cuisine]`` ) - - features indicating which slots are currently defined, e.g. - ``slot_location`` if the user previously mentioned the area - they're searching for restaurants. - - features indicating the results of any API calls stored in - slots, e.g. ``slot_matches`` - - features indicating what the last action was (e.g. - ``prev_action_listen``) - -2. **Convert all the features into numeric vectors**: - - We use the ``X, y`` notation that's common for supervised learning, - where ``X`` is an array of shape - ``(num_data_points, time_dimension, num_input_features)``, - and ``y`` is an array of shape ``(num_data_points, num_bot_features)`` - or ``(num_data_points, time_dimension, num_bot_features)`` - containing the target class labels encoded as one-hot vectors. - - The target labels correspond to actions taken by the bot. - To convert the features into vector format, there are different - featurizers available: - - - ``BinarySingleStateFeaturizer`` creates a binary one-hot encoding: - The vectors ``X, y`` indicate a presence of a certain intent, - entity, previous action or slot e.g. ``[0 0 1 0 0 1 ...]``. - - - ``LabelTokenizerSingleStateFeaturizer`` creates a vector - based on the feature label: - All active feature labels (e.g. ``prev_action_listen``) are split - into tokens and represented as a bag-of-words. For example, actions - ``utter_explain_details_hotel`` and - ``utter_explain_details_restaurant`` will have 3 features in - common, and differ by a single feature indicating a domain. - - Labels for user inputs (intents, entities) and bot actions - are featurized separately. Each label in the two categories - is tokenized on a special character ``split_symbol`` - (e.g. ``action_search_restaurant = {action, search, restaurant}``), - creating two vocabularies. A bag-of-words representation - is then created for each label using the appropriate vocabulary. - The slots are featurized as binary vectors, indicating - their presence or absence at each step of the dialogue. - - -.. 
note:: - - If the domain defines the possible ``actions``, - ``[ActionGreet, ActionGoodbye]``, - ``4`` additional default actions are added: - ``[ActionListen(), ActionRestart(), - ActionDefaultFallback(), ActionDeactivateForm()]``. - Therefore, label ``0`` indicates default action listen, label ``1`` - default restart, label ``2`` a greeting and ``3`` indicates goodbye. - - -Tracker Featurizers -^^^^^^^^^^^^^^^^^^^ - -It's often useful to include a bit more history than just the current state -when predicting an action. The ``TrackerFeaturizer`` iterates over tracker -states and calls a ``SingleStateFeaturizer`` for each state. There are two -different tracker featurizers: - -1. Full Dialogue ----------------- - -``FullDialogueTrackerFeaturizer`` creates numerical representation of -stories to feed to a recurrent neural network where the whole dialogue -is fed to a network and the gradient is backpropagated from all time steps. -Therefore, ``X`` is an array of shape -``(num_stories, max_dialogue_length, num_input_features)`` and -``y`` is an array of shape -``(num_stories, max_dialogue_length, num_bot_features)``. -The smaller dialogues are padded with ``-1`` for all features, indicating -no values for a policy. - -2. Max History --------------- - -``MaxHistoryTrackerFeaturizer`` creates an array of previous tracker -states for each bot action or utterance, with the parameter -``max_history`` defining how many states go into each row in ``X``. -Deduplication is performed to filter out duplicated turns (bot actions -or bot utterances) in terms of their previous states. Hence ``X`` -has shape ``(num_unique_turns, max_history, num_input_features)`` -and ``y`` is an array of shape ``(num_unique_turns, num_bot_features)``. - -For some algorithms a flat feature vector is needed, so ``X`` -should be reshaped to -``(num_unique_turns, max_history * num_input_features)``. If numeric -target class labels are needed instead of one-hot vectors, use -``y.argmax(axis=-1)``. diff --git a/docs/api/http-api.rst b/docs/api/http-api.rst deleted file mode 100644 index 9bdf0adb9c99..000000000000 --- a/docs/api/http-api.rst +++ /dev/null @@ -1,14 +0,0 @@ -:desc: Read about Rasa's HTTP API that has endpoints for conversations, - training models, and configuring your bot. - -:pagetype: full - -.. _http-api: - -HTTP API -======== - -.. raw:: html - - <redoc spec-url='../../_static/spec/rasa.yml'></redoc> - <script src="https://cdn.jsdelivr.net/npm/redoc@next/bundles/redoc.standalone.js"> </script> \ No newline at end of file diff --git a/docs/api/jupyter-notebooks.rst b/docs/api/jupyter-notebooks.rst deleted file mode 100644 index dab21d0f5e38..000000000000 --- a/docs/api/jupyter-notebooks.rst +++ /dev/null @@ -1,143 +0,0 @@ -:desc: Learn how to integrate open source chatbot platform Rasa into - Jupyter notebooks, alongside all your machine learning code. - -.. _jupyter-notebooks: - -Jupyter Notebooks -================= - -.. edit-link:: - -This page contains the most important methods for using Rasa in a Jupyter notebook. - -Running asynchronous Rasa code in Jupyter Notebooks requires an extra requirement, -since Jupyter Notebooks already run on event loops. Install this requirement in -the command line before launching jupyter: - -.. code-block:: bash - - pip install nest_asyncio - -Then in the first cell of your notebook, include: - -.. runnable:: - - import nest_asyncio - - nest_asyncio.apply() - print("Event loop ready.") - - -First, you need to create a project if you don't already have one. 
-To do this, run this cell, which will create the ``test-project`` directory and make it -your working directory: - -.. runnable:: - - from rasa.cli.scaffold import create_initial_project - import os - - project = "test-project" - create_initial_project(project) - - # move into project directory and show files - os.chdir(project) - print(os.listdir(".")) - - -To train a model, you will have to tell the ``train`` function -where to find the relevant files. -To define variables that contain these paths, run: - - -.. runnable:: - - config = "config.yml" - training_files = "data/" - domain = "domain.yml" - output = "models/" - print(config, training_files, domain, output) - - - - -Train a Model -~~~~~~~~~~~~~ - -Now we can train a model by passing in the paths to the ``rasa.train`` function. -Note that the training files are passed as a list. -When training has finished, ``rasa.train`` returns the path where the trained model has been saved. - - - -.. runnable:: - - import rasa - - model_path = rasa.train(domain, config, [training_files], output) - print(model_path) - - - - -Chat with your assistant -~~~~~~~~~~~~~~~~~~~~~~~~ - -To start chatting to an assistant, call the ``chat`` function, passing -in the path to your saved model: - - -.. runnable:: - - from rasa.jupyter import chat - chat(model_path) - - - -Evaluate your model against test data -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Rasa has a convenience function for getting your training data. -Rasa's ``get_core_nlu_directories`` is a function which -recursively finds all the stories and NLU data files in a directory -and copies them into two temporary directories. -The return values are the paths to these newly created directories. - -.. runnable:: - - import rasa.data as data - stories_directory, nlu_data_directory = data.get_core_nlu_directories(training_files) - print(stories_directory, nlu_data_directory) - - - -To test your model, call the ``test`` function, passing in the path -to your saved model and directories containing the stories and nlu data -to evaluate on. - -.. runnable:: - - rasa.test(model_path, stories_directory, nlu_data_directory) - print("Done testing.") - - -The results of the core evaluation will be written to a file called ``results``. -NLU errors will be reported to ``errors.json``. -Together, they contain information about the accuracy of your model's -predictions and other metrics. - -.. runnable:: - - if os.path.isfile("errors.json"): - print("NLU Errors:") - print(open("errors.json").read()) - else: - print("No NLU errors.") - - if os.path.isdir("results"): - print("\n") - print("Core Errors:") - print(open("results/failed_stories.md").read()) - -.. juniper:: - :language: python diff --git a/docs/api/tracker-stores.rst b/docs/api/tracker-stores.rst deleted file mode 100644 index bdc40beddcd0..000000000000 --- a/docs/api/tracker-stores.rst +++ /dev/null @@ -1,169 +0,0 @@ -:desc: All conversations are stored within a tracker store. Read how open source - library Rasa Core provides implementations for different store types out - of the box. - -.. _tracker-stores: - -Tracker Stores -============== - -.. edit-link:: - -All conversations are stored within a `tracker store`. -Rasa Core provides implementations for different store types out of the box. -If you want to use another store, you can also build a custom tracker store by extending the `TrackerStore` class. - -.. 
contents:: - -InMemoryTrackerStore (default) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -:Description: - `InMemoryTrackerStore` is the default tracker store. It is used if no other tracker store is configured. - It stores the conversation history in memory. - - .. note:: As this store keeps all history in memory the entire history is lost if you restart Rasa Core. - -:Configuration: - To use the `InMemoryTrackerStore` no configuration is needed. - -SQLTrackerStore -~~~~~~~~~~~~~~~ - -:Description: - ``SQLTrackerStore`` can be used to store the conversation history in an SQL database. - Storing your trackers this way allows you to query the event database by sender_id, timestamp, action name, - intent name and typename - -:Configuration: - To set up Rasa Core with SQL the following steps are required: - - 1. Add required configuration to your `endpoints.yml` - - .. code-block:: yaml - - tracker_store: - type: SQL - dialect: "sqlite" # the dialect used to interact with the db - url: "" # (optional) host of the sql db, e.g. "localhost" - db: "rasa.db" # path to your db - username: # username used for authentication - password: # password used for authentication - query: # optional dictionary to be added as a query string to the connection URL - driver: my-driver - - 3. To start the Rasa Core server using your SQL backend, - add the ``--endpoints`` flag, e.g.: - - .. code-block:: bash - - rasa run -m models --endpoints endpoints.yml -:Parameters: - - ``domain`` (default: ``None``): Domain object associated with this tracker store - - ``dialect`` (default: ``sqlite``): The dialect used to communicate with your SQL backend. Consult the `SQLAlchemy docs <https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls>`_ for available dialects. - - ``host`` (default: ``None``): URL of your SQL server - - ``port`` (default: ``None``): Port of your SQL server - - ``db`` (default: ``rasa.db``): The path to the database to be used - - ``username`` (default: ``None``): The username which is used for authentication - - ``password`` (default: ``None``): The password which is used for authentication - - ``event_broker`` (default: ``None``): Event broker to publish events to - - ``login_db`` (default: ``None``): Alternative database name to which initially connect, and create the database specified by `db` (PostgreSQL only) - - ``query`` (default: ``None``): Dictionary of options to be passed to the dialect and/or the DBAPI upon connect - -RedisTrackerStore -~~~~~~~~~~~~~~~~~~ - -:Description: - `RedisTrackerStore` can be used to store the conversation history in `Redis <https://redis.io/>`_. - Redis is a fast in-memory key-value store which can optionally also persist data. - -:Configuration: - To set up Rasa Core with Redis the following steps are required: - - 1. Start your Redis instance - 2. Add required configuration to your `endpoints.yml` - - .. code-block:: yaml - - tracker_store: - type: redis - url: <url of the redis instance, e.g. localhost> - port: <port of your redis instance, usually 6379> - db: <number of your database within redis, e.g. 0> - password: <password used for authentication> - - 3. To start the Rasa Core server using your configured Redis instance, - add the :code:`--endpoints` flag, e.g.: - - .. 
code-block:: bash - - rasa run -m models --endpoints endpoints.yml -:Parameters: - - ``url`` (default: ``localhost``): The url of your redis instance - - ``port`` (default: ``6379``): The port which redis is running on - - ``db`` (default: ``0``): The number of your redis database - - ``password`` (default: ``None``): Password used for authentication - (``None`` equals no authentication) - - ``record_exp`` (default: ``None``): Record expiry in seconds - -MongoTrackerStore -~~~~~~~~~~~~~~~~~ - -:Description: - `MongoTrackerStore` can be used to store the conversation history in `Mongo <https://www.mongodb.com/>`_. - MongoDB is a free and open-source cross-platform document-oriented NoSQL database. - -:Configuration: - 1. Start your MongoDB instance. - 2. Add required configuration to your `endpoints.yml` - - .. code-block:: yaml - - tracker_store: - type: mongod - url: <url to your mongo instance, e.g. mongodb://localhost:27017> - db: <name of the db within your mongo instance, e.g. rasa> - username: <username used for authentication> - password: <password used for authentication> - auth_source: <database name associated with the user’s credentials> - - You can also add more advanced configurations (like enabling ssl) by appending - a parameter to the url field, e.g. mongodb://localhost:27017/?ssl=true - - 3. To start the Rasa Core server using your configured MongoDB instance, - add the :code:`--endpoints` flag, e.g.: - - .. code-block:: bash - - rasa run -m models --endpoints endpoints.yml -:Parameters: - - ``url`` (default: ``mongodb://localhost:27017``): URL of your MongoDB - - ``db`` (default: ``rasa``): The database name which should be used - - ``username`` (default: ``0``): The username which is used for authentication - - ``password`` (default: ``None``): The password which is used for authentication - - ``collection`` (default: ``conversations``): The collection name which is - used to store the conversations - - ``auth_source`` (default: ``admin``): database name associated with the user’s credentials. - -Custom Tracker Store -~~~~~~~~~~~~~~~~~~~~ - -:Description: - If you require a tracker store which is not available out of the box, you can implement your own. - This is done by extending the base class `TrackerStore`. - - .. autoclass:: rasa.core.tracker_store.TrackerStore - -:Steps: - 1. Extend the `TrackerStore` base class. Note that your constructor has to - provide a parameter ``url``. - 2. In your endpoints.yml put in the module path to your custom tracker store - and the parameters you require: - - .. code-block:: yaml - - tracker_store: - type: path.to.your.module.Class - url: localhost - a_parameter: a value - another_parameter: another value diff --git a/docs/api/tracker.rst b/docs/api/tracker.rst deleted file mode 100644 index 07d156463433..000000000000 --- a/docs/api/tracker.rst +++ /dev/null @@ -1,15 +0,0 @@ -:desc: Trackers mantain the state of the a dialogue and can be - featurized for machine learning algorithms right out of - the box. - -.. _tracker: - -Tracker -======= - -.. edit-link:: - :url: https://github.com/RasaHQ/rasa/edit/master/rasa/core/trackers.py - :text: SUGGEST DOCSTRING EDITS - -.. 
autoclass:: rasa.core.trackers.DialogueStateTracker - :members: diff --git a/docs/api/training-data-importers.rst b/docs/api/training-data-importers.rst deleted file mode 100644 index 51b230691bf2..000000000000 --- a/docs/api/training-data-importers.rst +++ /dev/null @@ -1,232 +0,0 @@ -:desc: Change the way Rasa imports training data by replacing the default importer or - writing your own importer. - -.. _training-data-importers: - -Training Data Importers -======================= - -.. edit-link:: - -.. contents:: - :local: - -By default, you can use command line arguments to specify where Rasa should look -for training data on your disk. Rasa then loads any potential training files and uses -them to train your assistant. - -If needed, you can also customize `how` Rasa imports training data. -Potential use cases for this might be: - -- using a custom parser to load training data in other formats -- using different approaches to collect training data (e.g. loading them from different resources) - -You can instruct Rasa to load and use your custom importer by adding the section -``importers`` to the Rasa configuration file and specifying the importer with its -full class path: - -.. code-block:: yaml - - importers: - - name: "module.CustomImporter" - parameter1: "value" - parameter2: "value2" - - name: "module.AnotherCustomImporter" - -The ``name`` key is used to determine which importer should be loaded. Any extra -parameters are passed as constructor arguments to the loaded importer. - -.. note:: - - You can specify multiple importers. Rasa will automatically merge their results. - - -RasaFileImporter (default) -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -By default Rasa uses the importer ``RasaFileImporter``. If you want to use it on its -own, you don't have to specify anything in your configuration file. -If you want to use it together with other importers, add it to your -configuration file: - -.. code-block:: yaml - - importers: - - name: "RasaFileImporter" - -MultiProjectImporter (experimental) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. warning:: - - This feature is currently experimental and might change or be removed in the future. - Please share your feedback on it in the `forum <https://forum.rasa.com>`_ to help - us making this feature ready for production. - -With this importer you can build a contextual AI assistant by combining multiple -reusable Rasa projects. -You might, for example, handle chitchat with one project and greet your users with -another. These projects can be developed in isolation, and then combined at train time -to create your assistant. - -An example directory structure could look like this: - -.. code-block:: bash - - . - ├── config.yml - └── projects - ├── GreetBot - │   ├── data - │   │   ├── nlu.md - │   │   └── stories.md - │   └── domain.yml - └── ChitchatBot - ├── config.yml - ├── data - │   ├── nlu.md - │   └── stories.md - └── domain.yml - -In this example the contextual AI assistant imports the ``ChitchatBot`` project which in turn -imports the ``GreetBot`` project. Project imports are defined in the configuration files of -each project. -To instruct Rasa to use the ``MultiProjectImporter`` module, put this section in the config -file of your root project: - -.. code-block:: yaml - - importers: - - name: MultiProjectImporter - - -Then specify which projects you want to import. -In our example, the ``config.yml`` in the root project would look like this: - -.. 
code-block:: yaml - - imports: - - projects/ChitchatBot - -The configuration file of the ``ChitchatBot`` in turn references the ``GreetBot``: - -.. code-block:: yaml - - imports: - - ../GreetBot - -The ``GreetBot`` project does not specify further projects so the ``config.yml`` can be -omitted. - -Rasa uses relative paths from the referencing configuration file to import projects. -These can be anywhere on your file system as long as the file access is permitted. - -During the training process Rasa will import all required training files, combine -them, and train a unified AI assistant. The merging of the training data happens during -runtime, so no additional files with training data are created or visible. - -.. note:: - - Rasa will use the policy and NLU pipeline configuration of the root project - directory during training. **Policy or NLU configurations of imported projects - will be ignored.** - -.. note:: - - Equal intents, entities, slots, templates, actions and forms will be merged, - e.g. if two projects have training data for an intent ``greet``, - their training data will be combined. - -Writing a Custom Importer -~~~~~~~~~~~~~~~~~~~~~~~~~ -If you are writing a custom importer, this importer has to implement the interface of -:ref:`training-data-importers-trainingFileImporter`: - -.. code-block:: python - - from typing import Optional, Text, Dict, List, Union - - import rasa - from rasa.core.domain import Domain - from rasa.core.interpreter import RegexInterpreter, NaturalLanguageInterpreter - from rasa.core.training.structures import StoryGraph - from rasa.importers.importer import TrainingDataImporter - from rasa.nlu.training_data import TrainingData - - - class MyImporter(TrainingDataImporter): - """Example implementation of a custom importer component.""" - - def __init__( - self, - config_file: Optional[Text] = None, - domain_path: Optional[Text] = None, - training_data_paths: Optional[Union[List[Text], Text]] = None, - **kwargs: Dict - ): - """Constructor of your custom file importer. - - Args: - config_file: Path to configuration file from command line arguments. - domain_path: Path to domain file from command line arguments. - training_data_paths: Path to training files from command line arguments. - **kwargs: Extra parameters passed through configuration in configuration file. - """ - - pass - - async def get_domain(self) -> Domain: - path_to_domain_file = self._custom_get_domain_file() - return Domain.load(path_to_domain_file) - - def _custom_get_domain_file(self) -> Text: - pass - - async def get_stories( - self, - interpreter: "NaturalLanguageInterpreter" = RegexInterpreter(), - template_variables: Optional[Dict] = None, - use_e2e: bool = False, - exclusion_percentage: Optional[int] = None, - ) -> StoryGraph: - from rasa.core.training.dsl import StoryFileReader - - path_to_stories = self._custom_get_story_file() - return await StoryFileReader.read_from_file(path_to_stories, await self.get_domain()) - - def _custom_get_story_file(self) -> Text: - pass - - async def get_config(self) -> Dict: - path_to_config = self._custom_get_config_file() - return rasa.utils.io.read_config_file(path_to_config) - - def _custom_get_config_file(self) -> Text: - pass - - async def get_nlu_data(self, language: Optional[Text] = "en") -> TrainingData: - from rasa.nlu.training_data import loading - - path_to_nlu_file = self._custom_get_nlu_file() - return loading.load_data(path_to_nlu_file) - - def _custom_get_nlu_file(self) -> Text: - pass - - - -.. 
_training-data-importers-trainingFileImporter: - -TrainingDataImporter -~~~~~~~~~~~~~~~~~~~~ - - -.. autoclass:: rasa.importers.importer.TrainingDataImporter - - .. automethod:: get_domain - - .. automethod:: get_config - - .. automethod:: get_nlu_data - - .. automethod:: get_stories diff --git a/docs/changelog.rst b/docs/changelog.rst deleted file mode 100644 index 76888fe1bf10..000000000000 --- a/docs/changelog.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. _changelog: - -.. include:: ../CHANGELOG.rst diff --git a/docs/conf.py b/docs/conf.py deleted file mode 100644 index 7df1ea824dc0..000000000000 --- a/docs/conf.py +++ /dev/null @@ -1,402 +0,0 @@ -# -*- coding: utf-8 -*- -# -# -- General configuration ------------------------------------------------ -import re -import sys - -nitpicky = True -linkcheck_anchors_ignore = [".*"] -linkcheck_ignore = [ - r"http://localhost:\d+/", - r"https://github.com/mit-nlp/MITIE/releases/download/", -] -linkcheck_retries = 2 -linkcheck_timeout = 5 -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx_autodoc_typehints", - "sphinx.ext.napoleon", - "sphinx.ext.mathjax", - "sphinx.ext.doctest", - "sphinx.ext.extlinks", - "sphinx_tabs.tabs", - "sphinxcontrib.programoutput", - "sphinxcontrib.httpdomain", - "rasabaster.button", - "rasabaster.card", - "rasabaster.chatbubble", - "rasabaster.copyable", - "rasabaster.editlink", - "rasabaster.runnable", - "rasabaster.conversations", -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# source_suffix = ['.rst', '.md'] -source_suffix = ".rst" - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = "Rasa" -copyright = "2019, Rasa Technologies" -author = "Rasa Technologies" - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -__version__ = None -exec (open("../rasa/version.py").read()) -version = ".".join(__version__.split(".")[:2]) -# The full version, including alpha/beta/rc tags. -release = __version__ - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. 
-# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = [ - "_build", - "Thumbs.db", - ".DS_Store", - # ignore doc pages that we don't show to appease keep_warnings - "core/old-core-change-log.rst", - "core/old-core-migration-guide.rst", - "nlu/old-nlu-change-log.rst", - "nlu/old-nlu-migration-guide.rst", -] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# html_theme = 'default' - -html_theme = "rasabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Rasa", - "github_user": "RasaHQ", - "github_repo": "rasa_nlu", - "fixed_sidebar": True, - "product": "Rasa", - "base_url": "https://rasa.com/docs/rasa/", -} -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. -# "<project> v<release> documentation" by default. -html_title = "" - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (relative to this directory) to use as a favicon of -# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not None, a 'Last updated on:' timestamp is inserted at every page -# bottom, using the given strftime format. -# The empty string is equivalent to '%b %d, %Y'. -# html_last_updated_fmt = None - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. 
-html_sidebars = {"**": ["simpletoc.html"]} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a <link> tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# 'ja' uses this config value. -# 'zh' user can custom change `jieba` dictionary path. -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "rasa_doc" - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - #'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - #'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - #'preamble': '', - # Latex figure (float) alignment - #'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, "rasa_nlu.tex", "rasa\\_nlu Documentation", "Alan Nichol", "manual") -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "rasa_nlu", "rasa_nlu Documentation", [author], 1)] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. 
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "rasa", - "rasa Documentation", - author, - "rasa", - "One line description of project.", - "Miscellaneous", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -import os - -doctest_path = [os.path.abspath("..")] - -# Make sure we are using the project root as the working directory instead of /docs -doctest_global_setup = r""" -import os -os.chdir(os.path.abspath('..')) -""" - -# extlinks configuration - -extlinks = { - "gh-code": ("https://github.com/RasaHQ/rasa/tree/{}/%s".format(release), "github ") -} - -# Sphinxcontrib configuration -scv_priority = "tags" -scv_show_banner = True -scv_banner_greatest_tag = True -scv_sort = ("semver",) -scv_whitelist_branches = (re.compile("^master$"),) -# scv_whitelist_tags = ('None',) -scv_grm_exclude = ("README.md", ".gitignore", ".nojekyll", "CNAME") -scv_whitelist_tags = ( - re.compile(r"^[2-9]+\.\d+\.\d+$"), - re.compile(r"^1\.[456789]+\.\d+$"), - re.compile(r"^1\.3\.\d+$"), - "1.2.7", - "1.1.8", - "1.0.9", -) -scv_greatest_tag = True - -# type classes for nitpicky to ignore -nitpick_ignore = [ - # non-rasa typing - ("py:class", "str"), - ("py:class", "bool"), - ("py:class", "int"), - ("py:class", "Any"), - ("py:class", "Dict"), - ("py:class", "List"), - ("py:class", "Text"), - ("py:class", "Optional"), - ("py:class", "Iterator"), - ("py:class", "typing.Any"), - ("py:class", "typing.Dict"), - ("py:class", "typing.List"), - ("py:class", "typing.Optional"), - ("py:class", "typing.Generator"), - ("py:class", "typing.Iterator"), - ("py:class", "collections.deque"), - ("py:class", "sanic.app.Sanic"), - ("py:data", "typing.Any"), - ("py:data", "typing.Dict"), - ("py:data", "typing.List"), - ("py:data", "typing.Optional"), - ("py:data", "typing.Iterator"), - ("py:obj", "None"), - # rasa typing - ("py:class", "CollectingDispatcher"), - ("py:class", "Tracker"), - ("py:class", "rasa.core.agent.Agent"), - ("py:class", "rasa.core.conversation.Dialogue"), - ("py:class", "rasa.core.domain.Domain"), - ("py:class", "rasa.core.policies.Policy"), - ("py:class", "rasa.core.events.Event"), - ("py:class", "rasa.core.events.SlotSet"), - ("py:class", "rasa.core.processor.MessageProcessor"), - ("py:class", "rasa.core.training.structures.StoryGraph"), - ("py:class", "rasa.nlu.components.Component"), - ("py:class", "rasa.nlu.training_data.message.Message"), - ("py:class", "rasa.nlu.training_data.training_data.TrainingData"), -] - - -def setup(sphinx): - sphinx.add_stylesheet("css/custom.css") - - try: - utils_path = os.path.abspath(os.path.join(__file__, "..", "utils")) - sys.path.insert(0, utils_path) - from StoryLexer import StoryLexer - - sphinx.add_lexer("story", StoryLexer()) - except ImportError: - print ("No Story Lexer :( Sad times!") diff --git a/docs/core/about.rst b/docs/core/about.rst deleted file mode 100644 index ccb32e6b0dcf..000000000000 --- a/docs/core/about.rst +++ /dev/null @@ -1,55 +0,0 @@ -:desc: Get started with machine learning dialogue management to scale your bot - development using Rasa as a conversational AI platform. - -.. 
_about-rasa-core: - -The Rasa Core Dialogue Engine -============================= - -.. chat-bubble:: - :text: What am I looking at? - :sender: bot - - -.. chat-bubble:: - :text: Rasa Core is a dialogue engine for building AI assistants. - :sender: user - -.. chat-bubble:: - :text: It's part of the open source Rasa framework. - :sender: user - -.. chat-bubble:: - :text: What's cool about it? - :sender: bot - -.. chat-bubble:: - :text: Rather than a bunch of if/else statements, it uses a machine learning model trained on example conversations to decide what to do next. - :sender: user - -.. chat-bubble:: - :text: That sounds harder than writing a few if statements. - :sender: bot - - -.. chat-bubble:: - :text: In the beginning of a project, it seems easier to just hard-code some logic. - :sender: user - -.. chat-bubble:: - :text: Rasa helps you when you want to go past that and create a bot that can handle more complexity. - This <a href="https://medium.com/rasa-blog/a-new-approach-to-conversational-software-2e64a5d05f2a" target="_blank">blog post</a> explains the philosophy behind Rasa Core. - :sender: user - - -.. chat-bubble:: - :text: Can I see it in action? - :sender: bot - -.. chat-bubble:: - :text: We thought you'd never ask! - :sender: user - -.. chat-bubble:: - :text: Head over to the <a href="../../user-guide/rasa-tutorial">Rasa Tutorial</a> for an interactive example. - :sender: user diff --git a/docs/core/actions.rst b/docs/core/actions.rst deleted file mode 100644 index 3d3dd52f33ae..000000000000 --- a/docs/core/actions.rst +++ /dev/null @@ -1,260 +0,0 @@ -:desc: Learn about about how to write your own custom actions with the - open source Rasa framework to be able to interact with the external - world - ranging from databases to third-party APIs. - -.. _actions: - -Actions -======= - -.. edit-link:: - -Actions are the things your bot runs in response to user input. -There are four kinds of actions in Rasa: - - 1. **Utterance actions**: start with ``utter_`` and send a specific message - to the user - 2. **Retrieval actions**: start with ``respond_`` and send a message selected by a retrieval model - 3. **Custom actions**: run arbitrary code and send any number of messages (or none). - 4. **Default actions**: e.g. ``action_listen``, ``action_restart``, - ``action_default_fallback`` - -.. contents:: - :local: - -Utterance Actions ------------------ - -To define an utterance action (``ActionUtterTemplate``), add an utterance template to the domain file -that starts with ``utter_``: - -.. code-block:: yaml - - templates: - utter_my_message: - - "this is what I want my action to say!" - -It is conventional to start the name of an utterance action with ``utter_``. -If this prefix is missing, you can still use the template in your custom -actions, but the template can not be directly predicted as its own action. -See :ref:`responses` for more details. - -If you use an external NLG service, you don't need to specify the -templates in the domain, but you still need to add the utterance names -to the actions list of the domain. - - -Retrieval Actions ------------------ - -Retrieval actions make it easier to work with a large number of similar intents like chitchat and FAQs. -See :ref:`retrieval-actions` to learn moree. - -.. _custom-actions: - -Custom Actions --------------- - -An action can run any code you want. Custom actions can turn on the lights, -add an event to a calendar, check a user's bank balance, or anything -else you can imagine. 
- -Rasa will call an endpoint you can specify, when a custom action is -predicted. This endpoint should be a webserver that reacts to this -call, runs the code and optionally returns information to modify -the dialogue state. - -To specify, your action server use the ``endpoints.yml``: - -.. code-block:: yaml - - action_endpoint: - url: "http://localhost:5055/webhook" - -And pass it to the scripts using ``--endpoints endpoints.yml``. - -You can create an action server in node.js, .NET, java, or any -other language and define your actions there - but we provide -a small python SDK to make development there even easier. - -.. note:: - - Rasa uses a ticket lock mechanism to ensure incoming messages from the same - conversation ID do not interfere with each other and are processed in the right - order. If you expect your custom action to take more than 60 seconds to run, please - set the ``TICKET_LOCK_LIFETIME`` environment variable to your expected value. - -Custom Actions Written in Python -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -For actions written in python, we have a convenient SDK which starts -this action server for you. - -The only thing your action server needs to install is ``rasa-sdk``: - -.. code-block:: bash - - pip install rasa-sdk - -.. note:: - - You do not need to install ``rasa`` for your action server. - E.g. it is recommended to run Rasa in a docker container and - create a separate container for your action server. In this - separate container, you only need to install ``rasa-sdk``. - -The file that contains your custom actions should be called ``actions.py``. - -If you have ``rasa`` installed, run this command to start your action server: - -.. code-block:: bash - - rasa run actions - -.. _custom_action_example: - -Otherwise, if you do not have ``rasa`` installed, run this command: - -.. code-block:: bash - - python -m rasa_sdk --actions actions - -.. _custom_action_example_verbose: - -In a restaurant bot, if the user says "show me a Mexican restaurant", -your bot could execute the action ``ActionCheckRestaurants``, -which might look like this: - -.. testcode:: - - from rasa_sdk import Action - from rasa_sdk.events import SlotSet - - class ActionCheckRestaurants(Action): - def name(self) -> Text: - return "action_check_restaurants" - - def run(self, - dispatcher: CollectingDispatcher, - tracker: Tracker, - domain: Dict[Text, Any]) -> List[Dict[Text, Any]]: - - cuisine = tracker.get_slot('cuisine') - q = "select * from restaurants where cuisine='{0}' limit 1".format(cuisine) - result = db.query(q) - - return [SlotSet("matches", result if result is not None else [])] - - -You should add the the action name ``action_check_restaurants`` to -the actions in your domain file. The action's ``run`` method receives -three arguments. You can access the values of slots and the latest message -sent by the user using the ``tracker`` object, and you can send messages -back to the user with the ``dispatcher`` object, by calling -``dispatcher.utter_template``, ``dispatcher.utter_message``, or any other -``rasa_sdk.executor.CollectingDispatcher`` method. - -Details of the ``run()`` method: - -.. automethod:: rasa_sdk.Action.run - - -There is an example of a ``SlotSet`` event -:ref:`above <custom_action_example>`, and a full list of possible -events in :ref:`Events <events>`. - -Execute Actions in Other Code ------------------------------ - -Rasa will send an HTTP ``POST`` request to your server containing -information on which action to run. 
Furthermore, this request will contain all -information about the conversation. :ref:`action-server` shows the detailed API spec. - -As a response to the action call from Rasa, you can modify the tracker, -e.g. by setting slots and send responses back to the user. -All of the modifications are done using events. -There is a list of all possible event types in :ref:`events`. - -Proactively Reaching Out to the User Using Actions --------------------------------------------------- - -You may want to proactively reach out to the user, -for example to display the output of a long running background operation -or notify the user of an external event. - -To do so, you can ``POST`` to this -`endpoint <../../api/http-api.html#tag/Tracker/paths/~1conversations~1{conversation_id}~1execute/post>`_ , -specifying the action which should be run for a specific user in the request body. Use the -``output_channel`` query parameter to specify which output -channel should be used to communicate the assistant's responses back to the user. -If your message is static, you can define an ``utter_`` action in your domain file with -a corresponding template. If you need more control, add a custom action in your -domain and implement the required steps in your action server. Any messages which are -dispatched in the custom action will be forwarded to the specified output channel. - - -Proactively reaching out to the user is dependent on the abilities of a channel and -hence not supported by every channel. If your channel does not support it, consider -using the :ref:`callbackInput` channel to send messages to a webhook. - - -.. note:: - - Running an action in a conversation changes the conversation history and affects the - assistant's next predictions. If you don't want this to happen, make sure that your action - reverts itself by appending a ``ActionReverted`` event to the end of the - conversation tracker. - -.. _default-actions: - -Default Actions ---------------- - -There are eight default actions: - -+-----------------------------------+------------------------------------------------+ -| ``action_listen`` | Stop predicting more actions and wait for user | -| | input. | -+-----------------------------------+------------------------------------------------+ -| ``action_restart`` | Reset the whole conversation. Can be triggered | -| | during a conversation by entering ``/restart`` | -| | if the :ref:`mapping-policy` is included in | -| | the policy configuration. | -+-----------------------------------+------------------------------------------------+ -| ``action_default_fallback`` | Undo the last user message (as if the user did | -| | not send it and the bot did not react) and | -| | utter a message that the bot did not | -| | understand. See :ref:`fallback-actions`. | -+-----------------------------------+------------------------------------------------+ -| ``action_deactivate_form`` | Deactivate the active form and reset the | -| | requested slot. | -| | See also :ref:`section_unhappy`. | -+-----------------------------------+------------------------------------------------+ -| ``action_revert_fallback_events`` | Revert events that occurred during the | -| | TwoStageFallbackPolicy. | -| | See :ref:`fallback-actions`. | -+-----------------------------------+------------------------------------------------+ -| ``action_default_ask_affirmation``| Ask the user to affirm their intent. | -| | It is suggested to overwrite this default | -| | action with a custom action to have more | -| | meaningful prompts. 
| -+-----------------------------------+------------------------------------------------+ -| ``action_default_ask_rephrase`` | Ask the user to rephrase their intent. | -+-----------------------------------+------------------------------------------------+ -| ``action_back`` | Undo the last user message (as if the user did | -| | not send it and the bot did not react). | -| | Can be triggered during a conversation by | -| | entering ``/back`` if the MappingPolicy is | -| | included in the policy configuration. | -+-----------------------------------+------------------------------------------------+ - -All the default actions can be overwritten. To do so, add the action name -to the list of actions in your domain: - -.. code-block:: yaml - - actions: - - action_default_ask_affirmation - -Rasa will then call your action endpoint and treat it as every other -custom action. diff --git a/docs/core/domains.rst b/docs/core/domains.rst deleted file mode 100644 index 123f58bd4cfb..000000000000 --- a/docs/core/domains.rst +++ /dev/null @@ -1,318 +0,0 @@ -:desc: Define intents, entities, slots and actions in Rasa to build contextual - AI Assistants and chatbots using open source bot framework Rasa. - -.. _domains: - -Domains -======= - -.. edit-link:: - -The ``Domain`` defines the universe in which your assistant operates. -It specifies the ``intents``, ``entities``, ``slots``, and ``actions`` -your bot should know about. Optionally, it can also include ``templates`` -for the things your bot can say. - -.. contents:: - :local: - - -An example of a Domain ----------------------- - -As an example, the ``DefaultDomain`` has the following yaml definition: - - -.. literalinclude:: ../../rasa/cli/initial_project/domain.yml - :language: yaml - -**What does this mean?** - -Your NLU model will define the ``intents`` and ``entities`` that you -need to include in the domain. - -:ref:`slots` hold information you want to keep track of during a conversation. -A categorical slot called ``risk_level`` would be -defined like this: - -.. code-block:: yaml - - slots: - risk_level: - type: categorical - values: - - low - - medium - - high - - -:ref:`Here <slot-classes>` you can find the full list of slot types defined by -Rasa Core, along with syntax for including them in your domain file. - - -:ref:`actions` are the things your bot can actually do. -For example, an action could: - -* respond to a user, -* make an external API call, -* query a database, or -* just about anything! - -Custom Actions and Slots ------------------------- - -To reference slots in your domain, you need to reference them by -their **module path**. To reference custom actions, use their **name**. -For example, if you have a module called ``my_actions`` containing -a class ``MyAwesomeAction``, and module ``my_slots`` containing -``MyAwesomeSlot``, you would add these lines to the domain file: - -.. code-block:: yaml - - actions: - - my_custom_action - ... - - slots: - - my_slots.MyAwesomeSlot - - -The ``name`` function of ``MyAwesomeAction`` needs to return -``my_custom_action`` in this example (for more details, -see :ref:`custom-actions`). - -.. _utter_templates: - -Utterance templates -------------------- - -Utterance templates are messages the bot will send back to the user. There are -two ways to use these templates: - -1. If the name of the template starts with ``utter_``, the utterance can - directly be used as an action. You would add the utterance template - to the domain: - - .. 
code-block:: yaml - - templates: - utter_greet: - - text: "Hey! How are you?" - - Afterwards, you can use the template as an action in the - stories: - - .. code-block:: story - - ## greet the user - * intent_greet - - utter_greet - - When ``utter_greet`` is run as an action, it will send the message from - the template back to the user. - -2. You can use the templates to generate response messages from your - custom actions using the dispatcher: - ``dispatcher.utter_template("utter_greet", tracker)``. - This allows you to separate the logic of generating - the messages from the actual copy. In you custom action code, you can - send a message based on the template like this: - - .. code-block:: python - - from rasa_sdk.actions import Action - - class ActionGreet(Action): - def name(self): - return 'action_greet' - - def run(self, dispatcher, tracker, domain): - dispatcher.utter_template("utter_greet", tracker) - return [] - -Images and Buttons ------------------- - -Templates defined in a domain's yaml file can contain images and -buttons as well: - -.. code-block:: yaml - - templates: - utter_greet: - - text: "Hey! How are you?" - buttons: - - title: "great" - payload: "great" - - title: "super sad" - payload: "super sad" - utter_cheer_up: - - text: "Here is something to cheer you up:" - image: "https://i.imgur.com/nGF1K8f.jpg" - -.. note:: - - Please keep in mind that it is up to the implementation of the output - channel on how to display the defined buttons. The command line, for - example, can't display buttons or images, but tries to mimic them by - printing the options. - -Custom Output Payloads ----------------------- - -You can also send any arbitrary output to the output channel using the -``custom:`` key. Note that since the domain is in yaml format, the json -payload should first be converted to yaml format. - -For example, although date pickers are not a defined parameter in utterance -templates because they are not supported by most channels, a Slack date picker -can be sent like so: - -.. code-block:: yaml - - templates: - utter_take_bet: - - custom: - blocks: - - type: section - text: - text: "Make a bet on when the world will end:" - type: mrkdwn - accessory: - type: datepicker - initial_date: '2019-05-21' - placeholder: - type: plain_text - text: Select a date - - -Channel-Specific Utterances ---------------------------- - -If you have certain utterances that you would like sent only to specific -channels, you can specify this with the ``channel:`` key. The value should match -the name defined in the ``name()`` method of the channel's ``OutputChannel`` -class. Channel-specific utterances are especially useful if creating custom -output payloads that will only work in certain channels. - - -.. code-block:: yaml - - templates: - utter_ask_game: - - text: "Which game would you like to play?" - channel: "slack" - custom: - - # payload for Slack dropdown menu to choose a game - - text: "Which game would you like to play?" - buttons: - - title: "Chess" - payload: '/inform{"game": "chess"}' - - title: "Checkers" - payload: '/inform{"game": "checkers"}' - - title: "Fortnite" - payload: '/inform{"game": "fortnite"}' - -Each time your bot looks for utterances, it will first check to see if there -are any channel-specific templates for the connected channel. If there are, it -will choose **only** from these utterances. If no channel-specific templates are -found, it will choose from any utterances that do not have a defined ``channel``. 
-Therefore, it is good practice to always have at least one template for each -utterance that has no ``channel`` specified so that your bot can respond in all -environments, including in the shell and in interactive learning. - -Variables ---------- - -You can also use **variables** in your templates to insert information -collected during the dialogue. You can either do that in your custom python -code or by using the automatic slot filling mechanism. For example, if you -have a template like this: - -.. code-block:: yaml - - templates: - utter_greet: - - text: "Hey, {name}. How are you?" - -Rasa will automatically fill that variable with a value found in a slot called -``name``. - -In custom code, you can retrieve a template by using: - -.. testsetup:: - - from rasa_sdk.actions import Action - -.. testcode:: - - class ActionCustom(Action): - def name(self): - return "action_custom" - - def run(self, dispatcher, tracker, domain): - # send utter default template to user - dispatcher.utter_template("utter_default", tracker) - # ... other code - return [] - -If the template contains variables denoted with ``{my_variable}`` -you can supply values for the fields by passing them as keyword -arguments to ``utter_template``: - -.. code-block:: python - - dispatcher.utter_template("utter_default", tracker, my_variable="my text") - -Variations ----------- - -If you want to randomly vary the response sent to the user, you can list -multiple responses and Rasa will randomly pick one of them, e.g.: - -.. code-block:: yaml - - templates: - utter_greeting: - - text: "Hey, {name}. How are you?" - - text: "Hey, {name}. How is your day going?" - -.. _use_entities: - -Ignoring entities for certain intents -------------------------------------- - -If you want all entities to be ignored for certain intents, you can -add the ``use_entities: []`` parameter to the intent in your domain -file like this: - -.. code-block:: yaml - - intents: - - greet: - use_entities: [] - -To ignore some entities or explicitly take only certain entities -into account you can use this syntax: - -.. code-block:: yaml - - intents: - - greet: - use_entities: - - name - - first_name - ignore_entities: - - location - - age - -This means that excluded entities for those intents will be unfeaturized and therefore -will not impact the next action predictions. This is useful when you have -an intent where you don't care about the entities being picked up. If you list -your intents as normal without this parameter, the entities will be -featurized as normal. - -.. note:: - - If you really want these entities not to influence action prediction we - suggest you make the slots with the same name of type ``unfeaturized``. diff --git a/docs/core/fallback-actions.rst b/docs/core/fallback-actions.rst deleted file mode 100644 index 9bfe1018e1e4..000000000000 --- a/docs/core/fallback-actions.rst +++ /dev/null @@ -1,124 +0,0 @@ -:desc: Define custom fallback actions with thresholds for NLU and Core for letting - your conversation fail gracefully with open source dialogue management. - -.. _fallback-actions: - -Fallback Actions -================ - -.. edit-link:: - -Sometimes you want to revert to a fallback action, such as replying, -`"Sorry, I didn't understand that"`. You can handle fallback cases by adding -either the ``FallbackPolicy`` or the ``TwoStageFallbackPolicy`` to your -policy ensemble. 
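Both policies ultimately fall back to a message the bot can always send. By default this is the ``utter_default`` template, so it is worth defining it in your domain file from the start — a minimal example (the exact wording is up to you):

.. code-block:: yaml

    templates:
      utter_default:
      - text: "Sorry, I didn't understand that. Could you rephrase?"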
- -Fallback Policy ---------------- - - -The ``FallbackPolicy`` has one fallback action, which will -be executed if the intent recognition has a confidence below ``nlu_threshold`` -or if none of the dialogue policies predict an action with -confidence higher than ``core_threshold``. - -The thresholds and fallback action can be adjusted in the policy configuration -file as parameters of the ``FallbackPolicy``. - -.. code-block:: yaml - - policies: - - name: "FallbackPolicy" - nlu_threshold: 0.4 - core_threshold: 0.3 - fallback_action_name: "action_default_fallback" - -``action_default_fallback`` is a default action in Rasa Core which sends the -``utter_default`` template message to the user. Make sure to specify -the ``utter_default`` in your domain file. It will also revert back to the -state of the conversation before the user message that caused the -fallback, so that it will not influence the prediction of future actions. -You can take a look at the source of the action below: - -.. autoclass:: rasa.core.actions.action.ActionDefaultFallback - - -You can also create your own custom action to use as a fallback (see -:ref:`custom actions <custom-actions>` for more info on custom actions). If you -do, make sure to pass the custom fallback action to ``FallbackPolicy`` inside -your policy configuration file. For example: - -.. code-block:: yaml - - policies: - - name: "FallbackPolicy" - nlu_threshold: 0.4 - core_threshold: 0.3 - fallback_action_name: "my_fallback_action" - - -.. note:: - If your custom fallback action does not return a ``UserUtteranceReverted`` event, - the next predictions of your bot may become inaccurate, as it is very likely that - the fallback action is not present in your stories. - -If you have a specific intent, let's say it's called ``out_of_scope``, that -should always trigger the fallback action, you should add this as a story: - -.. code-block:: story - - ## fallback story - * out_of_scope - - action_default_fallback - - -Two-stage Fallback Policy -------------------------- - -The ``TwoStageFallbackPolicy`` handles low NLU confidence in multiple stages -by trying to disambiguate the user input (low core confidence is handled in -the same manner as the ``FallbackPolicy``). - -- If a NLU prediction has a low confidence score, the user is asked to affirm - the classification of the intent. (Default action: - ``action_default_ask_affirmation``) - - - If they affirm, the story continues as if the intent was classified - with high confidence from the beginning. - - If they deny, the user is asked to rephrase their message. - -- Rephrasing (default action: ``action_default_ask_rephrase``) - - - If the classification of the rephrased intent was confident, the story - continues as if the user had this intent from the beginning. - - If the rephrased intent was not classified with high confidence, the user - is asked to affirm the classified intent. - -- Second affirmation (default action: ``action_default_ask_affirmation``) - - - If the user affirms the intent, the story continues as if the user had - this intent from the beginning. - - If the user denies, the original intent is classified as the specified - ``deny_suggestion_intent_name``, and an ultimate fallback action - ``fallback_nlu_action_name`` is triggered (e.g. a handoff to a human). - -Rasa Core provides the default implementations of -``action_default_ask_affirmation`` and ``action_default_ask_rephrase``. 
-The default implementation of ``action_default_ask_rephrase`` action utters -the response template ``utter_ask_rephrase``, so be sure to specify this -template in your domain file. -The implementation of both actions can be overwritten with :ref:`custom actions <custom-actions>`. - -You can specify the core fallback action as well as the ultimate NLU -fallback action as parameters to ``TwoStageFallbackPolicy`` in your -policy configuration file. - -.. code-block:: yaml - - policies: - - name: TwoStageFallbackPolicy - nlu_threshold: 0.3 - core_threshold: 0.3 - fallback_core_action_name: "action_default_fallback" - fallback_nlu_action_name: "action_default_fallback" - deny_suggestion_intent_name: "out_of_scope" diff --git a/docs/core/forms.rst b/docs/core/forms.rst deleted file mode 100644 index cd31c7043a9c..000000000000 --- a/docs/core/forms.rst +++ /dev/null @@ -1,336 +0,0 @@ -:desc: Follow a rule-based process of information gathering using FormActions - in open source bot framework Rasa. - -.. _forms: - -Forms -===== - -.. edit-link:: - -.. note:: - There is an in-depth tutorial `here <https://blog.rasa.com/building-contextual-assistants-with-rasa-formaction/>`_ about how to use Rasa Forms for slot filling. - -.. contents:: - :local: - -One of the most common conversation patterns is to collect a few pieces of -information from a user in order to do something (book a restaurant, call an -API, search a database, etc.). This is also called **slot filling**. - - -If you need to collect multiple pieces of information in a row, we recommended -that you create a ``FormAction``. This is a single action which contains the -logic to loop over the required slots and ask the user for this information. -There is a full example using forms in the ``examples/formbot`` directory of -Rasa Core. - - -When you define a form, you need to add it to your domain file. -If your form's name is ``restaurant_form``, your domain would look like this: - -.. code-block:: yaml - - forms: - - restaurant_form - actions: - ... - -See ``examples/formbot/domain.yml`` for an example. - -Configuration File ------------------- - -To use forms, you also need to include the ``FormPolicy`` in your policy -configuration file. For example: - -.. code-block:: yaml - - policies: - - name: "FormPolicy" - -see ``examples/formbot/config.yml`` for an example. - -Form Basics ------------ - -Using a ``FormAction``, you can describe *all* of the happy paths with a single -story. By "happy path", we mean that whenever you ask a user for some information, -they respond with the information you asked for. - -If we take the example of the restaurant bot, this single story describes all of the -happy paths. - -.. code-block:: story - - ## happy path - * request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} - - form{"name": null} - -In this story the user intent is ``request_restaurant``, which is followed by -the form action ``restaurant_form``. With ``form{"name": "restaurant_form"}`` the -form is activated and with ``form{"name": null}`` the form is deactivated again. -As shown in the section :ref:`section_unhappy` the bot can execute any kind of -actions outside the form while the form is still active. On the "happy path", -where the user is cooperating well and the system understands the user input correctly, -the form is filling all requested slots without interruption. - -The ``FormAction`` will only request slots which haven't already been set. 
-If a user starts the conversation with -`I'd like a vegetarian Chinese restaurant for 8 people`, then they won't be -asked about the ``cuisine`` and ``num_people`` slots. - -Note that for this story to work, your slots should be :ref:`unfeaturized -<unfeaturized-slot>`. If any of these slots are featurized, your story needs to -include ``slot{}`` events to show these slots being set. In that case, the -easiest way to create valid stories is to use :ref:`interactive-learning`. - -In the story above, ``restaurant_form`` is the name of our form action. -Here is an example of what it looks like. -You need to define three methods: - -- ``name``: the name of this action -- ``required_slots``: a list of slots that need to be filled for the ``submit`` method to work. -- ``submit``: what to do at the end of the form, when all the slots have been filled. - -.. literalinclude:: ../../examples/formbot/actions.py - :dedent: 4 - :pyobject: RestaurantForm.name - -.. literalinclude:: ../../examples/formbot/actions.py - :dedent: 4 - :pyobject: RestaurantForm.required_slots - -.. literalinclude:: ../../examples/formbot/actions.py - :dedent: 4 - :pyobject: RestaurantForm.submit - -Once the form action gets called for the first time, -the form gets activated and the ``FormPolicy`` jumps in. -The ``FormPolicy`` is extremely simple and just always predicts the form action. -See :ref:`section_unhappy` for how to work with unexpected user input. - -Every time the form action gets called, it will ask the user for the next slot in -``required_slots`` which is not already set. -It does this by looking for a template called ``utter_ask_{slot_name}``, -so you need to define these in your domain file for each required slot. - -Once all the slots are filled, the ``submit()`` method is called, where you can -use the information you've collected to do something for the user, for example -querying a restaurant API. -If you don't want your form to do anything at the end, just use ``return []`` -as your submit method. -After the submit method is called, the form is deactivated, -and other policies in your Core model will be used to predict the next action. - -Custom slot mappings --------------------- - -If you do not define slot mappings, slots will be only filled by entities -with the same name as the slot that are picked up from the user input. -Some slots, like ``cuisine``, can be picked up using a single entity, but a -``FormAction`` can also support yes/no questions and free-text input. -The ``slot_mappings`` method defines how to extract slot values from user responses. - -Here's an example for the restaurant bot: - -.. literalinclude:: ../../examples/formbot/actions.py - :dedent: 4 - :pyobject: RestaurantForm.slot_mappings - -The predefined functions work as follows: - -- ``self.from_entity(entity=entity_name, intent=intent_name)`` - will look for an entity called ``entity_name`` to fill a slot - ``slot_name`` regardless of user intent if ``intent_name`` is ``None`` - else only if the users intent is ``intent_name``. -- ``self.from_intent(intent=intent_name, value=value)`` - will fill slot ``slot_name`` with ``value`` if user intent is ``intent_name``. - To make a boolean slot, take a look at the definition of ``outdoor_seating`` - above. Note: Slot will not be filled with user intent of message triggering - the form action. Use ``self.from_trigger_intent`` below. 
-- ``self.from_trigger_intent(intent=intent_name, value=value)`` - will fill slot ``slot_name`` with ``value`` if form was triggered with user - intent ``intent_name``. -- ``self.from_text(intent=intent_name)`` will use the next - user utterance to fill the text slot ``slot_name`` regardless of user intent - if ``intent_name`` is ``None`` else only if user intent is ``intent_name``. -- If you want to allow a combination of these, provide them as a list as in the - example above - - -Validating user input ---------------------- - -After extracting a slot value from user input, the form will try to validate the -value of the slot. Note that by default, validation only happens if the form -action is executed immediately after user input. This can be changed in the -``_validate_if_required()`` function of the ``FormAction`` class in Rasa SDK. -Any required slots that were filled before the initial activation of a form -are validated upon activation as well. - -By default, validation only checks if the requested slot was successfully -extracted from the slot mappings. If you want to add custom validation, for -example to check a value against a database, you can do this by writing a helper -validation function with the name ``validate_{slot-name}``. - -Here is an example , ``validate_cuisine()``, which checks if the extracted cuisine slot -belongs to a list of supported cuisines. - -.. literalinclude:: ../../examples/formbot/actions.py - :pyobject: RestaurantForm.cuisine_db - -.. literalinclude:: ../../examples/formbot/actions.py - :pyobject: RestaurantForm.validate_cuisine - -As the helper validation functions return dictionaries of slot names and values -to set, you can set more slots than just the one you are validating from inside -a helper validation method. However, you are responsible for making sure that -those extra slot values are valid. - -You can also deactivate the form directly during this validation step (in case the -slot is filled with something that you are certain can't be handled) by returning -``self.deactivate()`` - -If nothing is extracted from the user's utterance for any of the required slots, an -``ActionExecutionRejection`` error will be raised, meaning the action execution -was rejected and therefore Core will fall back onto a different policy to -predict another action. - -.. _section_unhappy: - -Handling unhappy paths ----------------------- - -Of course your users will not always respond with the information you ask of them. -Typically, users will ask questions, make chitchat, change their mind, or otherwise -stray from the happy path. The way this works with forms is that a form will raise -an ``ActionExecutionRejection`` if the user didn't provide the requested information. -You need to handle events that might cause ``ActionExecutionRejection`` errors -in your stories. For example, if you expect your users to chitchat with your bot, -you could add a story like this: - -.. code-block:: story - - ## chitchat - * request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} - * chitchat - - utter_chitchat - - restaurant_form - - form{"name": null} - -In some situations, users may change their mind in the middle of form action -and decide not to go forward with their initial request. In cases like this, the -assistant should stop asking for the requested slots. You can handle such situations -gracefully using a default action ``action_deactivate_form`` which will deactivate -the form and reset the requested slot. 
An example story of such conversation could -look as follows: - -.. code-block:: story - - ## chitchat - * request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} - * stop - - utter_ask_continue - * deny - - action_deactivate_form - - form{"name": null} - - -It is **strongly** recommended that you build these stories using interactive learning. -If you write these stories by hand you will likely miss important things. -Please read :ref:`section_interactive_learning_forms` -on how to use interactive learning with forms. - -The requested_slot slot ------------------------ - -The slot ``requested_slot`` is automatically added to the domain as an -unfeaturized slot. If you want to make it featurized, you need to add it -to your domain file as a categorical slot. You might want to do this if you -want to handle your unhappy paths differently depending on what slot is -currently being asked from the user. For example, say your users respond -to one of the bot's questions with another question, like *why do you need to know that?* -The response to this ``explain`` intent depends on where we are in the story. -In the restaurant case, your stories would look something like this: - -.. code-block:: story - - ## explain cuisine slot - * request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} - - slot{"requested_slot": "cuisine"} - * explain - - utter_explain_cuisine - - restaurant_form - - slot{"cuisine": "greek"} - ( ... all other slots the form set ... ) - - form{"name": null} - - ## explain num_people slot - * request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} - - slot{"requested_slot": "num_people"} - * explain - - utter_explain_num_people - - restaurant_form - - slot{"cuisine": "greek"} - ( ... all other slots the form set ... ) - - form{"name": null} - -Again, is is **strongly** recommended that you use interactive -learning to build these stories. -Please read :ref:`section_interactive_learning_forms` -on how to use interactive learning with forms. - -.. _conditional-logic: - -Handling conditional slot logic -------------------------------- - -Many forms require more logic than just requesting a list of fields. -For example, if someone requests ``greek`` as their cuisine, you may want to -ask if they are looking for somewhere with outside seating. - -You can achieve this by writing some logic into the ``required_slots()`` method, -for example: - -.. code-block:: python - - @staticmethod - def required_slots(tracker) -> List[Text]: - """A list of required slots that the form has to fill""" - - if tracker.get_slot('cuisine') == 'greek': - return ["cuisine", "num_people", "outdoor_seating", - "preferences", "feedback"] - else: - return ["cuisine", "num_people", - "preferences", "feedback"] - -This mechanism is quite general and you can use it to build many different -kinds of logic into your forms. - - - -Debugging ---------- - -The first thing to try is to run your bot with the ``debug`` flag, see :ref:`command-line-interface` for details. -If you are just getting started, you probably only have a few hand-written stories. -This is a great starting point, but -you should give your bot to people to test **as soon as possible**. One of the guiding principles -behind Rasa Core is: - -.. pull-quote:: Learning from real conversations is more important than designing hypothetical ones - -So don't try to cover every possibility in your hand-written stories before giving it to testers. -Real user behavior will always surprise you! 
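As mentioned in the Debugging section above, running with debug-level logging is the quickest way to see what the form and the policies are doing. Assuming the standard project layout with a separate action server for your ``FormAction``, that could look like:

.. code-block:: bash

    # start the action server that runs your FormAction (in a separate terminal)
    rasa run actions

    # talk to the bot on the command line with verbose logging
    rasa shell --debug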
diff --git a/docs/core/interactive-learning.rst b/docs/core/interactive-learning.rst deleted file mode 100644 index c094093f778a..000000000000 --- a/docs/core/interactive-learning.rst +++ /dev/null @@ -1,273 +0,0 @@ -:desc: Use Interactive learning to continuously validate and improve the - performance of your AI Assistant using machine learning based - open source dialogue management. - -.. _interactive-learning: - -Interactive Learning -==================== - -.. edit-link:: - -This page shows how to use interactive learning on the command line. - -In interactive learning mode, you provide feedback to your bot while you talk -to it. This is a powerful way -to explore what your bot can do, and the easiest way to fix any mistakes -it makes. One advantage of machine learning-based dialogue is that when -your bot doesn't know how to do something yet, you can just teach it! -Some people call this `Software 2.0 <https://medium.com/@karpathy/software-2-0-a64152b37c35>`_. - - -.. note:: - - Rasa X provides a UI for interactive learning, and you can use any user conversation - as a starting point. See `Annotate Conversations <https://rasa.com/docs/rasa-x/annotate-conversations/>`_ in the Rasa X docs. - -.. contents:: - :local: - -Running Interactive Learning -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Run the following command to start interactive learning: - -.. code-block:: bash - - rasa run actions --actions actions& - - rasa interactive \ - -m models/20190515-135859.tar.gz \ - --endpoints endpoints.yml - -The first command starts the action server (see :ref:`custom-actions`). - -The second command starts interactive learning mode. - -In interactive mode, Rasa will ask you to confirm every prediction -made by NLU and Core before proceeding. -Here's an example: - -.. code-block:: text - - Bot loaded. Type a message and press enter (use '/stop' to exit). - - ? Next user input: hello - - ? Is the NLU classification for 'hello' with intent 'hello' correct? Yes - - ------ - Chat History - - # Bot You - ──────────────────────────────────────────── - 1 action_listen - ──────────────────────────────────────────── - 2 hello - intent: hello 1.00 - ------ - - ? The bot wants to run 'utter_greet', correct? (Y/n) - - -The chat history and slot values are printed to the screen, which -should be all the information your need to decide what the correct -next action is. - -In this case, the bot chose the -right action (``utter_greet``), so we type ``y``. -Then we type ``y`` again, because ``action_listen`` is the correct -action after greeting. We continue this loop, chatting with the bot, -until the bot chooses the wrong action. - -Providing feedback on errors -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -For this example we are going to use the ``concertbot`` example, -so make sure you have the domain & data for it. You can download -the data from our `github repo -<https://github.com/RasaHQ/rasa/tree/master/examples/concertbot>`_. - -If you ask ``/search_concerts``, the bot should suggest -``action_search_concerts`` and then ``action_listen`` (the confidence at which -the policy selected its next action will be displayed next to the action name). -Now let's enter ``/compare_reviews`` as the next user message. -The bot *might* choose the wrong one out of the two -possibilities (depending on the training run, it might also be correct): - -.. 
code-block:: text - - ------ - Chat History - - # Bot You - ─────────────────────────────────────────────────────────────── - 1 action_listen - ─────────────────────────────────────────────────────────────── - 2 /search_concerts - intent: search_concerts 1.00 - ─────────────────────────────────────────────────────────────── - 3 action_search_concerts 0.72 - action_listen 0.78 - ─────────────────────────────────────────────────────────────── - 4 /compare_reviews - intent: compare_reviews 1.00 - - - Current slots: - concerts: None, venues: None - - ------ - ? The bot wants to run 'action_show_concert_reviews', correct? No - - -Now we type ``n``, because it chose the wrong action, and we get a new -prompt asking for the correct one. This also shows the probabilities the -model has assigned to each of the actions: - -.. code-block:: text - - ? What is the next action of the bot? (Use arrow keys) - ❯ 0.53 action_show_venue_reviews - 0.46 action_show_concert_reviews - 0.00 utter_goodbye - 0.00 action_search_concerts - 0.00 utter_greet - 0.00 action_search_venues - 0.00 action_listen - 0.00 utter_youarewelcome - 0.00 utter_default - 0.00 action_default_fallback - 0.00 action_restart - - - -In this case, the bot should ``action_show_concert_reviews`` (rather than venue -reviews!) so we select that action. - -Now we can keep talking to the bot for as long as we like to create a longer -conversation. At any point you can press ``Ctrl-C`` and the bot will -provide you with exit options. You can write your newly-created stories and NLU -data to files. You can also go back a step if you made a mistake when providing -feedback. - -Make sure to combine the dumped stories and NLU examples with your original -training data for the next training. - -Visualization of conversations -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -During the interactive learning, Rasa will plot the current conversation -and a few similar conversations from the training data to help you -keep track of where you are. - -You can view the visualization at http://localhost:5005/visualization.html -as soon as you've started interactive learning. - -To skip the visualization, run ``rasa interactive --skip-visualization``. - -.. image:: /_static/images/interactive_learning_graph.gif - -.. _section_interactive_learning_forms: - -Interactive Learning with Forms -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you're using a FormAction, there are some additional things to keep in mind -when using interactive learning. - -The ``form:`` prefix -~~~~~~~~~~~~~~~~~~~~ - -The form logic is described by your ``FormAction`` class, and not by the stories. -The machine learning policies should not have to learn this behavior, and should -not get confused if you later change your form action, for example by adding or -removing a required slot. -When you use interactive learning to generate stories containing a form, -the conversation steps handled by the form -get a :code:`form:` prefix. This tells Rasa Core to ignore these steps when training -your other policies. There is nothing special you have to do here, all of the form's -happy paths are still covered by the basic story given in :ref:`forms`. - -Here is an example: - -.. 
code-block:: story - - * request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} - - slot{"requested_slot": "cuisine"} - * form: inform{"cuisine": "mexican"} - - slot{"cuisine": "mexican"} - - form: restaurant_form - - slot{"cuisine": "mexican"} - - slot{"requested_slot": "num_people"} - * form: inform{"number": "2"} - - form: restaurant_form - - slot{"num_people": "2"} - - form{"name": null} - - slot{"requested_slot": null} - - utter_slots_values - - -Input validation -~~~~~~~~~~~~~~~~ - -Every time the user responds with something *other* than the requested slot or -any of the required slots, -you will be asked whether you want the form action to try and extract a slot -from the user's message when returning to the form. This is best explained with -and example: - -.. code-block:: text - - 7 restaurant_form 1.00 - slot{"num_people": "3"} - slot{"requested_slot": "outdoor_seating"} - do you want to sit outside? - action_listen 1.00 - ───────────────────────────────────────────────────────────────────────────────────── - 8 /stop - intent: stop 1.00 - ───────────────────────────────────────────────────────────────────────────────────── - 9 utter_ask_continue 1.00 - do you want to continue? - action_listen 1.00 - ───────────────────────────────────────────────────────────────────────────────────── - 10 /affirm - intent: affirm 1.00 - - - Current slots: - cuisine: greek, feedback: None, num_people: 3, outdoor_seating: None, - preferences: None, requested_slot: outdoor_seating - - ------ - 2018-11-05 21:36:53 DEBUG rasa.core.tracker_store - Recreating tracker for id 'default' - ? The bot wants to run 'restaurant_form', correct? Yes - 2018-11-05 21:37:08 DEBUG rasa.core.tracker_store - Recreating tracker for id 'default' - ? Should 'restaurant_form' validate user input to fill the slot 'outdoor_seating'? (Y/n) - -Here the user asked to stop the form, and the bot asks the user whether they're sure -they don't want to continue. The user says they want to continue (the ``/affirm`` intent). -Here ``outdoor_seating`` has a ``from_intent`` slot mapping (mapping -the ``/affirm`` intent to ``True``), so this user input could be used to fill -that slot. However, in this case the user is just responding to the -"do you want to continue?" question and so you select ``n``, the user input -should not be validated. The bot will then continue to ask for the -``outdoor_seating`` slot again. - -.. warning:: - - If there is a conflicting story in your training data, i.e. you just chose - to validate the input (meaning it will be printed with the ``forms:`` prefix), - but your stories file contains the same story where you don't validate - the input (meaning it's without the ``forms:`` prefix), you will need to make - sure to remove this conflicting story. When this happens, there is a warning - prompt that reminds you to do this: - - **WARNING: FormPolicy predicted no form validation based on previous training - stories. Make sure to remove contradictory stories from training data** - - Once you've removed that story, you can press enter and continue with - interactive learning diff --git a/docs/core/knowledge-bases.rst b/docs/core/knowledge-bases.rst deleted file mode 100644 index 6d5a1988cd2a..000000000000 --- a/docs/core/knowledge-bases.rst +++ /dev/null @@ -1,560 +0,0 @@ -:desc: Leverage information from knowledge bases inside conversations using ActionQueryKnowledgeBase - in open source bot framework Rasa. - -.. 
_knowledge_base_actions: - -Knowledge Base Actions -====================== - -.. edit-link:: - -.. warning:: - This feature is experimental. - We introduce experimental features to get feedback from our community, so we encourage you to try it out! - However, the functionality might be changed or removed in the future. - If you have feedback (positive or negative) please share it with us on the `forum <https://forum.rasa.com>`_. - -.. contents:: - :local: - -Knowledge base actions enable you to handle the following kind of conversations: - -.. image:: ../_static/images/knowledge-base-example.png - -A common problem in conversational AI is that users do not only refer to certain objects by their names, -but also use reference terms such as "the first one" or "it". -We need to keep track of the information that was presented to resolve these mentions to -the correct object. - -In addition, users may want to obtain detailed information about objects during a conversation -- -for example, whether a restaurant has outside seating, or how expensive it is. -In order to respond to those user requests, knowledge about the restaurant domain is needed. -Since the information is subject to change, hard-coding the information isn't the solution. - - -To handle the above challenges, Rasa can be integrated with knowledge bases. To use this integration, you can create a -custom action that inherits from ``ActionQueryKnowledgeBase``, a pre-written custom action that contains -the logic to query a knowledge base for objects and their attributes. - -You can find a complete example in ``examples/knowledgebasebot`` -(`knowledge base bot <https://github.com/RasaHQ/rasa/blob/master/examples/knowledgebasebot/>`_), as well as instructions -for implementing this custom action below. - - -Using ``ActionQueryKnowledgeBase`` ----------------------------------- - -.. _create_knowledge_base: - -Create a Knowledge Base -~~~~~~~~~~~~~~~~~~~~~~~ - -The data used to answer the user's requests will be stored in a knowledge base. -A knowledge base can be used to store complex data structures. -We suggest you get started by using the ``InMemoryKnowledgeBase``. -Once you want to start working with a large amount of data, you can switch to a custom knowledge base -(see :ref:`custom_knowledge_base`). - -To initialize an ``InMemoryKnowledgeBase``, you need to provide the data in a json file. -The following example contains data about restaurants and hotels. -The json structure should contain a key for every object type, i.e. ``"restaurant"`` and ``"hotel"``. -Every object type maps to a list of objects -- here we have a list of 3 restaurants and a list of 3 hotels. - -.. 
code-block:: json - - { - "restaurant": [ - { - "id": 0, - "name": "Donath", - "cuisine": "Italian", - "outside-seating": true, - "price-range": "mid-range" - }, - { - "id": 1, - "name": "Berlin Burrito Company", - "cuisine": "Mexican", - "outside-seating": false, - "price-range": "cheap" - }, - { - "id": 2, - "name": "I due forni", - "cuisine": "Italian", - "outside-seating": true, - "price-range": "mid-range" - } - ], - "hotel": [ - { - "id": 0, - "name": "Hilton", - "price-range": "expensive", - "breakfast-included": true, - "city": "Berlin", - "free-wifi": true, - "star-rating": 5, - "swimming-pool": true - }, - { - "id": 1, - "name": "Hilton", - "price-range": "expensive", - "breakfast-included": true, - "city": "Frankfurt am Main", - "free-wifi": true, - "star-rating": 4, - "swimming-pool": false - }, - { - "id": 2, - "name": "B&B", - "price-range": "mid-range", - "breakfast-included": false, - "city": "Berlin", - "free-wifi": false, - "star-rating": 1, - "swimming-pool": false - }, - ] - } - - -Once the data is defined in a json file, called, for example, ``data.json``, you will be able use the this data file to create your -``InMemoryKnowledgeBase``, which will be passed to the action that queries the knowledge base. - -Every object in your knowledge base should have at least the ``"name"`` and ``"id"`` fields to use the default implementation. -If it doesn't, you'll have to :ref:`customize your InMemoryKnowledgeBase <customize_in_memory_knowledge_base>`. - - -Define the NLU Data -~~~~~~~~~~~~~~~~~~~ - -In this section: - -- we will introduce a new intent, ``query_knowledge_base`` -- we will to annotate ``mention`` entities so that our model detects indirect mentions of objects like "the - first one" -- we will use :ref:`synonyms <entity_synonyms>` extensively - -For the bot to understand that the user wants to retrieve information from the knowledge base, you need to define -a new intent. We will call it ``query_knowledge_base``. - -We can split requests that ``ActionQueryKnowledgeBase`` can handle into two categories: -(1) the user wants to obtain a list of objects of a specific type, or (2) the user wants to know about a certain -attribute of an object. The intent should contain lots of variations of both of these requests: - -.. code-block:: md - - ## intent:query_knowledge_base - - what [restaurants](object_type:restaurant) can you recommend? - - list some [restaurants](object_type:restaurant) - - can you name some [restaurants](object_type:restaurant) please? - - can you show me some [restaurant](object_type:restaurant) options - - list [German](cuisine) [restaurants](object_type:restaurant) - - do you have any [mexican](cuisine) [restaurants](object_type:restaurant)? - - do you know the [price range](attribute:price-range) of [that one](mention)? - - what [cuisine](attribute) is [it](mention)? - - do you know what [cuisine](attribute) the [last one](mention:LAST) has? - - does the [first one](mention:1) have [outside seating](attribute:outside-seating)? - - what is the [price range](attribute:price-range) of [Berlin Burrito Company](restaurant)? - - what about [I due forni](restaurant)? - - can you tell me the [price range](attribute) of [that restaurant](mention)? - - what [cuisine](attribute) do [they](mention) have? - ... - -The above example just shows examples related to the restaurant domain. -You should add examples for every object type that exists in your knowledge base to the same ``query_knowledge_base`` intent. 
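For example, since the knowledge base above also contains hotels, the same intent could additionally cover hotel requests (illustrative only — the attribute names must match the keys in your own knowledge base):

.. code-block:: md

    ## intent:query_knowledge_base
    - can you list some [hotels](object_type:hotel)?
    - show me [hotels](object_type:hotel) in [Berlin](city)
    - does the [last one](mention:LAST) have [free wifi](attribute:free-wifi)?
    - is [breakfast included](attribute:breakfast-included) at the [first one](mention:1)?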
- -In addition to adding a variety of training examples for each query type, -you need to specify the and annotate the following entities in your training examples: - -- ``object_type``: Whenever a training example references a specific object type from your knowledge base, the object type should - be marked as an entity. Use :ref:`synonyms <entity_synonyms>` to map e.g. ``restaurants`` to ``restaurant``, the correct - object type listed as a key in the knowledge base. -- ``mention``: If the user refers to an object via "the first one", "that one", or "it", you should mark those terms - as ``mention``. We also use synonyms to map some of the mentions to symbols. You can learn about that - in :ref:`resolving mentions <resolve_mentions>`. -- ``attribute``: All attribute names defined in your knowledge base should be identified as ``attribute`` in the - NLU data. Again, use synonyms to map variations of an attribute name to the one used in the - knowledge base. - -Remember to add those entities to your domain file (as entities and slots): - -.. code-block:: yaml - - entities: - - object_type - - mention - - attribute - - slots: - object_type: - type: unfeaturized - mention: - type: unfeaturized - attribute: - type: unfeaturized - - -.. _create_action_query_knowledge_base: - - -Create an Action to Query your Knowledge Base -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To create your own knowledge base action, you need to inherit ``ActionQueryKnowledgeBase`` and pass the knowledge -base to the constructor of ``ActionQueryKnowledgeBase``. - -.. code-block:: python - - class MyKnowledgeBaseAction(ActionQueryKnowledgeBase): - def __init__(self): - knowledge_base = InMemoryKnowledgeBase("data.json") - super().__init__(knowledge_base) - -Whenever you create an ``ActionQueryKnowledgeBase``, you need to pass a ``KnowledgeBase`` to the constructor. -It can be either an ``InMemoryKnowledgeBase`` or your own implementation of a ``KnowledgeBase`` -(see :ref:`custom_knowledge_base`). -You can only pull information from one knowledge base, as the usage of multiple knowledge bases at the same time is not supported. - -This is the entirety of the code for this action! The name of the action is ``action_query_knowledge_base``. -Don't forget to add it to your domain file: - -.. code-block:: yaml - - actions: - - action_query_knowledge_base - -.. note:: - If you overwrite the default action name ``action_query_knowledge_base``, you need to add the following three - unfeaturized slots to your domain file: ``knowledge_base_objects``, ``knowledge_base_last_object``, and - ``knowledge_base_last_object_type``. - The slots are used internally by ``ActionQueryKnowledgeBase``. - If you keep the default action name, those slots will be automatically added for you. - -You also need to make sure to add a story to your stories file that includes the intent ``query_knowledge_base`` and -the action ``action_query_knowledge_base``. For example: - -.. code-block:: md - - ## Happy Path - * greet - - utter_greet - * query_knowledge_base - - action_query_knowledge_base - * goodbye - - utter_goodbye - -The last thing you need to do is to define the template ``utter_ask_rephrase`` in your domain file. -If the action doesn't know how to handle the user's request, it will use this template to ask the user to rephrase. -For example, add the following templates to your domain file: - -.. code-block:: md - - utter_ask_rephrase: - - text: "Sorry, I'm not sure I understand. Could you rephrase it?" 
- - text: "Could you please rephrase your message? I didn't quite get that." - -After adding all the relevant pieces, the action is now able to query the knowledge base. - -How It Works ------------- - -``ActionQueryKnowledgeBase`` looks at both the entities that were picked up in the request as well as the -previously set slots to decide what to query for. - -Query the Knowledge Base for Objects -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In order to query the knowledge base for any kind of object, the user's request needs to include the object type. -Let's look at an example: - - `Can you please name some restaurants?` - -This question includes the object type of interest: "restaurant." -The bot needs to pick up on this entity in order to formulate a query -- otherwise the action would not know what objects the user is interested in. - -When the user says something like: - - `What Italian restaurant options in Berlin do I have?` - -The user wants to obtain a list of restaurants that (1) have Italian cuisine and (2) are located in -Berlin. If the NER detects those attributes in the request of the user, the action will use those to filter the -restaurants found in the knowledge base. - -In order for the bot to detect these attributes, you need to mark "Italian" and "Berlin" as entities in the NLU data: - -.. code-block:: md - - What [Italian](cuisine) [restaurant](object_type) options in [Berlin](city) do I have?. - -The names of the attributes, "cuisine" and "city," should be equal to the ones used in the knowledge base. -You also need to add those as entities and slots to the domain file. - -Query the Knowledge Base for an Attribute of an Object -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If the user wants to obtain specific information about an object, the request should include both the object and -attribute of interest. -For example, if the user asks something like: - - `What is the cuisine of Berlin Burrito Company?` - -The user wants to obtain the "cuisine" (attribute of interest) for the restaurant "Berlin Burrito Company" (object of -interest). - -The attribute and object of interest should be marked as entities in the NLU training data: - -.. code-block:: md - - What is the [cuisine](attribute) of [Berlin Burrito Company](restaurant)? - -Make sure to add the object type, "restaurant," to the domain file as entity and slot. - - -.. _resolve_mentions: - -Resolve Mentions -~~~~~~~~~~~~~~~~ - -Following along from the above example, users may not always refer to restaurants by their names. -Users can either refer to the object of interest by its name, e.g. "Berlin Burrito Company" (representation string -of the object), or they may refer to a previously listed object via a mention, for example: - - `What is the cuisine of the second restaurant you mentioned?` - -Our action is able to resolve these mentions to the actual object in the knowledge base. -More specifically, it can resolve two mention types: (1) ordinal mentions, such as "the first one", and (2) -mentions such as "it" or "that one". - -**Ordinal Mentions** - -When a user refers to an object by its position in a list, it is called an ordinal mention. Here's an example: - -- User: `What restaurants in Berlin do you know?` -- Bot: `Found the following objects of type 'restaurant': 1: I due forni 2: PastaBar 3: Berlin Burrito Company` -- User: `Does the first one have outside seating?` - -The user referred to "I due forni" by the term "the first one". 
-Other ordinal mentions might include "the second one," "the last one," "any," or "3". - -Ordinal mentions are typically used when a list of objects was presented to the user. -To resolve those mentions to the actual object, we use an ordinal mention mapping which is set in the -``KnowledgeBase`` class. -The default mapping looks like: - - .. code-block:: python - - { - "1": lambda l: l[0], - "2": lambda l: l[1], - "3": lambda l: l[2], - "4": lambda l: l[3], - "5": lambda l: l[4], - "6": lambda l: l[5], - "7": lambda l: l[6], - "8": lambda l: l[7], - "9": lambda l: l[8], - "10": lambda l: l[9], - "ANY": lambda l: random.choice(list), - "LAST": lambda l: l[-1], - } - -The ordinal mention mapping maps a string, such as "1", to the object in a list, e.g. ``lambda l: l[0]``, meaning the -object at index ``0``. - -As the ordinal mention mapping does not, for example, include an entry for "the first one", -it is important that you use :ref:`entity_synonyms` to map "the first one" in your NLU data to "1": - -.. code-block:: md - - Does the [first one](mention:1) have [outside seating](attribute:outside-seating)? - -The NER detects "first one" as a ``mention`` entity, but puts "1" into the ``mention`` slot. -Thus, our action can take the ``mention`` slot together with the ordinal mention mapping to resolve "first one" to -the actual object "I due forni". - -You can overwrite the ordinal mention mapping by calling the function ``set_ordinal_mention_mapping()`` on your -``KnowledgeBase`` implementation (see :ref:`customize_in_memory_knowledge_base`). - -**Other Mentions** - -Take a look at the following conversation: - -- User: `What is the cuisine of PastaBar?` -- Bot: `PastaBar has an Italian cuisine.` -- User: `Does it have wifi?` -- Bot: `Yes.` -- User: `Can you give me an address?` - -In the question "Does it have wifi?", the user refers to "PastaBar" by the word "it". -If the NER detected "it" as the entity ``mention``, the knowledge base action would resolve it to the last mentioned -object in the conversation, "PastaBar". - -In the next input, the user refers indirectly to the object "PastaBar" instead of mentioning it explicitly. -The knowledge base action would detect that the user wants to obtain the value of a specific attribute, in this case, the address. -If no mention or object was detected by the NER, the action assumes the user is referring to the most recently -mentioned object, "PastaBar". - -You can disable this behaviour by setting ``use_last_object_mention`` to ``False`` when initializing the action. - - -Customization -------------- - -Customizing ``ActionQueryKnowledgeBase`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can overwrite the following two functions of ``ActionQueryKnowledgeBase`` if you'd like to customize what the bot -says to the user: - -- ``utter_objects()`` -- ``utter_attribute_value()`` - -``utter_objects()`` is used when the user has requested a list of objects. -Once the bot has retrieved the objects from the knowledge base, it will respond to the user by default with a message, formatted like: - - `Found the following objects of type 'restaurant':` - `1: I due forni` - `2: PastaBar` - `3: Berlin Burrito Company` - -Or, if no objects are found, - - `I could not find any objects of type 'restaurant'.` - -If you want to change the utterance format, you can overwrite the method ``utter_objects()`` in your action. - -The function ``utter_attribute_value()`` determines what the bot utters when the user is asking for specific information about -an object. 
- -If the attribute of interest was found in the knowledge base, the bot will respond with the following utterance: - - `'Berlin Burrito Company' has the value 'Mexican' for attribute 'cuisine'.` - -If no value for the requested attribute was found, the bot will respond with - - `Did not find a valid value for attribute 'cuisine' for object 'Berlin Burrito Company'.` - -If you want to change the bot utterance, you can overwrite the method ``utter_attribute_value()``. - -.. note:: - There is a `tutorial <https://blog.rasa.com/integrating-rasa-with-knowledge-bases/>`_ on our blog about - how to use knowledge bases in custom actions. The tutorial explains the implementation behind - ``ActionQueryKnowledgeBase`` in detail. - - -Creating Your Own Knowledge Base Actions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``ActionQueryKnowledgeBase`` should allow you to easily get started with integrating knowledge bases into your actions. -However, the action can only handle two kinds of user requests: - -- the user wants to get a list of objects from the knowledge base -- the user wants to get the value of an attribute for a specific object - -The action is not able to compare objects or consider relations between objects in your knowledge base. -Furthermore, resolving any mention to the last mentioned object in the conversation might not always be optimal. - -If you want to tackle more complex use cases, you can write your own custom action. -We added some helper functions to ``rasa_sdk.knowledge_base.utils`` -(`link to code <https://github.com/RasaHQ/rasa-sdk/tree/master/rasa_sdk/knowledge_base/>`_ ) -to help you when implementing your own solution. -We recommend using the ``KnowledgeBase`` interface so that you can still use the ``ActionQueryKnowledgeBase`` -alongside your new custom action. - -If you write a knowledge base action that tackles one of the above use cases or a new one, be sure to tell us about -it on the `forum <https://forum.rasa.com>`_! - - -.. _customize_in_memory_knowledge_base: - -Customizing the ``InMemoryKnowledgeBase`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The class ``InMemoryKnowledgeBase`` inherits ``KnowledgeBase``. -You can customize your ``InMemoryKnowledgeBase`` by overwriting the following functions: - -- ``get_key_attribute_of_object()``: To keep track of what object the user was talking about last, we store the value - of the key attribute in a specific slot. Every object should have a key attribute that is unique, - similar to the primary key in a relational database. By default, the name of the key attribute for every object type - is set to ``id``. You can overwrite the name of the key attribute for a specific object type by calling - ``set_key_attribute_of_object()``. -- ``get_representation_function_of_object()``: Let's focus on the following restaurant: - - .. code-block:: json - - { - "id": 0, - "name": "Donath", - "cuisine": "Italian", - "outside-seating": true, - "price-range": "mid-range" - } - - When the user asks the bot to list any Italian restaurant, it doesn't need all of the details of the restaurant. - Instead, you want to provide a meaningful name that identifies the restaurant -- in most cases, the name of the object will do. - The function ``get_representation_function_of_object()`` returns a lambda function that maps the - above restaurant object to its name. - - .. 
code-block:: python - - lambda obj: obj["name"] - - This function is used whenever the bot is talking about a specific object, so that the user is presented a meaningful - name for the object. - - By default, the lambda function returns the value of the ``"name"`` attribute of the object. - If your object does not have a ``"name"`` attribute , or the ``"name"`` of an object is - ambiguous, you should set a new lambda function for that object type by calling - ``set_representation_function_of_object()``. -- ``set_ordinal_mention_mapping()``: The ordinal mention mapping is needed to resolve an ordinal mention, such as - "second one," to an object in a list. By default, the ordinal mention mapping looks like this: - - .. code-block:: python - - { - "1": lambda l: l[0], - "2": lambda l: l[1], - "3": lambda l: l[2], - "4": lambda l: l[3], - "5": lambda l: l[4], - "6": lambda l: l[5], - "7": lambda l: l[6], - "8": lambda l: l[7], - "9": lambda l: l[8], - "10": lambda l: l[9], - "ANY": lambda l: random.choice(list), - "LAST": lambda l: l[-1], - } - - You can overwrite it by calling the function ``set_ordinal_mention_mapping()``. - If you want to learn more about how this mapping is used, check out :ref:`resolve_mentions`. - - -See the `example bot <https://github.com/RasaHQ/rasa/blob/master/examples/knowledgebasebot/actions.py>`_ for an -example implementation of an ``InMemoryKnowledgeBase`` that uses the method ``set_representation_function_of_object()`` -to overwrite the default representation of the object type "hotel." -The implementation of the ``InMemoryKnowledgeBase`` itself can be found in the -`rasa-sdk <https://github.com/RasaHQ/rasa-sdk/tree/master/rasa_sdk/knowledge_base/>`_ package. - - -.. _custom_knowledge_base: - -Creating Your Own Knowledge Base -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you have more data or if you want to use a more complex data structure that, for example, involves relations between -different objects, you can create your own knowledge base implementation. -Just inherit ``KnowledgeBase`` and implement the methods ``get_objects()``, ``get_object()``, and -``get_attributes_of_object()``. The `knowledge base code <https://github.com/RasaHQ/rasa-sdk/tree/master/rasa_sdk/knowledge_base/>`_ -provides more information on what those methods should do. - -You can also customize your knowledge base further, by adapting the methods mentioned in the section -:ref:`customize_in_memory_knowledge_base`. - -.. note:: - We wrote a `blog post <https://blog.rasa.com/set-up-a-knowledge-base-to-encode-domain-knowledge-for-rasa/>`_ - that explains how you can set up your own knowledge base. diff --git a/docs/core/old-core-change-log.rst b/docs/core/old-core-change-log.rst deleted file mode 100644 index bda2dc5a065f..000000000000 --- a/docs/core/old-core-change-log.rst +++ /dev/null @@ -1,1025 +0,0 @@ -:desc: Rasa Core Changelog - -.. _old-core-change-log: - -Core Change Log -=============== - -All notable changes to this project will be documented in this file. -This project adheres to `Semantic Versioning`_ starting with version 0.2.0. 
- -[0.14.4] - 2019-05-13 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- correctly process form actions in core evaluations - -[0.14.3] - 2019-05-07 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed interactive learning history printing - -[0.14.2] - 2019-05-07 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed required version of ``rasa_core_sdk`` during installation - -[0.14.1] - 2019-05-02 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed MappingPolicy bug upon prediction of ACTION_LISTEN after mapped action - -[0.14.0] - 2019-04-23 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- ``tf.ConfigProto`` configuration can now be specified - for tensorflow based pipelines -- open api spec for the Rasa Core SDK action server -- documentation about early deactivation of a form in validation -- Added max_event_history in tracker_store to set this value in DialogueStateTracker -- utility functions for colored logging -- open webbrowser when visualizing stories -- added ``/parse`` endpoint to query for NLU results -- File based event store -- ability to configure event store using the endpoints file -- added ability to use multiple env vars per line in yaml files -- added ``priority`` property of policies to influence best policy in - the case of equal confidence -- **support for python 3.7** -- ``Tracker.active_form`` now includes ``trigger_message`` attribute to allow - access to message triggering the form -- ``MappingPolicy`` which can be used to directly map an intent to an action - by adding the ``triggers`` keyword to an intent in the domain. -- default action ``action_back``, which when triggered with ``/back`` allows - the user to undo their previous message - -Changed -------- -- starter packs are now tested in parallel with the unittests, - and only on master and branches ending in ``.x`` (i.e. new version releases) -- renamed ``train_dialogue_model`` to ``train`` -- renamed ``rasa_core.evaluate`` to ``rasa_core.test`` -- ``event_broker.publish`` receives the event as a dict instead of text -- configuration key ``store_type`` of the tracker store endpoint configuration - has been renamed to ``type`` to allow usage across endpoints -- renamed ``policy_metadata.json`` to ``metadata.json`` for persisted models -- ``scores`` array returned by the ``/conversations/{sender_id}/predict`` - endpoint is now sorted according to the actions' scores. -- now randomly created augmented stories are subsampled during training and marked, - so that memo policies can ignore them -- changed payloads from "text" to "message" in files: server.yml, docs/connectors.rst, - rasa_core/server.py, rasa_core/training/interactive.py, tests/test_interactive.py -- dialogue files in ``/data/test_dialogues`` were updated with conversations - from the bots in ``/examples`` -- updated to tensorflow 1.13 - -Removed -------- -- removed ``admin_token`` from ``RasaChatInput`` since it wasn't used - -Fixed ------ -- When a ``fork`` is used in interactive learning, every forked - storyline is saved (not just the last) -- Handles slot names which contain characters that are invalid as python - variable name (e.g. 
dot) in a template - -[0.13.8] - 2019-04-16 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Message parse data no longer passed to graph node label in interactive - learning visualization - -[0.13.7] - 2019-04-01 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- correctly process form actions in end-to-end evaluations - -[0.13.6] - 2019-03-28 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- correctly process intent messages in end-to-end evaluations - -[Unreleased 0.13.8.aX] -^^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Message parse data no longer passed to graph node label in interactive - learning visualization - -[0.13.7] - 2019-04-01 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- correctly process form actions in end-to-end evaluations - -[0.13.6] - 2019-03-28 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- correctly process intent messages in end-to-end evaluations - -[0.13.4] - 2019-03-19 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- properly tag docker image as ``stable`` (instead of tagging alpha tags) - -[0.13.3] - 2019-03-04 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- Tracker Store Mongo DB's documentation now has ``auth_source`` parameter, - which is used for passing database name associated with the user's - credentials. - -[0.13.2] - 2019-02-06 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- ``MessageProcessor`` now also passes ``message_id`` to the interpreter - when parsing with a ``RasaNLUHttpInterpreter`` - -[0.13.1] - 2019-01-29 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- ``message_id`` can now be passed in the payload to the - ``RasaNLUHttpInterpreter`` - -Fixed ------ -- fixed domain persistence after exiting interactive learning -- fix form validation question error in interactive learning - -.. _corev0-13-0: - -[0.13.0] - 2019-01-23 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- A support for session persistence mechanism in the ``SocketIOInput`` - compatible with the example SocketIO WebChat + short explanation on - how session persistence should be implemented in a frontend -- ``TwoStageFallbackPolicy`` which asks the user for their affirmation - if the NLU confidence is low for an intent, for rephrasing the intent - if they deny the suggested intent, and does finally an ultimate fallback - if it does not get the intent right -- Additional checks in PolicyEnsemble to ensure that custom Policy - classes' ``load`` function returns the correct type -- Travis script now clones and tests the Rasa stack starter pack -- Entries for tensorflow and sklearn versions to the policy metadata -- SlackInput wont ignore ``app_mention`` event anymore. - Will handle messages containing @mentions to bots and will respond to these - (as long as the event itself is enabled in the application hosting the bot) -- Added sanitization mechanism for SlackInput that (in its current shape and form) - strips bot's self mentions from messages posted using the said @mentions. -- Added sanitization mechanism for SlackInput that (in its current - shape and form) strips bot's self mentions from messages posted using - the said @mentions. -- Added random seed option for KerasPolicy and EmbeddingPolicy - to allow for reproducible training results -- ``InvalidPolicyConfig`` error if policy in policy configuration could not be - loaded, or if ``policies`` key is empty or not provided -- Added a unique identifier to ``UserMessage`` and the ``UserUttered`` event. 
- -Removed -------- -- removed support for deprecated intents/entities format - -Changed -------- -- replaced ``pytest-pep8`` with ``pytest-pycodestyle`` -- switch from ``PyInquirer`` to ``questionary`` for the display of - commandline interface (to avoid prompt toolkit 2 version issues) -- if NLU classification returned ``None`` in interactive training, - directly ask a user for a correct intent -- trigger ``fallback`` on low nlu confidence - only if previous action is ``action_listen`` -- updated docs for interactive learning to inform users of the - ``--core`` flag -- Change memoization policies confidence score to 1.1 to override ML policies -- replaced flask server with async sanic - -Fixed ------ -- fix error during interactive learning which was caused by actions which - dispatched messages using ``dispatcher.utter_custom_message`` -- re-added missing ``python-engineio`` dependency -- fixed not working examples in ``examples/`` -- strip newlines from messages so you don't have something like "\n/restart\n" -- properly reload domain when using ``/model`` endpoint to upload new model -- updated documentation for custom channels to use the ``credentials.yml`` - -[0.12.3] - 2018-12-03 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- added ``scipy`` dependency (previously pulled in through keras) -- added element representation for command-line output - -Changed -------- -- improved button representation for custom buttons in command-line - -Changed -------- -- randomized initial sender_id during interactive training to avoid - loading previous sessions from persistent tracker stores - -Removed -------- -- removed keras dependency, since ``keras_policy`` uses ``tf.keras`` - - -[0.12.2] - 2018-11-20 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- argument handling on evaluate script -- added basic sanitization during visualization - - -[0.12.1] - 2018-11-11 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed interactive learning to properly submit executed actions to the action - server -- allow the specification of the policy configuration while using the - visualisation script -- use default configuration if no policy configuration is passed -- fixed html delivery from interactive server script (package compatible) -- ``SlackBot`` when created in ``SlackInputChannel`` inherits the - ``slack_channel`` property, allowing Slack bots to post to any channel - instead of only back to the user -- fix writing of new domain file from interactive learning -- fix reading of state featurizers from yaml -- fix reading of batch_size parameter in keras policy - - -.. _corev0-12-0: - -[0.12.0] - 2018-11-11 -^^^^^^^^^^^^^^^^^^^^^ - -.. warning:: - - This is major new version with a lot of changes under the hood as well - as on the API level. Please take a careful look at the - :ref:`migration-guide` guide before updating. **You need to retrain your models.** - -Added ------ -- new connector for the Cisco Webex Teams chat -- openapi documentation of server API -- NLU data learned through interactive learning will now be stored in a - separate markdown-format file (any previous NLU data is merged) -- Command line interface for interactive learning now displays policy - confidence alongside the action name -- added action prediction confidence & policy to ``ActionExecuted`` event -- the Core policy configuration can now be set in a config.yaml file. - This makes training custom policies possible. 
-- both the date and the time at which a model was trained are now - included in the policy's metadata when it is persisted -- show visualization of conversation while doing interactive learning -- option for end-to-end evaluation of Rasa Core and NLU examples in - ``evaluate.py`` script -- `/conversations/{sender_id}/story` endpoint for returning - the end-to-end story describing a conversation -- docker-compose file to start a rasa core server together with nlu, - an action server, and duckling -- http server (``rasa_core.run --enable-api``) evaluation endpoint -- ability to add tracker_store using endpoints.yml -- ability load custom tracker store modules using the endpoints.yml -- ability to add an event broker using an endpoint configuration file -- raise an exception when ``server.py`` is used instead of - ``rasa_core.run --enable-api`` -- add documentation on how to configure endpoints within a configuration file -- ``auth_source`` parameter in ``MongoTrackerStore`` defining the database to - authenticate against -- missing instructions on setting up the facebook connector -- environment variables specified with ``${env_variable}`` in a yaml - configuration file are now replaced with the value of the - environment variable -- detailed documentation on how to deploy Rasa with Docker -- make ``wait_time_between_pulls`` configurable through endpoint - configuration -- add ``FormPolicy`` to handle form action prediction -- add ``ActionExecutionRejection`` exception and - ``ActionExecutionRejected`` event -- add default action ``ActionDeactivateForm()`` -- add ``formbot`` example -- add ability to turn off auto slot filling with entity for each - slot in domain.yml -- add ``InvalidDomain`` exception -- add ``active_form_...`` to state dictionary -- add ``active_form`` and ``latest_action_name`` properties to - ``DialogueStateTracker`` -- add ``Form`` and ``FormValidation`` events -- add ``REQUESTED_SLOT`` constant -- add ability to read ``action_listen`` from stories -- added train/eval scripts to compare policies - -Changed -------- -- improved response format for ``/predict`` endpoint -- all error messages from the server are now in json format -- ``agent.log_message`` now returns a tracker instead of the trackers state -- the core container does not load the nlu model by default anymore. - Instead it can be connected to a nlu server. -- stories are now visualized as ``.html`` page instead of an image -- move and deduplicate restaurantbot nlu data from ``franken_data.json`` - to ``nlu_data.md`` -- forms were completely reworked, see changelog in ``rasa_core_sdk`` -- state featurization if some form is active changed -- ``Domain`` raises ``InvalidDomain`` exception -- interactive learning is now started with rasa_core.train interactive -- passing a policy config file to train a model is now required -- flags for output of evaluate script have been merged to one flag ``--output`` - where you provide a folder where any output from the script should be stored - -Removed -------- -- removed graphviz dependency -- policy config related flags in training script (see migration guide) - - -Fixed ------ -- fixed an issue with boolean slots where False and None had the same value - (breaking model compatibility with models that use a boolean slot) -- use utf8 everywhere when handling file IO -- argument ``--connector`` on run script accepts custom channel module names -- properly handle non ascii categorical slot values, e.g. 
``大于100亿元`` -- fixed HTTP server attempting to authenticate based on incorrect path to - the correct JWT data field -- all sender ids from channels are now handled as `str`. - Sender ids from old messages with an `int` id are converted to `str`. -- legacy pep8 errors - - -[0.11.12] - 2018-10-11 -^^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- Remove livechat widget from docs - - -[0.11.11] - 2018-10-05 -^^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Add missing name() to facebook Messenger class - - -[0.11.10] - 2018-10-05 -^^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- backport fix to JWT schema - - -[0.11.9] - 2018-10-04 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- pin tensorflow 1.10.0 - -[0.11.8] - 2018-09-28 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- cancel reminders if there has been a restarted event after the reminder - -Changed -------- -- JWT authentication now checks user roles. The ``admin`` role may access all - endpoints. For endpoints which contain a ``sender_id`` parameter, users - with the ``user`` role may only call endpoints where the ``sender_id`` - matches the user's ``username``. - -[0.11.7] - 2018-09-26 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- custom message method in rocketchat channel - -Fixed ------ -- don't fail if rasa and rest input channels are used together -- wrong paramter name in rocketchat channel methods -- Software 2.0 link on interactive learning documentation page went to - Tesla's homepage, now it links to Karpathy blogpost - -[0.11.6] - 2018-09-20 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- ``UserMessage`` and ``UserUttered`` classes have a new attribute - ``input_channel`` that stores the name of the ``InputChannel`` - through which the message was received - -[0.11.5] - 2018-09-20 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- numpy version incompatibility between rasa core and tensorflow - -[0.11.4] - 2018-09-19 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- a flag ``--fail_on_prediction_errors`` to the ``evaluate.py`` script - - if used when running the evaluation, the script will fail with a non - 0 exit code if there is at least one prediction error. This can be - used on CIs to validate models against test stories. -- JWT support: parameters to allow clients to authenticate requests to - the rasa_core.server using JWT's in addition to normal token based auth -- added socket.io input / output channel -- ``UserMessage`` and ``UserUttered`` classes have a new attribute - ``input_channel`` that stores the name of the ``InputChannel`` - through which the message was received - -Changed -------- -- dump failed stories after evaluation in the normal story format instead of - as a text file -- do not run actions during evaluation. instead, action are only predicted - and validated against the gold story. 
-- improved the online learning experience on the CLI -- made finetuning during online learning optional (use ``--finetune`` if - you want to enable it) - -Removed -------- -- package pytest-services since it wasn't necessary - -Fixed ------ -- fixed an issue with the followup (there was a name confusion, sometimes - the followup action would be set to the non existent ``follow_up_action`` - attribute instead of ``followup_action``) - -[0.11.3] - 2018-09-04 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- callback output channel, receives messages and uses a REST endpoint to - respond with messages - -Changed -------- -- channel input creation moved to the channel, every channel can now - customize how it gets created from the credentials file - -[0.11.2] - 2018-09-04 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- improved documentation for events (e.g. including json serialisation) - -Removed -------- -- outdated documentation for removed endpoints in the server - (``/parse`` & ``/continue``) - -Fixed ------ -- read in fallback command line args - -[0.11.1] - 2018-08-30 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- increased minimal compatible model version to 0.11.0 - -.. _corev0-11-0: - -[0.11.0] - 2018-08-30 -^^^^^^^^^^^^^^^^^^^^^ - -.. warning:: - - This is major new version with a lot of changes under the hood as well - as on the API level. Please take a careful look at the - :ref:`migration-guide` guide before updating. You need to retrain your models. - - -Added ------ -- added microsoft botframework input and output channels -- added rocket chat input and output channels -- script parameter ``--quiet`` to set the log level to ``WARNING`` -- information about the python version a model has been trained with to the - model metadata -- more emoji support for PY2 -- intent confidence support in RegexInterpreter -- added paramter to train script to pull training data from an url instead - of a stories file -- added new policy: :ref:`embedding_policy` implemented in tensorflow - -Changed -------- -- default log level for all scripts has been changed from ``WARNING`` to - ``INFO``. 
-- format of the credentials file to allow specifying the credentials for - multiple channels -- webhook URLs for the input channels have changed and need to be reset -- deprecated using ``rasa_core.server`` as a script - use - ``rasa_core.run --enable_api`` instead -- collecting output channel will no properly collect events for images, - buttons, and attachments - -Removed -------- -- removed the deprecated ``TopicSet`` event -- removed ``tracker.follow_up_action`` - use the ``FollowupAction`` - event instead -- removed ``action_factory: remote`` from domain file - the domain is - always run over http -- removed ``OnlineLearningPolicy`` - use the ``training.online`` - script instead - -Fixed -------- -- lots of type annotations -- some invalid documentation references -- changed all ``logger.warn`` to ``logger.warning`` - -[0.10.4] - 2018-08-08 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- more emoji support for PY2 -- intent confidence support in RegexInterpreter - -[0.10.3] - 2018-08-03 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- updated to Rasa NLU 0.13 -- improved documentation quickstart - -Fixed ------ -- server request argument handling on python 3 -- creation of training data story graph - removes more nodes and speeds up - the training - -[0.10.2] - 2018-07-24 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- new ``RasaChatInput`` channel -- option to ignore entities for certain intents - -Fixed ------ -- loading of NLU model - -[0.10.1] - 2018-07-18 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- - -- documentation changes - -.. _corev0-10-0: - -[0.10.0] - 2018-07-17 -^^^^^^^^^^^^^^^^^^^^^ - -.. warning:: - - This is a major new release with backward incompatible changes. Old trained - models can not be read with the new version - you need to retrain your model. - View the :ref:`migration-guide` for details. - -Added ------ -- allow bot responses to be managed externally (instead of putting them into - the ``domain.yml``) -- options to prevent slack from making re-deliver message upon meeting failure condition. - the default is to ignore ``http_timeout``. 
-- added ability to create domain from yaml string and export a domain to a yaml string -- added server endpoint to fetch domain as json or yaml -- new default action ActionDefaultFallback -- event streaming to a ``RabbitMQ`` message broker using ``Pika`` -- docs section on event brokers -- ``Agent()`` class supports a ``model_server`` ``EndpointConfig``, which it regularly queries to fetch dialogue models -- this can be used with ``rasa_core.server`` with the ``--endpoint`` option (the key for this the model server config is ``model``) -- docs on model fetching from a URL - -Changed -------- -- changed the logic inside AugmentedMemoizationPolicy to recall actions only if they are the same in training stories -- moved AugmentedMemoizationPolicy to memoization.py -- wrapped initialization of BackgroundScheduler in try/except to allow running on jupyterhub / binderhub/ colaboratory -- fixed order of events logged on a tracker: action executed is now always - logged before bot utterances that action created - -Removed -------- -- removed support for topics - -[0.9.6] - 2018-06-18 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed fallback policy data generation - -[0.9.5] - 2018-06-14 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- handling of max history configuration in policies -- fixed instantiation issues of fallback policy - -[0.9.4] - 2018-06-07 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed evaluation script -- fixed story file loading (previously some story files with checkpoints could - create wrong training data) -- improved speed of data loading - -[0.9.3] - 2018-05-30 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- added token auth to all endpoints of the core server - - -[0.9.2] - 2018-05-30 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fix handling of max_history parameter in AugmentedMemoizationPolicy - -[0.9.1] - 2018-05-29 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- persistence of training data collected during online learning if default - file path is used -- the ``agent()`` method used in some ``rasa_core.server`` endpoints is - re-run at every new call of the ``ensure_loaded_agent`` decorator -- fixed OR usage of intents - -.. _corev0-9-0: - -[0.9.0] - 2018-05-24 -^^^^^^^^^^^^^^^^^^^^ - -.. warning:: - - This is a major new release with backward incompatible changes. Old trained - models can not be read with the new version - you need to retrain your model. - -Added ------ -- supported loading training data from a folder - loads all stories from - all files in that directory -- parameter to specify NLU project when instantiating a ``RasaNLUInterpreter`` -- simple ``/respond`` endpoint to get bot response to a user message -- ``/conversations`` endpoint for listing sender ids of running conversations -- added a Mattermost channel that allows Rasa Core to communicate via a Mattermost app -- added a Twilio channel that allows Rasa Core to communicate via SMS -- ``FallbackPolicy`` for executing a default message if NLU or core model confidence is low. -- ``FormAction`` class to make it easier to collect multiple pieces of information with fewer stories. 
-- Dockerfile for ``rasa_core.server`` with a dialogue and Rasa NLU model - -Changed -------- -- moved server from klein to flask -- updated dependency fbmessenger from 4.3.1 to 5.0.0 -- updated Rasa NLU to 0.12.x -- updated all the dependencies to the latest versions - -Fixed ------ -- List slot is now populated with a list -- Slack connector: ``slack_channel`` kwarg is used to send messages either back to the user or to a static channel -- properly log to a file when using the ``run`` script -- documentation fix on stories - - -[0.8.6] - 2018-04-18 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- pin rasa nlu version to 0.11.4 (0.12.x only works with master) - -[0.8.5] - 2018-03-19 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- updated google analytics docs survey code - - -[0.8.4] - 2018-03-14 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- pin ``pykwalify<=1.6.0`` as update to ``1.6.1`` breaks compatibility - -[0.8.3] - 2018-02-28 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- pin ``fbmessenger`` version to avoid major update - -[0.8.2] - 2018-02-13 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- script to reload a dumped trackers state and to continue the conversation - at the end of the stored dialogue - -Changed -------- -- minor updates to dependencies - -Fixed ------ -- fixed datetime serialisation of reminder event - -[0.8.1] - 2018-02-01 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- removed deque to support python 3.5 -- Documentation improvements to tutorials -- serialisation of date time value for ``ReminderScheduled`` event - -.. _corev0-8-0: - -[0.8.0] - 2018-01-30 -^^^^^^^^^^^^^^^^^^^^ - -This is a major version change. Make sure to take a look at the -:ref:`migration-guide` in the documentation for advice on how to -update existing projects. - -Added ------ -- ``--debug`` and ``--verbose`` flags to scripts (train.py, run.py, server.py) - to set the log level -- support for story cycles when using checkpoints -- added a new machine learning policy `SklearnPolicy` that uses an sklearn - classifier to predict actions (logistic regression by default) -- warn if action emits events when using a model that it did never emit in - any of the stories the model was trained on -- support for event pushing and endpoints to retrieve the tracker state from the server -- Timestamp to every event -- added a Slack channel that allows Rasa Core to communicate via a Slack app -- added a Telegram channel that allows Rasa Core to communicate via a Telegram bot - -Changed -------- -- rewrite of the whole FB connector: replaced pymessenger library with fbmessenger -- story file utterance format changed from ``* _intent_greet[name=Rasa]`` - to ``* intent_greet{"name": "Rasa"}`` (old format is still supported but - deprecated) -- persist action names in domain during model persistence -- improved travis build speed by not using miniconda -- don't fail with an exception but with a helpful error message if an - utterance template contains a variable that can not be filled -- domain doesn't fail on unknown actions but emits a warning instead. this is to support reading - logs from older conversation if one recently removed an action from the domain - -Fixed ------ -- proper evaluation of stories with checkpoints -- proper visualisation of stories with checkpoints -- fixed float slot min max value handling -- fixed non integer feature decoding, e.g. 
used for memoization policy -- properly log to specified file when starting Rasa Core server -- properly calculate offset of last reset event after loading tracker from - tracker store -- UserUtteranceReverted action incorrectly triggered actions to be replayed - - -[0.7.9] - 2017-11-29 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- visualisation using Networkx version 2.x -- add output about line of failing intent when parsing story files - -[0.7.8] - 2017-11-27 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Pypi readme rendering - -[0.7.7] - 2017-11-24 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- log bot utterances to tracker - -Fixed ------ -- documentation improvements in README -- renamed interpreter argument to rasa core server - -[0.7.6] - 2017-11-15 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- moodbot example train command in docs - - -[0.7.5] - 2017-11-14 -^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- "sender_id" (and "DEFAULT_SENDER_ID") keyword consistency issue #56 - -Fixed ------ -- improved moodbot example - more nlu examples as well as better fitting of dialogue model - - -[0.7.4] - 2017-11-09 -^^^^^^^^^^^^^^^^^^^^ - -Changed -------- - -- added method to tracker to retrieve the latest entities #68 - -[0.7.3] - 2017-10-31 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- parameter to specify font size when rendering story visualization - -Fixed ------ -- fixed documentation of story visualization - -[0.7.2] - 2017-10-30 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- added facebook bot example -- added support for conditional checkpoints. a checkpoint can be restricted to - only allow one to use it if certain slots are set. see docs for details -- utterance templates in domain yaml support buttons and images -- validate domain yaml and raise exception on invalid file -- ``run`` script to load models and handle messages from an input channel - -Changed -------- -- small dropout in standard keras model to decrease reliance on exact intents -- a LOT of documentation improvements - -Fixed ------ -- fixed http error if action listen is not confirmed. #42 - -[0.7.1] - 2017-10-06 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- issues with restart events. They created wrong a messed up history leading to - wrong predictions - - -.. _corev0-7-0: - -[0.7.0] - 2017-10-04 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- support for Rasa Core usage as a server with remote action execution - -Changed -------- -- switched to max code line length 80 -- removed action id - use ``action.name()`` instead. if an action implementation overrides the name, it should include the ``action_`` prefix (as it is not automatically added anymore) -- renamed ``rasa_dm.util`` to ``rasa_dm.utils`` -- renamed the whole package to ``rasa_core`` (so ``rasa_dm`` is gone!) -- renamed ``Reminder`` attribute ``id`` to ``name`` -- a lot of documentation improvements. docs are now at https://rasa.com/docs/core -- use hashing when writing memorized turns into persistence - requires retraining of all models that are trained with a version prior to this -- changed ``agent.handle_message(...)`` interface for easier usage - -.. _corev0-6-0: - -[0.6.0] - 2017-08-27 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- support for multiple policies (e.g. 
one memoization and a Keras policy at the same time) -- loading domains from yaml files instead of defining them with python code -- added an api layer (called ``Agent``) for you to use for 95% of the things you want to do (training, persistence, loading models) -- support for reminders - -Changed -------- -- large refactoring of code base - -.. _corev0-5-0: - -[0.5.0] - 2017-06-18 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- ``ScoringPolicy`` added to policy implementations (less strict than standard default policy) -- ``RasaNLUInterpreter`` to run a nlu instance within dm (instead of using the http interface) -- more tests - -Changed -------- -- ``UserUtterance`` now holds the complete parse data from nlu (e.g. to access attributes other than entities or intent) -- ``Turn`` has a reference to a ``UserUtterance`` instead of directly storing intent & entities (allows access to other data) -- Simplified interface of output channels -- order of actions in the DefaultPolicy in ``possible_actions`` (``ActionListen`` now always has index 0) - -Fixed ------ -- ``RedisTrackerStore`` checks if tracker is stored before accessing it (otherwise a ``None`` access exception is thrown) -- ``RegexInterpreter`` checks if the regex actually matches the message instead of assuming it always does -- ``str`` implementation for all events -- ``Controller`` can be started without an input channel (e.g. messages need to be fed into the queue manually) - -.. _corev0-2-0: - -[0.2.0] - 2017-05-18 -^^^^^^^^^^^^^^^^^^^^ -First released version. - - -.. _`master`: https://github.com/RasaHQ/rasa_core/ - -.. _`Semantic Versioning`: http://semver.org/ diff --git a/docs/core/old-core-migration-guide.rst b/docs/core/old-core-migration-guide.rst deleted file mode 100644 index fdb7f9cb530c..000000000000 --- a/docs/core/old-core-migration-guide.rst +++ /dev/null @@ -1,429 +0,0 @@ -:desc: Information about changes between major versions of chatbot framework - Rasa Core and how you can migrate from one version to another. - -.. _old-core-migration-guide: - -Migration Guide -=============== -This page contains information about changes between major versions and -how you can migrate from one version to another. - -.. _migration-to-0-14-0: - -0.13.x to 0.14.0 - -General -~~~~~~~ - -- The python package has a new name, as does the module. You should install - the package using ``pip install rasa`` (instead of ``rasa_core``). - - The code moved from ``rasa_core`` to ``rasa.core`` - best way to fix is a - search and replace for the two most common usages: - ``from rasa_core`` and ``import rasa_core``. - - We have added a backwards compatibility package to still allow you to import - from ``rasa_core``, this will emit a warning but all imports will still - work. Nevertheless, you should do the above renaming of any access - to ``rasa_core``. - --The `MappingPolicy` is now included in `default_config.yml`. If you are using - a custom policy configuration make sure to update it appropriately. - -- deprecated ``remote.py`` got removed - the API should be consumed directly - instead or with the help of the ``rasa_core_sdk``. - -Asynchronous First -~~~~~~~~~~~~~~~~~~ -- **No more flask.** The flask webserver has been replaced with an asyncronous - webserver called Sanic. 
If you run the server in production using a wsgi - runner, there are instructions here on how to recreate that with the - sanic webserver: - https://sanic.readthedocs.io/en/latest/sanic/deploying.html#running-via-gunicorn -- **Agent**: some of the method signatures changed from normal functions to - async coroutines. These functions need to be awaited when called, e.g. - ``await agent.handle_message(...)``. Changed functions include - - ``handle_message`` - - ``handle_text`` - - ``log_message`` - - ``execute_action`` - - ``load_data`` - - ``visualize`` - -Custom Input / Output Channels -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -If you wrote your own input output channels, there are a couple of changes -necessary to make the channels work properly with the asyncio server operation: - -- **Need to provide Sanic blueprints.** To make the server fully asynchronous - the input channels need to provide Sanic blueprints instead of flask - blueprints. Imports should change from - ``from flask import Blueprint, request`` to - ``from sanic import Blueprint, response``. All route functions, e.g. - ``def webhook(...)`` need to be async and accept a request parameter as - their first argument, e.g. ``async def webhook(request, ...)``. - - Calls to ``on_new_message(...)`` need to be awaited: - ``await on_new_message(...)``. - - All output channel functions need to be async: - ``send_text_message``, ``send_image_url``, ``send_attachment``, - ``send_response``, ``send_text_with_buttons`` and ``send_custom_message``. - And all internal calls to these methods need to be awaited. - - For inspiration, feel free to check the code of the existing channels. - -Function Naming -~~~~~~~~~~~~~~~ -- renamed ``train_dialogue_model`` to ``train``. Please use ``train`` from - now on. -- renamed ``rasa_core.evaluate`` to ``rasa_core.test``. Please use ``test`` - from now on. - -.. _migration-to-0-13-0: - -0.12.x to 0.13.0 ----------------- - -.. warning:: - - Python 2 support has now been completely dropped: to upgrade to - this version you **must use Python 3**. As always, **make sure** - **you retrain your models when switching to this version** - -General -~~~~~~~ - -- Support for Python 2 has now been completely removed from Rasa Core, please - upgrade to Python 3.5 or 3.6 to continue using the software -- If you were using the deprecated intent/entity format (``_intent[entity1=val1, entity=val2]``), - then you will have to update your training data to the standard format - (``/intent{"entity1": val1, "entity2": val2``} because it is no longer supported - -.. _migration-to-0-12-0: - -0.11.x to 0.12.0 ----------------- - -.. warning:: - - This is major new version with a lot of changes under the hood as well - as on the API level. Please take a careful look at the mentioned - before updating. Please make sure to - **retrain your models when switching to this version**. - -Train script -~~~~~~~~~~~~ - -- You **must** pass a policy config flag with ``-c/--config`` now when training - a model, see :ref:`policy_file`. 
-- Interactive learning is now started with - ``python -m rasa_core.train interactive`` rather than the - ``--interactive`` flag -- All policy configuration related flags have been removed (``--epochs``, - ``--max_history``, ``--validation_split``, ``--batch_size``, - ``--nlu_threshold``, ``--core_threshold``, - ``--fallback_action_name``), specify these in the policy config file instead, - see :ref:`policy_file` - -Visualisation script -~~~~~~~~~~~~~~~~~~~~ - -- You **must** pass a policy config flag with ``-c/--config`` now, - see :ref:`policy_file`. - -Evaluation script -~~~~~~~~~~~~~~~~~ - -- The ``--output`` flag now takes one argument: the name of the folder - any files generated from the script should be written to -- The ``--failed`` flag was removed, as this is part of the ``--output`` - flag now - -Forms -~~~~~ - -- Forms were completely reworked, please follow :ref:`forms` - for instructions how to use them. -- ``FormField`` class and its subclasses were removed, - overwrite ``FormAction.slot_mapping()`` method to specify the mapping between - user input and requested slot in the form - utilizing helper methods ``FormAction.from_entity(...)``, - ``FormAction.from_intent(...)`` and ``FormAction.from_text(...)`` -- stories for forms need to be written differently, - it is recommended to use interactive learning to create form stories -- functionality of ``FormAction.get_other_slots(...)`` was moved to - ``FormAction.extract_other_slots(...)`` -- functionality of ``FormAction.get_requested_slot(...)`` was moved to - ``FormAction.extract_requested_slot(...)`` -- overwrite ``FormAction.validate(...)`` method to validate user input against - the slot requested by the form - -.. _migration-to-0-11-0: - -0.10.x to 0.11.0 ----------------- - -.. warning:: - - This is major new version with a lot of changes under the hood as well - as on the API level. Please take a careful look at the mentioned - before updating. Please make sure to - **retrain your models when switching to this version**. - -General -~~~~~~~ -.. note:: - - TL;DR these are the most important surface changes. But if you have - a second please take a minute to read all of them. - -- If you have custom actions, you now need to run a separate server to execute - them. If your actions are written in python (in a file called actions.py) you - can do this by running ``python -m rasa_core_sdk.endpoint --actions actions`` - and specifying the action endpoint in the ``endpoints.yml`` - For more information please read :ref:`custom actions <custom-actions>`. -- For your custom actions, the imports have changed from - ``from rasa_core.actions import Action`` to ``from rasa_core_sdk import Action`` and - from ``from rasa_core.events import *`` to ``from rasa_core_sdk.events import *`` -- The actions list in the domain now needs to always contain the actions names - instead of the classpath (e.g. change ``actions.ActionExample`` to ``action_example``) -- utter templates that should be used as actions, now need to start with - ``utter_``, otherwise the bot won't be able to find the action - -HTTP Server endpoints -~~~~~~~~~~~~~~~~~~~~~ -- We removed ``/parse`` and ``/continue`` endpoints used for running actions - remotely. This has been replaced by the action server that allows you - to run your action code in any language. 
There are no replacement endpoints - for these two, as the flow of information has been changed: Instead of you - calling Rasa Core to update the tracker and receive the next action to be - executed, Rasa Core will call your action server once it predicted an action. - More information can be found in the updated docs for :ref:`custom actions <custom-actions>`. - - -Webhooks -~~~~~~~~ -- The endpoints for the webhooks changed. All webhooks are now at - ``/webhooks/CHANNEL_NAME/webhook``. For example, the webhook - to receive facebook messages on a local instance is now - ``http://localhost:5005/webhooks/facebook/webhook``. -- format of the ``credentials.yml`` used in the ``run`` and ``server`` scripts - has changed to allow for multiple channels in one file: - - The new format now contains the channels name first, e.g. for facebook: - - .. code-block:: yaml - - facebook: - verify: "rasa-bot" - secret: "3e34709d01ea89032asdebfe5a74518" - page-access-token: "EAAbHPa7H9rEBAAuFk4Q3gPKbDedQnx4djJJ1JmQ7CAqO4iJKrQcNT0wtD" - -Changes to Input and Output Channels -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- ``ConsoleOutputChannel`` and ``ConsoleInputChannel`` have been removed. Either - use the `run script <https://github.com/RasaHQ/rasa_core/blob/master/rasa_core/run.py>`_ - to run your bot on the cmdline, or adapt the ``serve_application`` - `function <https://github.com/RasaHQ/rasa_core/blob/master/rasa_core/run.py#L260>`_ - to run from a python script. -- ``rasa_core.channels.direct`` output channel package removed. - ``CollectingOutputChannel`` moved to ``rasa_core.channels.channel`` -- ``HttpInputComponent`` renamed to ``InputChannel`` & moved to - ``rasa_core.channels.channel.InputChannel`` -- If you wrote your own custom input channel, make sure to inherit from - ``InputChannel`` instead of ``HttpInputComponent``. -- ``CollectingOutput`` channel will no properly collect events for images, - buttons, and attachments. The content of the collected messages has changed, - ``data`` is now called ``buttons``. -- removed package ``rasa_core.channels.rest``, - please use ``rasa_core.channels.RestInput`` instead -- remove file input channel ``rasa_core.channels.file.FileInputChannel`` -- signature of ``agent.handle_channel`` got renamed - and the signature changed. here is an up to date example: - - .. code-block:: python - - from rasa_core.channels.facebook import FacebookInput - - input_channel = FacebookInput(fb_verify="VERIFY", - fb_secret="SECRET", - fb_access_token="ACCESS_TOKEN") - agent.handle_channels([input_channel], port=5005, serve_forever=True) -- If you wrote your own custom output channel, make sure to split messages - on double new lines if you like (the ``InputChannel`` you inherit from - doesn't do this anymore), e.g.: - - .. code-block:: python - - def send_text_message(self, recipient_id: Text, message: Text) -> None: - """Send a message through this channel.""" - - for message_part in message.split("\n\n"): - # self.send would be the actual communication to e.g. facebook - self.send(recipient_id, message_part) - - -.. _migration-to-0-10-0: - -0.9.x to 0.10.0 ---------------- -.. warning:: - - This is a release **breaking backwards compatibility**. - You can no longer load old models with this version, due to the addition of - the default action ``ActionDefaultFallback``. 
Please make sure to retrain - your model before using this version - -There have been some API changes to classes and methods: - -- if you use ``dispatcher.utter_template`` or - ``dispatcher.utter_button_template`` in your custom actions run code, - they now need the ``tracker`` as a second argument, e.g. - ``dispatcher.utter_template("utter_greet", tracker)`` - -- all input and output channels should have a ``name``. If you are using a - custom channel, make sure to implement a class method that returns - the name. The name needs to be added to the - **input channel and the output channel**. You can find examples - in ``rasa_core.channels.direct.CollectingOutputChannel``: - - .. code-block:: python - - @classmethod - def name(cls): - """Every channel needs a name""" - return "collector" - -- the ``RasaNLUHttpInterpreter`` when created now needs to be passed an - instance of ``EndpointConfig`` instead of ``server`` and ``token``, e.g.: - - .. code-block:: python - - from rasa_core.utils import EndpointConfig - - endpoint = EndpointConfig("http://localhost:500", token="mytoken") - interpreter = RasaNLUHttpInterpreter("mymodelname", endpoint) - -.. _migration-to-0-9-0: - -0.8.x to 0.9.0 --------------- - -.. warning:: - - This is a release **breaking backwards compatibility**. - Unfortunately, it is not possible to load - previously trained models (as the stored file formats have changed as - well as the configuration and metadata). Please make sure to retrain - a model before trying to use it with this improved version. - -- loading data should be done either using: - - .. code-block:: python - - from rasa_core import training - - training_data = training.load_data(...) - - or using an agent instance: - - .. code-block:: python - - training_data = agent.load_data(...) - agent.train(training_data, ...) - - It is deprecated to pass the training data file directly to ``agent.train``. - Instead, the data should be loaded in one of the above ways and then passed - to train. - -- ``ScoringPolicy`` got removed and replaced by ``AugmentedMemoizationPolicy`` - which is similar, but is able to match more states to states it has seen - during trainer (e.g. it is able to handle slots better) - -- if you use custom featurizers, you need to - **pass them directly to the policy** that should use them. - This allows the policies to use different featurizers. Passing a featurizer - is **optional**. Accordingly, the ``max_history`` parameter moved to that - featurizer: - - .. code-block:: python - - from rasa_core.featurizers import (MaxHistoryTrackerFeaturizer, - BinarySingleStateFeaturizer) - - featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(), - max_history=5) - - agent = Agent(domain_file, - policies=[MemoizationPolicy(max_history=5), - KerasPolicy(featurizer)]) - - If no featurizer is passed during policy creation, the policies default - featurizer will be used. The `MemoizationPolicy` allows passing in the - `max_history` parameter directly, without creating a featurizer. - -- the ListSlot now stores a list of entities (with the same name) - present in an utterance - - -.. _migration-to-0-8-0: - -0.7.x to 0.8.0 --------------- - -- Credentials for the facebook connector changed. Instead of providing: - - .. code-block:: yaml - - # OLD FORMAT - verify: "rasa-bot" - secret: "3e34709d01ea89032asdebfe5a74518" - page-tokens: - 1730621093913654: "EAAbHPa7H9rEBAAuFk4Q3gPKbDedQnx4djJJ1JmQ7CAqO4iJKrQcNT0wtD" - - you should now pass the configuration parameters like this: - - .. 
code-block:: yaml - - # NEW FORMAT - verify: "rasa-bot" - secret: "3e34709d01ea89032asdebfe5a74518" - page-access-token: "EAAbHPa7H9rEBAAuFk4Q3gPKbDedQnx4djJJ1JmQ7CAqO4iJKrQcNT0wtD" - - As you can see, the new facebook connector only supports a single page. Same - change happened to the in code arguments for the connector which should be - changed to: - - .. code-block:: python - - from rasa_core.channels.facebook import FacebookInput - - FacebookInput( - credentials.get("verify"), - credentials.get("secret"), - credentials.get("page-access-token")) - -- Story file format changed from ``* _intent_greet[name=Rasa]`` - to ``* intent_greet{"name": "Rasa"}`` (old format is still supported but - deprecated). Instead of writing: - - .. code-block:: story - - ## story_07715946 <!-- name of the story - just for debugging --> - * _greet - - action_ask_howcanhelp - * _inform[location=rome,price=cheap] - - action_on_it <!-- user utterance, in format _intent[entities] --> - - action_ask_cuisine - - The new format looks like this: - - .. code-block:: story - - ## story_07715946 <!-- name of the story - just for debugging --> - * greet - - action_ask_howcanhelp - * inform{"location": "rome", "price": "cheap"} - - action_on_it <!-- user utterance, in format _intent[entities] --> - - action_ask_cuisine diff --git a/docs/core/policies.rst b/docs/core/policies.rst deleted file mode 100644 index 34c63cde152d..000000000000 --- a/docs/core/policies.rst +++ /dev/null @@ -1,572 +0,0 @@ -:desc: Define and train customized policy configurations to optimize your - contextual assistant for longer contexts or unseen utterances which - require generalization. - -.. _policies: - -Policies -======== - -.. edit-link:: - -.. contents:: - :local: - - -.. _policy_file: - -Configuring Policies -^^^^^^^^^^^^^^^^^^^^ - -The :class:`rasa.core.policies.Policy` class decides which action to take -at every step in the conversation. - -There are different policies to choose from, and you can include -multiple policies in a single :class:`rasa.core.agent.Agent`. - -.. note:: - - Per default a maximum of 10 next actions can be predicted - by the agent after every user message. To update this value - you can set the environment variable ``MAX_NUMBER_OF_PREDICTIONS`` - to the desired number of maximum predictions. - - -Your project's ``config.yml`` file takes a ``policies`` key -which you can use to customize the policies your assistant uses. -In the example below, the last two lines show how to use a custom -policy class and pass arguments to it. - -.. code-block:: yaml - - policies: - - name: "KerasPolicy" - featurizer: - - name: MaxHistoryTrackerFeaturizer - max_history: 5 - state_featurizer: - - name: BinarySingleStateFeaturizer - - name: "MemoizationPolicy" - max_history: 5 - - name: "FallbackPolicy" - nlu_threshold: 0.4 - core_threshold: 0.3 - fallback_action_name: "my_fallback_action" - - name: "path.to.your.policy.class" - arg1: "..." - - -Max History ------------ - -One important hyperparameter for Rasa Core policies is the ``max_history``. -This controls how much dialogue history the model looks at to decide which -action to take next. - -You can set the ``max_history`` by passing it to your policy's ``Featurizer`` -in the policy configuration yaml file. - -.. note:: - - Only the ``MaxHistoryTrackerFeaturizer`` uses a max history, - whereas the ``FullDialogueTrackerFeaturizer`` always looks at - the full conversation history. See :ref:`featurization` for details. 
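For instance, a minimal ``config.yml`` sketch (the values are illustrative) that sets ``max_history`` both on the featurizer of a machine-learning policy and directly on the ``MemoizationPolicy``:

.. code-block:: yaml

    policies:
      - name: "KerasPolicy"
        featurizer:
        - name: MaxHistoryTrackerFeaturizer
          max_history: 3
          state_featurizer:
            - name: BinarySingleStateFeaturizer
      - name: "MemoizationPolicy"
        max_history: 3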
- -As an example, let's say you have an ``out_of_scope`` intent which -describes off-topic user messages. If your bot sees this intent multiple -times in a row, you might want to tell the user what you `can` help them -with. So your story might look like this: - -.. code-block:: story - - * out_of_scope - - utter_default - * out_of_scope - - utter_default - * out_of_scope - - utter_help_message - -For Rasa Core to learn this pattern, the ``max_history`` -has to be `at least` 3. - -If you increase your ``max_history``, your model will become bigger and -training will take longer. If you have some information that should -affect the dialogue very far into the future, you should store it as a -slot. Slot information is always available for every featurizer. - - -Data Augmentation ------------------ - -When you train a model, by default Rasa Core will create -longer stories by randomly gluing together -the ones in your stories files. -This is because if you have stories like: - -.. code-block:: story - - # thanks - * thankyou - - utter_youarewelcome - - # bye - * goodbye - - utter_goodbye - - -You actually want to teach your policy to **ignore** the dialogue history -when it isn't relevant and just respond with the same action no matter -what happened before. - -You can alter this behaviour with the ``--augmentation`` flag. -Which allows you to set the ``augmentation_factor``. -The ``augmentation_factor`` determines how many augmented stories are -subsampled during training. The augmented stories are subsampled before training -since their number can quickly become very large, and we want to limit it. -The number of sampled stories is ``augmentation_factor`` x10. -By default augmentation is set to 20, resulting in a maximum of 200 augmented stories. - -``--augmentation 0`` disables all augmentation behavior. -The memoization based policies are not affected by augmentation -(independent of the ``augmentation_factor``) and will automatically -ignore all augmented stories. - -Action Selection -^^^^^^^^^^^^^^^^ - -At every turn, each policy defined in your configuration will -predict a next action with a certain confidence level. For more information -about how each policy makes its decision, read into the policy's description below. -The bot's next action is then decided by the policy that predicts with the highest confidence. - -In the case that two policies predict with equal confidence (for example, the Memoization -and Mapping Policies always predict with confidence of either 0 or 1), the priority of the -policies is considered. Rasa policies have default priorities that are set to ensure the -expected outcome in the case of a tie. They look like this, where higher numbers have higher priority: - - | 5. ``FormPolicy`` - | 4. ``FallbackPolicy`` and ``TwoStageFallbackPolicy`` - | 3. ``MemoizationPolicy`` and ``AugmentedMemoizationPolicy`` - | 2. ``MappingPolicy`` - | 1. ``EmbeddingPolicy``, ``KerasPolicy``, and ``SklearnPolicy`` - -This priority hierarchy ensures that, for example, if there is an intent with a mapped action, but the NLU confidence is not -above the ``nlu_threshold``, the bot will still fall back. In general, it is not recommended to have more -than one policy per priority level, and some policies on the same priority level, such as the two -fallback policies, strictly cannot be used in tandem. - -If you create your own policy, use these priorities as a guide for figuring out the priority of your policy. 
-If your policy is a machine learning policy, it should most likely have priority 1, the same as the Rasa machine -learning policies. - -.. warning:: - All policy priorities are configurable via the ``priority:`` parameter in the configuration, - but we **do not recommend** changing them outside of specific cases such as custom policies. - Doing so can lead to unexpected and undesired bot behavior. - -Keras Policy -^^^^^^^^^^^^ - -The ``KerasPolicy`` uses a neural network implemented in -`Keras <http://keras.io>`_ to select the next action. -The default architecture is based on an LSTM, but you can override the -``KerasPolicy.model_architecture`` method to implement your own architecture. - - -.. literalinclude:: ../../rasa/core/policies/keras_policy.py - :dedent: 4 - :pyobject: KerasPolicy.model_architecture - -and the training is run here: - -.. literalinclude:: ../../rasa/core/policies/keras_policy.py - :dedent: 4 - :pyobject: KerasPolicy.train - -You can implement the model of your choice by overriding these methods, -or initialize ``KerasPolicy`` with pre-defined ``keras model``. - -In order to get reproducible training results for the same inputs you can -set the ``random_seed`` attribute of the ``KerasPolicy`` to any integer. - - -.. _embedding_policy: - -Embedding Policy -^^^^^^^^^^^^^^^^ - -Transformer Embedding Dialogue Policy (TEDP) - -Transformer version of the Recurrent Embedding Dialogue Policy (REDP) -used in our paper: `<https://arxiv.org/abs/1811.11707>`_ - -This policy has a pre-defined architecture, which comprises the -following steps: - - - concatenate user input (user intent and entities), - previous system action, slots and active form - for each time step into an input vector - to pre-transformer embedding layer; - - feed it to transformer; - - apply a dense layer to the output of the transformer - to get embeddings of a dialogue for each time step; - - apply a dense layer to create embeddings for system actions for each time step; - - calculate the similarity between the - dialogue embedding and embedded system actions. - This step is based on the - `StarSpace <https://arxiv.org/abs/1709.03856>`_ idea. - -It is recommended to use -``state_featurizer=LabelTokenizerSingleStateFeaturizer(...)`` -(see :ref:`featurization` for details). - -**Configuration:** - - Configuration parameters can be passed as parameters to the - ``EmbeddingPolicy`` within the policy configuration file. - - .. warning:: - - Pass an appropriate number of ``epochs`` to the ``EmbeddingPolicy``, - otherwise the policy will be trained only for ``1`` - epoch. 
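For example, a minimal policy configuration entry that sets ``epochs`` (and a linearly increasing ``batch_size``) explicitly could look like this; the numbers are illustrative rather than recommended values:

.. code-block:: yaml

    policies:
      - name: EmbeddingPolicy
        epochs: 200
        batch_size: [8, 32]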
- - The algorithm also has hyper-parameters to control: - - - neural network's architecture: - - - ``hidden_layers_sizes_b`` sets a list of hidden layers - sizes before embedding layer for system actions, the number - of hidden layers is equal to the length of the list; - - ``transformer_size`` sets the number of units in the transfomer; - - ``num_transformer_layers`` sets the number of transformer layers; - - ``pos_encoding`` sets the type of positional encoding in transformer, - it should be either ``timing`` or ``emb``; - - ``max_seq_length`` sets maximum sequence length - if embedding positional encodings are used; - - ``num_heads`` sets the number of heads in multihead attention; - - - training: - - - ``batch_size`` sets the number of training examples in one - forward/backward pass, the higher the batch size, the more - memory space you'll need; - - ``batch_strategy`` sets the type of batching strategy, - it should be either ``sequence`` or ``balanced``; - - ``epochs`` sets the number of times the algorithm will see - training data, where one ``epoch`` equals one forward pass and - one backward pass of all the training examples; - - ``random_seed`` if set to any int will get reproducible - training results for the same inputs; - - - embedding: - - - ``embed_dim`` sets the dimension of embedding space; - - ``num_neg`` sets the number of incorrect intent labels, - the algorithm will minimize their similarity to the user - input during training; - - ``similarity_type`` sets the type of the similarity, - it should be either ``auto``, ``cosine`` or ``inner``, - if ``auto``, it will be set depending on ``loss_type``, - ``inner`` for ``softmax``, ``cosine`` for ``margin``; - - ``loss_type`` sets the type of the loss function, - it should be either ``softmax`` or ``margin``; - - ``mu_pos`` controls how similar the algorithm should try - to make embedding vectors for correct intent labels, - used only if ``loss_type`` is set to ``margin``; - - ``mu_neg`` controls maximum negative similarity for - incorrect intents, - used only if ``loss_type`` is set to ``margin``; - - ``use_max_sim_neg`` if ``true`` the algorithm only - minimizes maximum similarity over incorrect intent labels, - used only if ``loss_type`` is set to ``margin``; - - ``scale_loss`` if ``true`` the algorithm will downscale the loss - for examples where correct label is predicted with high confidence, - used only if ``loss_type`` is set to ``softmax``; - - - regularization: - - - ``C2`` sets the scale of L2 regularization - - ``C_emb`` sets the scale of how important is to minimize - the maximum similarity between embeddings of different - intent labels, used only if ``loss_type`` is set to ``margin``; - - ``droprate_a`` sets the dropout rate between - layers before embedding layer for user inputs; - - ``droprate_b`` sets the dropout rate between layers - before embedding layer for system actions; - - - train accuracy calculation: - - - ``evaluate_every_num_epochs`` sets how often to calculate - train accuracy, small values may hurt performance; - - ``evaluate_on_num_examples`` how many examples to use for - hold out validation set to calculate of validation accuracy, - large values may hurt performance. - - .. warning:: - - Default ``max_history`` for this policy is ``None`` which means it'll use - the ``FullDialogueTrackerFeaturizer``. We recommend to set ``max_history`` to - some finite value in order to use ``MaxHistoryTrackerFeaturizer`` - for **faster training**. See :ref:`featurization` for details. 
- We recommend to increase ``batch_size`` for ``MaxHistoryTrackerFeaturizer`` - (e.g. ``"batch_size": [32, 64]``) - - .. warning:: - - If ``evaluate_on_num_examples`` is non zero, random examples will be - picked by stratified split and used as **hold out** validation set, - so they will be excluded from training data. - We suggest to set it to zero if data set contains a lot of unique examples - of dialogue turns - - .. note:: - - Droprate should be between ``0`` and ``1``, e.g. - ``droprate=0.1`` would drop out ``10%`` of input units. - - .. note:: - - For ``cosine`` similarity ``mu_pos`` and ``mu_neg`` should - be between ``-1`` and ``1``. - - .. note:: - - There is an option to use linearly increasing batch size. - The idea comes from `<https://arxiv.org/abs/1711.00489>`_. - In order to do it pass a list to ``batch_size``, e.g. - ``"batch_size": [8, 32]`` (default behaviour). If constant - ``batch_size`` is required, pass an ``int``, e.g. - ``"batch_size": 8``. - - These parameters can be specified in the policy configuration file. - The default values are defined in ``EmbeddingPolicy.defaults``: - - .. literalinclude:: ../../rasa/core/policies/embedding_policy.py - :dedent: 4 - :start-after: # default properties (DOC MARKER - don't remove) - :end-before: # end default properties (DOC MARKER - don't remove) - - .. note:: - - Parameter ``mu_neg`` is set to a negative value to mimic - the original starspace algorithm in the case - ``mu_neg = mu_pos`` and ``use_max_sim_neg = False``. See - `starspace paper <https://arxiv.org/abs/1709.03856>`_ for details. - -.. _mapping-policy: - -Mapping Policy -^^^^^^^^^^^^^^ - -The ``MappingPolicy`` can be used to directly map intents to actions. The -mappings are assigned by giving an intent the property ``triggers``, e.g.: - -.. code-block:: yaml - - intents: - - ask_is_bot: - triggers: action_is_bot - -An intent can only be mapped to at most one action. The bot will run -the mapped action once it receives a message of the triggering intent. Afterwards, -it will listen for the next message. With the next -user message, normal prediction will resume. - -If you do not want your intent-action mapping to affect the dialogue -history, the mapped action must return a ``UserUtteranceReverted()`` -event. This will delete the user's latest message, along with any events that -happened after it, from the dialogue history. This means you should not -include the intent-action interaction in your stories. - -For example, if a user asks "Are you a bot?" off-topic in the middle of the -flow, you probably want to answer without that interaction affecting the next -action prediction. A triggered custom action can do anything, but here's a -simple example that dispatches a bot utterance and then reverts the interaction: - -.. code-block:: python - - class ActionIsBot(Action): - """Revertible mapped action for utter_is_bot""" - - def name(self): - return "action_is_bot" - - def run(self, dispatcher, tracker, domain): - dispatcher.utter_template("utter_is_bot", tracker) - return [UserUtteranceReverted()] - -.. note:: - - If you use the ``MappingPolicy`` to predict bot utterances directly (e.g. - ``triggers: utter_{}``), these interactions must go in your stories, as in this - case there is no ``UserUtteranceReverted()`` and the - intent and the mapped utterance will appear in the dialogue history. - -.. note:: - - The MappingPolicy is also responsible for executing the default actions ``action_back`` - and ``action_restart`` in response to ``/back`` and ``/restart``. 
If it is not included - in your policy example these intents will not work. - -Memoization Policy -^^^^^^^^^^^^^^^^^^ - -The ``MemoizationPolicy`` just memorizes the conversations in your -training data. It predicts the next action with confidence ``1.0`` -if this exact conversation exists in the training data, otherwise it -predicts ``None`` with confidence ``0.0``. - -.. _fallback-policy: - -Fallback Policy -^^^^^^^^^^^^^^^ - -The ``FallbackPolicy`` invokes a :ref:`fallback action -<fallback-actions>` if at least one of the following occurs: -1. The intent recognition has a confidence below ``nlu_threshold``. -2. The highest ranked intent differs in confidence with the second highest ranked intent -by less than ``ambiguity_threshold``. -3. None of the dialogue policies predict an action with confidence higher than ``core_threshold``. - -**Configuration:** - - The thresholds and fallback action can be adjusted in the policy configuration - file as parameters of the ``FallbackPolicy``: - - .. code-block:: yaml - - policies: - - name: "FallbackPolicy" - nlu_threshold: 0.3 - ambiguity_threshold: 0.1 - core_threshold: 0.3 - fallback_action_name: 'action_default_fallback' - - +----------------------------+---------------------------------------------+ - | ``nlu_threshold`` | Min confidence needed to accept an NLU | - | | prediction | - +----------------------------+---------------------------------------------+ - | ``ambiguity_threshold`` | Min amount by which the confidence of the | - | | top intent must exceed that of the second | - | | highest ranked intent. | - +----------------------------+---------------------------------------------+ - | ``core_threshold`` | Min confidence needed to accept an action | - | | prediction from Rasa Core | - +----------------------------+---------------------------------------------+ - | ``fallback_action_name`` | Name of the :ref:`fallback action | - | | <fallback-actions>` | - | | to be called if the confidence of intent | - | | or action is below the respective threshold | - +----------------------------+---------------------------------------------+ - - You can also configure the ``FallbackPolicy`` in your python code: - - .. code-block:: python - - from rasa.core.policies.fallback import FallbackPolicy - from rasa.core.policies.keras_policy import KerasPolicy - from rasa.core.agent import Agent - - fallback = FallbackPolicy(fallback_action_name="action_default_fallback", - core_threshold=0.3, - nlu_threshold=0.3, - ambiguity_threshold=0.1) - - agent = Agent("domain.yml", policies=[KerasPolicy(), fallback]) - - .. note:: - - You can include either the ``FallbackPolicy`` or the - ``TwoStageFallbackPolicy`` in your configuration, but not both. - - -Two-Stage Fallback Policy -^^^^^^^^^^^^^^^^^^^^^^^^^ - -The ``TwoStageFallbackPolicy`` handles low NLU confidence in multiple stages -by trying to disambiguate the user input. - -- If an NLU prediction has a low confidence score or is not significantly higher - than the second highest ranked prediction, the user is asked to affirm - the classification of the intent. - - - If they affirm, the story continues as if the intent was classified - with high confidence from the beginning. - - If they deny, the user is asked to rephrase their message. - -- Rephrasing - - - If the classification of the rephrased intent was confident, the story - continues as if the user had this intent from the beginning. 
- - If the rephrased intent was not classified with high confidence, the user - is asked to affirm the classified intent. - -- Second affirmation - - - If the user affirms the intent, the story continues as if the user had - this intent from the beginning. - - If the user denies, the original intent is classified as the specified - ``deny_suggestion_intent_name``, and an ultimate fallback action - is triggered (e.g. a handoff to a human). - -**Configuration:** - - To use the ``TwoStageFallbackPolicy``, include the following in your - policy configuration. - - .. code-block:: yaml - - policies: - - name: TwoStageFallbackPolicy - nlu_threshold: 0.3 - ambiguity_threshold: 0.1 - core_threshold: 0.3 - fallback_core_action_name: "action_default_fallback" - fallback_nlu_action_name: "action_default_fallback" - deny_suggestion_intent_name: "out_of_scope" - - +-------------------------------+------------------------------------------+ - | ``nlu_threshold`` | Min confidence needed to accept an NLU | - | | prediction | - +-------------------------------+------------------------------------------+ - | ``ambiguity_threshold`` | Min amount by which the confidence of the| - | | top intent must exceed that of the second| - | | highest ranked intent. | - +--------------------------------------------------------------------------+ - | ``core_threshold`` | Min confidence needed to accept an action| - | | prediction from Rasa Core | - +-------------------------------+------------------------------------------+ - | ``fallback_core_action_name`` | Name of the :ref:`fallback action | - | | <fallback-actions>` | - | | to be called if the confidence of Rasa | - | | Core action prediction is below the | - | | ``core_threshold``. This action is | - | | to propose the recognized intents | - +-------------------------------+------------------------------------------+ - | ``fallback_nlu_action_name`` | Name of the :ref:`fallback action | - | | <fallback-actions>` | - | | to be called if the confidence of Rasa | - | | NLU intent classification is below the | - | | ``nlu_threshold``. This action is called | - | | when the user denies the second time | - +-------------------------------+------------------------------------------+ - |``deny_suggestion_intent_name``| The name of the intent which is used to | - | | detect that the user denies the suggested| - | | intents | - +-------------------------------+------------------------------------------+ - - .. note:: - - You can include either the ``FallbackPolicy`` or the - ``TwoStageFallbackPolicy`` in your configuration, but not both. - - - -Form Policy -^^^^^^^^^^^ - -The ``FormPolicy`` is an extension of the ``MemoizationPolicy`` which -handles the filling of forms. Once a ``FormAction`` is called, the -``FormPolicy`` will continually predict the ``FormAction`` until all required -slots in the form are filled. For more information, see :ref:`forms`. diff --git a/docs/core/responses.rst b/docs/core/responses.rst deleted file mode 100644 index 362734f3498e..000000000000 --- a/docs/core/responses.rst +++ /dev/null @@ -1,150 +0,0 @@ -:desc: Read how to define assistant utterances or use a service to generate the - responses using Rasa as an open source chat assistant platform. - -.. _responses: - -Responses -========= - -.. edit-link:: - -If you want your assistant to respond to user messages, you need to manage -these responses. In the training data for your bot, -your stories, you specify the actions your bot -should execute. 
These actions can use utterances to send messages back to the user. - -There are three ways to manage these utterances: - -1. Utterances are normally stored in your domain file, see :ref:`here <domain-utterances>` -2. Retrieval action responses are part of the training data, see :ref:`here <retrieval-actions>` -3. You can also create a custom NLG service to generate responses, see :ref:`here <custom-nlg-service>` - -.. _domain-utterances: - -Including the utterances in the domain -------------------------------------- - -The default format is to include the utterances in your domain file. This file then contains references to all your custom actions, available entities, slots and intents. - -.. literalinclude:: ../../data/test_domains/default_with_slots.yml - :language: yaml - -In this example domain file, the section ``templates`` contains the templates the assistant uses to send messages to the user. - -If you want to change the text, or any other part of the bot's response, you need to retrain the assistant before these changes will be picked up. - -More details about the format of these responses can be found in the documentation about the domain file format: :ref:`utter_templates`. - -.. _custom-nlg-service: - -Creating your own NLG service for bot responses ----------------------------------------------- - -Retraining the bot just to change the text copy can be suboptimal for some workflows. That's why Core also allows you to outsource the response generation and separate it from the dialogue learning. - -The assistant will still learn to predict actions and to react to user input based on past dialogues, but the responses it sends back to the user are generated outside of Rasa Core. - -If the assistant wants to send a message to the user, it will call an external HTTP server with a ``POST`` request. To configure this endpoint, you need to create an ``endpoints.yml`` and pass it either to the ``run`` or ``server`` script. The content of the ``endpoints.yml`` should be: - -.. literalinclude:: ../../data/test_endpoints/example_endpoints.yml - :language: yaml - -Then pass the ``--enable-api`` flag to the ``rasa run`` command when starting the server: - -.. code-block:: shell - - $ rasa run \ - --enable-api \ - -m examples/babi/models \ - --log-file out.log \ - --endpoints endpoints.yml - - -The body of the ``POST`` request sent to the endpoint will look like this: - -.. code-block:: json - - { - "tracker": { - "latest_message": { - "text": "/greet", - "intent_ranking": [ - { - "confidence": 1.0, - "name": "greet" - } - ], - "intent": { - "confidence": 1.0, - "name": "greet" - }, - "entities": [] - }, - "sender_id": "22ae96a6-85cd-11e8-b1c3-f40f241f6547", - "paused": false, - "latest_event_time": 1531397673.293572, - "slots": { - "name": null - }, - "events": [ - { - "timestamp": 1531397673.291998, - "event": "action", - "name": "action_listen" - }, - { - "timestamp": 1531397673.293572, - "parse_data": { - "text": "/greet", - "intent_ranking": [ - { - "confidence": 1.0, - "name": "greet" - } - ], - "intent": { - "confidence": 1.0, - "name": "greet" - }, - "entities": [] - }, - "event": "user", - "text": "/greet" - } - ] - }, - "arguments": {}, - "template": "utter_greet", - "channel": { - "name": "collector" - } - } - -The endpoint then needs to respond with the generated response: - -.. code-block:: json - - { - "text": "hey there", - "buttons": [], - "image": null, - "elements": [], - "attachments": [] - } - -Rasa will then use this response and send it back to the user.
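For illustration only, a minimal NLG server for the request and response formats shown above could look like the following sketch. It is not part of Rasa itself; it assumes a Flask app listening on whatever URL you configured for the ``nlg`` endpoint in your ``endpoints.yml`` (here assumed to be ``http://localhost:5056/nlg``), and it simply looks the requested ``template`` up in a hard-coded dictionary:

.. code-block:: python

    from flask import Flask, jsonify, request

    app = Flask(__name__)

    # Hypothetical response texts, keyed by template name.
    RESPONSES = {
        "utter_greet": "hey there",
        "utter_goodbye": "bye bye!",
    }

    @app.route("/nlg", methods=["POST"])
    def nlg():
        payload = request.get_json(force=True)
        template = payload.get("template")       # e.g. "utter_greet"
        tracker = payload.get("tracker", {})     # full conversation state, if you need it
        text = RESPONSES.get(template, "Sorry, I don't have a response for that yet.")
        # Answer in the structure Rasa expects (see the example response above).
        return jsonify(
            {"text": text, "buttons": [], "image": None, "elements": [], "attachments": []}
        )

    if __name__ == "__main__":
        app.run(port=5056)

In a real service you would typically fetch the response text from a CMS or database instead of a dictionary, which is exactly what allows you to change the copy without retraining the assistant.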
diff --git a/docs/core/retrieval-actions.rst b/docs/core/retrieval-actions.rst deleted file mode 100644 index acb1b5f8576d..000000000000 --- a/docs/core/retrieval-actions.rst +++ /dev/null @@ -1,230 +0,0 @@ -:desc: Use a retrieval model to select chatbot responses - in open source bot framework Rasa. - -.. _retrieval-actions: - -Retrieval Actions -================= - -.. edit-link:: - -.. warning:: - This feature is experimental. - We introduce experimental features to get feedback from our community, so we encourage you to try it out! - However, the functionality might be changed or removed in the future. - If you have feedback (positive or negative) please share it with us on the `forum <https://forum.rasa.com>`_. - -.. contents:: - :local: - -About -^^^^^ - -Retrieval actions are designed to make it simpler to work with :ref:`small-talk` and :ref:`simple-questions` . -For example, if your assistant can handle 100 FAQs and 50 different small talk intents, you can use a single retrieval -action to cover all of these. -From a dialogue perspective, these single-turn exchanges can all be treated equally, so this simplifies your stories. - -Instead of having a lot of stories like: - -.. code-block:: story - - ## weather - * ask_weather - - utter_ask_weather - - ## introduction - * ask_name - - utter_introduce_myself - - ... - - -You can cover all of these with a single story where the above intents are grouped under a common ``chitchat`` intent: - - -.. code-block:: story - - ## chitchat - * chitchat - - respond_chitchat - -A retrieval action uses the output of a :ref:`response-selector` component from NLU which learns a -retrieval model to predict the correct response from a list of candidate responses given a user message text. - -Training Data -^^^^^^^^^^^^^ - -Like the name suggests, retrieval actions learn to select the correct response from a list of candidates. -As with other NLU data, you need to include examples of what your users will say in your NLU file: - -.. code-block:: md - - ## intent: chitchat/ask_name - - what's your name - - who are you? - - what are you called? - - ## intent: chitchat/ask_weather - - how's weather? - - is it sunny where you are? - -First, all of these examples will be combined into a single ``chitchat`` retrieval intent that NLU will predict. -All retrieval intents have a suffix added to them which identifies a particular response text for your assistant, in the -above example - ``ask_name`` and ``ask_weather``. The suffix is separated from the intent name by a ``/`` delimiter - -Next, include response texts for all retrieval intents in a **separate** training data file as ``responses.md``: - -.. code-block:: md - - ## ask name - * chitchat/ask_name - - my name is Sara, Rasa's documentation bot! - - ## ask weather - * chitchat/ask_weather - - it's always sunny where I live - -The retrieval model is trained separately as part of the NLU training pipeline to select the correct response. -One important thing to remember is that the retrieval model uses the text of the response messages -to select the correct one. If you change the text of these responses, you have to retrain your retrieval model! -This is a key difference to the response templates in your domain file. - -.. note:: - The file containing response texts must exist as a separate file inside the training data directory passed - to the training process. The contents of it cannot be a part of the file which contains training data for other - components of NLU. 
- -Config File -^^^^^^^^^^^ - -You need to include the :ref:`response-selector` component in your config. The component needs a tokenizer, a featurizer and an -intent classifier to operate on the user message before it can predict a response and hence these -components should be placed before ``ResponseSelector`` in the NLU configuration. An example: - -.. code-block:: yaml - - language: "en" - - pipeline: - - name: "WhitespaceTokenizer" - intent_split_symbol: "_" - - name: "CountVectorsFeaturizer" - - name: "EmbeddingIntentClassifier" - - name: "ResponseSelector" - -Domain -^^^^^^ - -Rasa uses a naming convention to match the intent names like ``chitchat/ask_name`` -to the retrieval action. -The correct action name in this case is ``respond_chitchat``. The prefix ``respond_`` is mandatory to identify it as a -retrieval action. Another example - correct action name for ``faq/ask_policy`` would be ``respond_faq`` -To include this in your domain, add it to the list of actions: - -.. code-block:: yaml - - actions: - ... - - respond_chitchat - - respond_faq - - -A simple way to ensure that the retrieval action is predicted after the chitchat -intent is to use the :ref:`mapping-policy`. -However, you can also include this action in your stories. -For example, if you want to repeat a question after handling chitchat -(see :ref:`unhappy-paths` ) - -.. code-block:: story - - ## interruption - * search_restaurant - - utter_ask_cuisine - * chitchat - - respond_chitchat - - utter_ask_cuisine - -Multiple Retrieval Actions -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If your assistant includes both FAQs **and** chitchat, it is possible to -separate these into separate retrieval actions, for example having intents -like ``chitchat/ask_weather`` and ``faq/returns_policy``. -Rasa supports adding multiple ``RetrievalActions`` like ``respond_chitchat`` and ``respond_returns_policy`` -To train separate retrieval models for each of the intents, you need to include a separate ``ResponseSelector`` -component in the config: - -.. code-block:: yaml - - language: "en" - - pipeline: - - name: "WhitespaceTokenizer" - intent_split_symbol: "_" - - name: "CountVectorsFeaturizer" - - name: "EmbeddingIntentClassifier" - - name: "ResponseSelector" - retrieval_intent: chitchat - - name: "ResponseSelector" - retrieval_intent: faq - -You could still have two separate retrieval actions but both actions can share the same retrieval model by specifying a single - ``ResponseSelector`` component and leaving the ``retrieval_intent`` to its default value(None): - -.. code-block:: yaml - - language: "en" - - pipeline: - - name: "WhitespaceTokenizer" - intent_split_symbol: "_" - - name: "CountVectorsFeaturizer" - - name: "EmbeddingIntentClassifier" - - name: "ResponseSelector" - - -In this case, the response selector will be trained on examples from both ``chitchat/{x}`` and ``faq/{x}`` and will be -identified by the name ``default`` the NLU parsed output. - -In our experiments so far, having separate retrieval models does **not** make any difference to the accuracy of each -retrieval action. So for simplicity, we recommend you use a single retrieval -model for both chitchat and FAQs -If you get different results, please let us know in the `forum <https://forum.rasa.com>`_ ! - - -Parsing Response Selector Output -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The parsed output from NLU will have a property named ``response_selector`` containing the output for -each response selector. 
Each response selector is identified by ``retrieval_intent`` parameter of that response selector -and stores two properties - - - - ``response``: The predicted response text and the prediction confidence. - - ``ranking``: Ranking with confidences of top 10 candidate responses. - -Example result: - -.. code-block:: json - - { - "text": "What is the recommend python version to install?", - "entities": [], - "intent": {"confidence": 0.6485910906220309, "name": "faq"}, - "intent_ranking": [ - {"confidence": 0.6485910906220309, "name": "faq"}, - {"confidence": 0.1416153159565678, "name": "greet"} - ], - "response_selector": { - "faq": { - "response": {"confidence": 0.7356462617, "name": "Supports 3.5, 3.6 and 3.7, recommended version is 3.6"}, - "ranking": [ - {"confidence": 0.7356462617, "name": "Supports 3.5, 3.6 and 3.7, recommended version is 3.6"}, - {"confidence": 0.2134543431, "name": "You can ask me about how to get started"} - ] - } - } - } - -If the ``retrieval_intent`` parameter of a particular response selector was left to its default value, -the corresponding response selector will be identified as ``default`` in the returned output. \ No newline at end of file diff --git a/docs/core/slots.rst b/docs/core/slots.rst deleted file mode 100644 index a4a19e80b5bc..000000000000 --- a/docs/core/slots.rst +++ /dev/null @@ -1,346 +0,0 @@ -:desc: Store information the user provided as well as information from database - queries in slots to influence how the machine learning based dialogue - continues. - -.. _slots: - -Slots -===== - -.. edit-link:: - -.. contents:: - :local: - -What are slots? ---------------- - -**Slots are your bot's memory.** They act as a key-value store -which can be used to store information the user provided (e.g their home city) -as well as information gathered about the outside world (e.g. the result of a -database query). - -Most of the time, you want slots to influence how the dialogue progresses. -There are different slot types for different behaviors. - -For example, if your user has provided their home city, you might -have a ``text`` slot called ``home_city``. If the user asks for the -weather, and you *don't* know their home city, you will have to ask -them for it. A ``text`` slot only tells Rasa Core whether the slot -has a value. The specific value of a ``text`` slot (e.g. Bangalore -or New York or Hong Kong) doesn't make any difference. - -If the value itself is important, use a ``categorical`` or a ``bool`` slot. -There are also ``float``, and ``list`` slots. -If you just want to store some data, but don't want it to affect the flow -of the conversation, use an ``unfeaturized`` slot. - - -How Rasa Uses Slots -------------------- - -The ``Policy`` doesn't have access to the -value of your slots. It receives a featurized representation. -As mentioned above, for a ``text`` slot the value is irrelevant. -The policy just sees a ``1`` or ``0`` depending on whether it is set. - -**You should choose your slot types carefully!** - -How Slots Get Set ------------------ - -You can provide an initial value for a slot in your domain file: - -.. code-block:: yaml - - slots: - name: - type: text - initial_value: "human" - - -There are multiple ways that slots are set during a conversation: - -Slots Set from NLU -~~~~~~~~~~~~~~~~~~ - -If your NLU model picks up an entity, and your domain contains a -slot with the same name, the slot will be set automatically. For example: - -.. 
code-block:: story - - # story_01 - * greet{"name": "Ali"} - - slot{"name": "Ali"} - - utter_greet - -In this case, you don't have to include the ``- slot{}`` part in the story, because it is automatically picked up. - -To disable this behavior for a particular slot, you can set the ``auto_fill`` attribute to ``False`` in the domain file: - -.. code-block:: yaml - - slots: - name: - type: text - auto_fill: False - - -Slots Set By Clicking Buttons -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can use buttons as a shortcut. Rasa Core will send messages starting with a ``/`` to the ``RegexInterpreter``, which expects NLU input in the same format as in story files, e.g. ``/intent{entities}``. For example, if you let users choose a color by clicking a button, the button payloads might be ``/choose{"color": "blue"}`` and ``/choose{"color": "red"}``. - -You can specify this in your domain file like this (see details in :ref:`domains`): - -.. code-block:: yaml - - utter_ask_color: - - text: "what color would you like?" - buttons: - - title: "blue" - payload: '/choose{"color": "blue"}' - - title: "red" - payload: '/choose{"color": "red"}' - - -Slots Set by Actions -~~~~~~~~~~~~~~~~~~~~ - -Another way is to set slots by returning events in :ref:`custom actions <custom-actions>`. In this case, your stories need to include the slots. For example, you have a custom action to fetch a user's profile, and you have a ``categorical`` slot called ``account_type``. When the ``action_fetch_profile`` action is run, it returns a :class:`rasa.core.events.SlotSet` event: - -.. code-block:: yaml - - slots: - account_type: - type: categorical - values: - - premium - - basic - -.. code-block:: python - - from rasa_sdk import Action - from rasa_sdk.events import SlotSet - import requests - - class FetchProfileAction(Action): - def name(self): - return "action_fetch_profile" - - def run(self, dispatcher, tracker, domain): - url = "http://myprofileurl.com" - data = requests.get(url).json() - return [SlotSet("account_type", data["account_type"])] - - -.. code-block:: story - - # story_01 - * greet - - action_fetch_profile - - slot{"account_type" : "premium"} - - utter_welcome_premium - - # story_02 - * greet - - action_fetch_profile - - slot{"account_type" : "basic"} - - utter_welcome_basic - - -In this case you **do** have to include the ``- slot{}`` part in your stories. Rasa Core will learn to use this information to decide on the correct action to take (in this case, ``utter_welcome_premium`` or ``utter_welcome_basic``). - -.. note:: - It is **very easy** to forget about slots if you are writing - stories by hand. We strongly recommend that you build up these - stories using :ref:`section_interactive_learning_forms` rather than writing them. - - -.. _slot-classes: - -Slot Types ----------- - -Text Slot -~~~~~~~~~ - -.. option:: text - - :Use For: User preferences where you only care whether or not they've - been specified. - :Example: - .. sourcecode:: yaml - - slots: - cuisine: - type: text - :Description: - Results in the feature of the slot being set to ``1`` if any value is set. - Otherwise the feature will be set to ``0`` (no value is set). - -Boolean Slot -~~~~~~~~~~~~ - -.. option:: bool - - :Use For: True or False - :Example: - .. sourcecode:: yaml - - slots: - is_authenticated: - type: bool - :Description: - Results in a feature indicating whether the slot is set and whether its value is true. - -Categorical Slot -~~~~~~~~~~~~~~~~ - -.. option:: categorical - - :Use For: Slots which can take one of N values - :Example: - ..
sourcecode:: yaml - - slots: - risk_level: - type: categorical - values: - - low - - medium - - high - - :Description: - Creates a one-hot encoding describing which of the ``values`` matched. - -Float Slot -~~~~~~~~~~ - -.. option:: float - - :Use For: Continuous values - :Example: - .. sourcecode:: yaml - - slots: - temperature: - type: float - min_value: -100.0 - max_value: 100.0 - - :Defaults: ``max_value=1.0``, ``min_value=0.0`` - :Description: - All values below ``min_value`` will be treated as ``min_value``; the same - happens for values above ``max_value``. Hence, if ``max_value`` is set to - ``1``, there is no difference between the slot values ``2`` and ``3.5`` in - terms of featurization (e.g. both values will influence the dialogue in - the same way and the model cannot learn to differentiate between them). - -List Slot -~~~~~~~~~ - -.. option:: list - - :Use For: Lists of values - :Example: - .. sourcecode:: yaml - - slots: - shopping_items: - type: list - :Description: - The feature of this slot is set to ``1`` if a non-empty list is set as the - value. If no value is set, or the set value is an empty list, the feature - will be ``0``. The **length of the list stored in the slot does not - influence the dialogue**. - -.. _unfeaturized-slot: - -Unfeaturized Slot -~~~~~~~~~~~~~~~~~ - -.. option:: unfeaturized - - :Use For: Data you want to store which shouldn't influence the dialogue flow - :Example: - .. sourcecode:: yaml - - slots: - internal_user_id: - type: unfeaturized - :Description: - There will not be any featurization of this slot, hence its value does - not influence the dialogue flow and is ignored when predicting the next - action the bot should run. - -Custom Slot Types ----------------- - -Maybe your restaurant booking system can only handle bookings for up to 6 people. In this case you want the *value* of the slot to influence the next selected action (and not just whether it's been specified). You can do this by defining a custom slot class. - -In the code below, we define a slot class called ``NumberOfPeopleSlot``. The featurization defines how the value of this slot gets converted to a vector so that our machine learning model can deal with it. Our slot has three possible "values", which we can represent with a vector of length ``2``. - -+---------------+------------------------------------------+ -| ``(0,0)`` | not yet set | -+---------------+------------------------------------------+ -| ``(1,0)`` | between 1 and 6 | -+---------------+------------------------------------------+ -| ``(0,1)`` | more than 6 | -+---------------+------------------------------------------+ - - -.. testcode:: - - from rasa.core.slots import Slot - - class NumberOfPeopleSlot(Slot): - - def feature_dimensionality(self): - return 2 - - def as_feature(self): - r = [0.0] * self.feature_dimensionality() - if self.value: - if int(self.value) <= 6: - r[0] = 1.0 - else: - r[1] = 1.0 - return r - -Now we also need some training stories, so that Rasa Core can learn from them how to handle the different situations: - - -.. code-block:: story - - # story1 - ... - * inform{"people": "3"} - - action_book_table - ...
- # story2 - * inform{"people": "9"} - - action_explain_table_limit diff --git a/docs/core/stories.rst b/docs/core/stories.rst deleted file mode 100644 index 6d1198a2eb02..000000000000 --- a/docs/core/stories.rst +++ /dev/null @@ -1,215 +0,0 @@ -:desc: Stories are used to teach Rasa real conversation designs to learn - from providing the basis for a scalable machine learning dialogue management. - -.. _stories: - -Stories -======= - -.. edit-link:: - -.. contents:: - :local: - -Rasa stories are a form of training data used to train the Rasa's dialogue management models. - -A story is a representation of a conversation between a user and an AI assistant, converted into a specific format where user inputs are expressed as corresponding intents (and entities where necessary) while the responses of an assistant are expressed as corresponding action names. - -A training example for the Rasa Core dialogue system is called a **story**. -This is a guide to the story data format. - -.. note:: - You can also **spread your stories across multiple files** and specify the - folder containing the files for most of the scripts (e.g. training, - visualization). The stories will be treated as if they would have - been part of one large file. - - -Format ------- - -Here's an example of a dialogue in the Rasa story format: - -.. code-block:: story - - ## greet + location/price + cuisine + num people <!-- name of the story - just for debugging --> - * greet - - action_ask_howcanhelp - * inform{"location": "rome", "price": "cheap"} <!-- user utterance, in format intent{entities} --> - - action_on_it - - action_ask_cuisine - * inform{"cuisine": "spanish"} - - action_ask_numpeople <!-- action that the bot should execute --> - * inform{"people": "six"} - - action_ack_dosearch - - -What makes up a story? -~~~~~~~~~~~~~~~~~~~~~~ - -- A story starts with a name preceded by two hashes ``## story_03248462``. - You can call the story anything you like, but it can be very useful for - debugging to give them descriptive names! -- The end of a story is denoted by a newline, and then a new story - starts again with ``##``. -- Messages sent by the user are shown as lines starting with ``*`` - in the format ``intent{"entity1": "value", "entity2": "value"}``. -- Actions executed by the bot are shown as lines starting with ``-`` - and contain the name of the action. -- Events returned by an action are on lines immediately after that action. - For example, if an action returns a ``SlotSet`` event, this is shown as - ``slot{"slot_name": "value"}``. - - -User Messages -~~~~~~~~~~~~~ -While writing stories, you do not have to deal with the specific contents of -the messages that the users send. Instead, you can take advantage of the output -from the NLU pipeline, which lets you use just the combination of an intent and -entities to refer to all the possible messages the users can send to mean the -same thing. - -It is important to include the entities here as well because the policies learn -to predict the next action based on a *combination* of both the intent and -entities (you can, however, change this behavior using the -:ref:`use_entities <use_entities>` attribute). - -Actions -~~~~~~~ -While writing stories, you will encounter two types of actions: utterances -and custom actions. Utterances are hardcoded messages that a bot can respond -with. Custom actions, on the other hand, involve custom code being executed. 
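As a purely illustrative sketch (this class and its behavior are hypothetical and not part of any example project), a custom action is just a small Python class; the string returned by its ``name`` method is what appears in your stories:

.. code-block:: python

    from rasa_sdk import Action

    class ActionCheckWeather(Action):
        """Hypothetical custom action, referenced in stories as ``action_check_weather``."""

        def name(self):
            # This string is the action name used in stories and in the domain.
            return "action_check_weather"

        def run(self, dispatcher, tracker, domain):
            # A real implementation would call a weather API here.
            dispatcher.utter_message("It looks sunny today!")
            return []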
- -All actions (both utterances and custom actions) executed by the bot are shown -as lines starting with ``-`` followed by the name of the action. - -All utterances must begin with the prefix ``utter_``, and must match the name -of the template defined in the domain. - -For custom actions, the action name is the string you choose to return from -the ``name`` method of the custom action class. Although there is no restriction -on naming your custom actions (unlike utterances), the best practice here is to -prefix the name with ``action_``. - -Events -~~~~~~ -Events such as setting a slot or activating/deactivating a form have to be -explicitly written out as part of the stories. Having to include the events -returned by a custom action separately, when that custom action is already -part of a story might seem redundant. However, since Rasa cannot -determine this fact during training, this step is necessary. - -You can read more about events :ref:`here <end_to_end_evaluation>`. - -Slot Events -*********** -Slot events are written as ``- slot{"slot_name": "value"}``. If this slot is set -inside a custom action, it is written on the line immediately following the -custom action event. If your custom action resets a slot value to `None`, the -corresponding event for that would be ``-slot{"slot_name": null}``. - -Form Events -*********** -There are three kinds of events that need to be kept in mind while dealing with -forms in stories. - -- A form action event (e.g. ``- restaurant_form``) is used in the beginning when first starting a form, and also while resuming the form action when the form is already active. -- A form activation event (e.g. ``- form{"name": "restaurant_form"}``) is used right after the first form action event. -- A form deactivation event (e.g. ``- form{"name": null}``), which is used to deactivate the form. - - -.. note:: - In order to get around the pitfall of forgetting to add events, the recommended - way to write these stories is to use :ref:`interactive learning <interactive-learning>`. - - -Writing Fewer and Shorter Stories ---------------------------------- - - -Checkpoints -~~~~~~~~~~~ - -You can use ``> checkpoints`` to modularize and simplify your training -data. Checkpoints can be useful, but **do not overuse them**. Using -lots of checkpoints can quickly make your example stories hard to -understand. It makes sense to use them if a story block is repeated -very often in different stories, but stories *without* checkpoints -are easier to read and write. Here is an example story file which -contains checkpoints (note that you can attach more than one checkpoint -at a time): - -.. code-block:: story - - ## first story - * greet - - action_ask_user_question - > check_asked_question - - ## user affirms question - > check_asked_question - * affirm - - action_handle_affirmation - > check_handled_affirmation - - ## user denies question - > check_asked_question - * deny - - action_handle_denial - > check_handled_denial - - ## user leaves - > check_handled_denial - > check_handled_affirmation - * goodbye - - utter_goodbye - -.. note:: - Unlike regular stories, checkpoints are not restricted to starting with an - input from the user. As long as the checkpoint is inserted at the right points - in the main stories, the first event can be an action or an utterance - as well. - -.. note:: - Unlike regular stories, checkpoints are not restricted to starting with an - input from the user. 
As long as the checkpoint is inserted at the right points - in the main stories, the first event can be an action or an utterance - as well. - - -OR Statements -~~~~~~~~~~~~~ - -Another way to write shorter stories, or to handle multiple intents -the same way, is to use an ``OR`` statement. For example, if you ask -the user to confirm something, and you want to treat the ``affirm`` -and ``thankyou`` intents in the same way. The story below will be -converted into two stories at training time: - - -.. code-block:: story - - ## story - ... - - utter_ask_confirm - * affirm OR thankyou - - action_handle_affirmation - -Just like checkpoints, ``OR`` statements can be useful, but if you are using a -lot of them, it is probably better to restructure your domain and/or intents. - - -.. warning:: - Overusing these features (both checkpoints and OR statements) - will slow down training. - - -End-to-End Story Evaluation Format ----------------------------------- - -The end-to-end story format is a format that combines both NLU and Core training data -into a single file for evaluation. You can read more about it -:ref:`here <end_to_end_evaluation>`. - -.. warning:: - This format is only used for end-to-end evaluation and cannot be used for training. diff --git a/docs/dialogue-elements/completing-tasks.rst b/docs/dialogue-elements/completing-tasks.rst deleted file mode 100644 index 8db71a4f3489..000000000000 --- a/docs/dialogue-elements/completing-tasks.rst +++ /dev/null @@ -1,120 +0,0 @@ -:desc: Read about common dialogue patterns encountered by task-oriented - bots and how best to handle them using Rasa's open source dialogue - management system. - -.. _completing-tasks: - -================ -Completing Tasks -================ - -.. edit-link:: - -.. contents:: - :local: - -.. _simple-questions: - -Simple Questions ----------------- - -Simple questions, or FAQs, should receive the same answer -no matter what happened previously in the conversation. -Users will often ask a basic set of questions -and your assistant should answer them reliably. - -.. conversations:: - examples: - - - - what's your email address? - - ( it's contact@example.com - - - - do you have a loyalty program? - - ( unfortunately we don't - -Just like greetings and goodbyes, you can use the mapping policy to achieve this. -See :ref:`greetings`. - -Business Logic --------------- - -.. note:: - There is an in-depth tutorial `here <https://blog.rasa.com/building-contextual-assistants-with-rasa-formaction/>`_ about how to use Rasa Forms for slot filling and business logic. - -Your AI assistant will often have to follow some pre-defined business logic. -To figure out how to help users, your assistant will often have to ask a few questions. -The answers you get will impact the rest of the conversation; for example, some products might -be limited to users in a certain country or above a certain age. It is good practice to -implement that logic inside a form, separating it from the learned behaviour. A single form -can cover all the happy paths (e.g. all the ways that a user can provide the required information). -You can read more about forms in `this tutorial <https://blog.rasa.com/building-contextual-assistants-with-rasa-formaction/>`_. - -.. conversations:: - examples: - - - - I'd like to apply for a loan - - ( I'd love to help. Which state are you in? - - Alaska - - ( Unfortunately, we only operate in the continental U.S. - - - - I'd like to apply for a loan - - ( I'd love to help. Which state are you in? - - California - - ( Thanks. 
Do you know what your credit score is? - - -See :ref:`conditional-logic` for details on how to use forms to implement business logic. - -Contextual Questions --------------------- - -Unlike answers to FAQs, correct responses to contextual questions depend on the conversation history. -These include questions which refer to something earlier in the conversation and are ambiguous -on their own. -Real users will often ask questions like "which is better?" and "why?". -It is frustrating for users if your assistant doesn't understand this, -and can only answer full questions like "which of your savings accounts has a better interest rate?" -Understanding contextual questions is a key difference between `level 2 and level 3 assistants <https://www.oreilly.com/ideas/the-next-generation-of-ai-assistants-in-enterprise>`_. - - -.. conversations:: - examples: - - - - ( what's your email address? - - why do you need to know that? - - ( I need your email so I can send you a confirmation - - - - ( are you currently a premium customer? - - what do you mean? - - ( We have different memberships. Check your statement to see if you are a premium member. - -.. _unhappy-paths: - -Unhappy Paths -------------- - -When your assistant asks a user for information, you will often get responses other -than the information you asked for. For example, the user might refuse to provide this information, -they might correct something they said earlier, or interrupt with chitchat. -It is important that your assistant can handle these edge cases. There -are so many things a user might say other than provide you the information you asked for, -and a simple interruption shouldn't throw off your assistant completely. -This is a key reason for building an assistant that can learn from real data. - -The best way to collect training data for unhappy paths is to use -:ref:`interactive-learning`. - -.. conversations:: - examples: - - - - ( what's your email address? - - no. - - ( I will need your email address in order to create an account. - - ( what's your email address? - - - - ( what's your email address? - - work@example.com - - ( thanks, and your phone number? - - no wait, please use personal@example.com - - ( ok, I'll use that email. - - ( thanks, and your phone number? diff --git a/docs/dialogue-elements/dialogue-elements.rst b/docs/dialogue-elements/dialogue-elements.rst deleted file mode 100644 index 13c3092d34ea..000000000000 --- a/docs/dialogue-elements/dialogue-elements.rst +++ /dev/null @@ -1,34 +0,0 @@ -:desc: Dialogue elements are an abstraction layer for your conversational AI platform - which describe common, recurring patterns in chatbot conversations. - -.. _dialogue-elements: - -Dialogue Elements -================= - -.. edit-link:: - -Dialogue elements are common conversation patterns. -We use three different levels of abstraction to discuss AI assistants. -This can be helpful in a product team, so that you have a common language -which designers, developers, and product owners can use to discuss -issues and new features. - -- highest level: user goals -- middle level: dialogue elements -- lowest level: intents, entities, actions, slots, and templates. - - - -.. note:: - Some chatbot tools use the word ``intent`` to refer to the user - goal. This is confusing because only some messages tell you what a user's - goal is. If a user says "I want to open an account" (``intent: open_account``), - that is clearly their goal. 
But most user messages ("yes", "what does that mean?", "I don't know") - aren't specific to one goal. In Rasa, every message has an intent, - and a user goal describes what a person wants to achieve. - - -.. image:: /_static/images/intents-user-goals-dialogue-elements.png - - diff --git a/docs/dialogue-elements/guiding-users.rst b/docs/dialogue-elements/guiding-users.rst deleted file mode 100644 index 473464cb33c1..000000000000 --- a/docs/dialogue-elements/guiding-users.rst +++ /dev/null @@ -1,193 +0,0 @@ -:desc: Read about dialogue patterns you can use to deliver a friendlier user - experience with your bot using Rasa's open source dialogue chat - assistant platform. - -.. _guiding-users: - -============= -Guiding Users -============= - -.. edit-link:: - -.. contents:: - :local: - -.. _implicit-confirmation: - -Implicit Confirmation ---------------------- - -Implicit confirmation involves repeating details back to the user to reassure -them that they were understood correctly. -This also gives the user a chance to intervene if your assistant misunderstood. - -.. conversations:: - examples: - - - - Can I get a large hawaiian and bbq pizza - - ( Sure, that's one large hawaiian and one regular bbq pizza. - - ( Anything else? - - No they should both be large! - - -.. _explicit-confirmation: - -Explicit Confirmation ---------------------- - -Explicit confirmation means asking the user to clarify how you should help them. -An important thing to remember about AI assistants is that **the user is never wrong**. -When a user tells you something like `I just moved`, they are being perfectly clear, -even if your assistant is not sure how to help them. -If the user goal is ambiguous to your assistant, ask for clarification. - - -.. conversations:: - examples: - - - - I just moved - - ( I'm not sure I understood you correctly. Do you mean ... - - ^ I want to cancel my contract - - ^ I want to update my personal details - - -You can configure the ``TwoStageFallbackPolicy`` to ask your user to clarify, -and present them with quick replies for the most likely intents. -To do this, configure the policy as in this example: - -.. code-block:: yaml - - policies: - - name: TwoStageFallbackPolicy - nlu_threshold: 0.3 - core_threshold: 0.3 - fallback_core_action_name: "action_default_fallback" - fallback_nlu_action_name: "action_default_fallback" - deny_suggestion_intent_name: "out_of_scope" - ... - -.. _explaining-possibilities: - -Explaining Possibilities ------------------------- - -AI assistants are always limited to helping users with a specific set of -tasks, and should be able to explain to a user what they can do. -That includes coherently responding to requests that are out of scope. - - -.. conversations:: - examples: - - - - What can you do? - - ( I can help you update your personal details, change your plan, and answer any questions you have about our products. - - - - Can you get me a pizza? - - ( I'm afraid I can't help with that. - - ( I can help you update your personal details, change your plan, and answer any questions you have about our products. - - -When to explain the limitations of your assistant is specific to your application, -but these example stories show some common cases: - -.. 
code-block:: story - - ## user asks whats possible - * ask_whatspossible - - utter_explain_whatspossible - - ## user asks for something out of scope - * out_of_scope - - utter_cannot_help - - utter_explain_whatspossible - - -Collecting User Feedback ------------------------ - -Asking for feedback is one of the best tools you have to understand your users and determine whether you solved their problem! Storing this feedback is a powerful way to figure out how you can improve your assistant. - -.. conversations:: - examples: - - - - ( Was that helpful? - - no. - - ( Thanks. Why wasn't I able to help? - - ^ you didn't understand me correctly - - ^ you understood me, but your answers weren't very helpful. - - -Use a form to collect user feedback. To do this, define a custom form action (see :ref:`forms` for more details about forms). - -.. code-block:: python - - from rasa_sdk.forms import FormAction - - class FeedbackForm(FormAction): - - def name(self): - return "feedback_form" - - @staticmethod - def required_slots(tracker): - return ["feedback", "negative_feedback_reason"] - - -Add the form and slots to your domain: - - -.. code-block:: yaml - - forms: - - feedback_form - slots: - feedback: - type: bool - negative_feedback_reason: - type: text - requested_slot: - type: text - -And make sure the ``FormPolicy`` is present in your configuration file: - -.. code-block:: yaml - - policies: - - FormPolicy - ... - - - -Handing off to a Human ---------------------- - -Users will be very frustrated if your assistant cannot help them and there is no way to reroute the conversation to a human agent. There should always be a way to break out of a conversation! There are multiple reasons why you might trigger a human handoff: - -* the user asks to speak to a human -* the assistant is struggling to understand the user -* the assistant understands what the user wants, but a human is required to resolve the issue. - - -.. conversations:: - examples: - - - - let me speak to a human - - ( let me put you in touch with someone. - - - - I want to cancel - - ( I'm afraid I can't help you with that. - - ( let me put you in touch with someone. - - -The direct request to speak with a human can be handled using the mapping policy: - -.. code-block:: yaml - - intents: - - request_human: {"triggers": "action_human_handoff"} diff --git a/docs/dialogue-elements/small-talk.rst b/docs/dialogue-elements/small-talk.rst deleted file mode 100644 index fd25a80d56c3..000000000000 --- a/docs/dialogue-elements/small-talk.rst +++ /dev/null @@ -1,184 +0,0 @@ -:desc: Learn to handle greetings, off-topic chitchat, and other small talk - in your bot using features provided by Rasa's open source chat assistant - platform. - -.. _small-talk: - -========== -Small Talk -========== - -.. edit-link:: - -Small talk includes the back-and-forth that makes conversations natural, but doesn’t directly relate to the user's goal. This includes greetings, acknowledgements, reactions, and off-topic chitchat. - -.. contents:: - :local: - -.. _greetings: - -Greetings --------- - -Greetings and goodbyes are some of the simplest interactions. Just about every system needs them. - -.. conversations:: - examples: - - - - hello - - ( hi, how are you? - - - - how are you? - - ( I am well, and you? - - - - goodbye - - ( bye bye! - - -To respond correctly to greetings and goodbyes, you need to define responses for each of these.
If you always want the same responses, you can use the ``MappingPolicy`` -to trigger these responses when the corresponding intent is predicted. - -In your domain file, add the ``triggers`` metadata to the relevant intents: - -.. code-block:: yaml - - intents: - - greet: {triggers: utter_greet} - - goodbye: {triggers: utter_goodbye} - -And make sure the mapping policy is present in your ``config.yml``: - -.. code-block:: yaml - - policies: - - name: "MappingPolicy" - ... - -If you want to implement less rigid behaviour, use regular stories -instead of the mapping policy. For example, if you want to send a special -response if the user says goodbye immediately after saying hello, remove the -``triggers`` metadata from the domain file, and include relevant stories in your -training data: - -.. code-block:: story - - * greet - - utter_greet - * goodbye - - utter_ask_why_leaving - - -Acknowledgements ----------------- - -Your users will often react to the things your assistant says, and will expect an acknowledgement. -Acknowledgements can be as simple as a thumbs up. -They reassure the user that their message has been received. -For the most common reactions, it is worth implementing specific responses. - -.. conversations:: - examples: - - - - woah that's expensive! - - ( we offer good value. - - ( would you like to continue getting a quote? - - - - that's awesome! - - ( glad you think so :) - - -First, you need NLU data for reactions and acknowledgements: - -.. code-block:: md - - ## intent:acknowledge - - ok - - got it - - understood - - k - - ## intent:opinion+positive - - nice! - - excellent - - that's awesome - - ## intent:opinion+negative - - ugh - - that sucks - - woah! that's [expensive](price) - - -And then you need training stories to teach Rasa how to respond: - -.. code-block:: story - - ## price reaction - * opinion+negative{"price": "expensive"} - - utter_good_value - - utter_ask_continue - - ## simple acknowledgement - * opinion+positive - - utter_positive_feedback_reaction - - -Chitchat --------- - -Your assistant will often receive unexpected or unprompted input. -We call this chitchat. -While it's not possible to coherently respond to everything a user -might say, you can at least acknowledge that the message was received. -One strategy is to collect training data from your users and define intents -and responses for some of the more common topics. -See :ref:`explaining-possibilities` for how to handle out-of-scope input. - -.. conversations:: - examples: - - - - will you marry me? - - ( no - - - - I can juggle 7 oranges - - ( wow! - - - - aahhh - - ( I feel you - - -Insults -------- - -Unfortunately users will often abuse your assistant. You should acknowledge the nature of their -comment and respond in a way that reflects your assistant's persona. -Responding with a joke can encourage users to continue sending abuse, so consider your responses carefully. -You can read more about this topic in `this paper <https://www.aclweb.org/anthology/W18-0802>`_. - - -.. conversations:: - examples: - - - - stupid bot - - ( that's not very nice - - -The simplest approach is to create a single ``insult`` intent and use the mapping policy -to respond to it: - -In your domain file: - -.. code-block:: yaml - - intents: - - insult: {triggers: utter_respond_insult} - -And in your configuration file: - -.. code-block:: yaml - - policies: - - name: "MappingPolicy" - ... 
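If you would rather keep insults from influencing the next action prediction at all, you can instead map the intent to a custom action that responds and then reverts the user message, mirroring the ``action_is_bot`` example from the mapping policy documentation. The class below is a hypothetical sketch, assuming a ``utter_respond_insult`` response exists in your domain:

.. code-block:: python

    from rasa_sdk import Action
    from rasa_sdk.events import UserUtteranceReverted

    class ActionRespondInsult(Action):
        """Respond to an insult without affecting the dialogue history."""

        def name(self):
            return "action_respond_insult"

        def run(self, dispatcher, tracker, domain):
            dispatcher.utter_template("utter_respond_insult", tracker)
            # Revert the user's message so it is not used for the
            # next action prediction.
            return [UserUtteranceReverted()]

In that case the domain mapping would point at the action instead of the utterance, e.g. ``- insult: {triggers: action_respond_insult}``.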
diff --git a/docs/docs/action-server.mdx b/docs/docs/action-server.mdx new file mode 100644 index 000000000000..a1ad3ace9df1 --- /dev/null +++ b/docs/docs/action-server.mdx @@ -0,0 +1,12 @@ +--- +id: action-server +sidebar_label: Action Server +title: Action Server +description: Check out the API docs for open source chatbot framework Rasa's action server, which allows you to define your own custom actions. +hide_table_of_contents: true +--- + +import useBaseUrl from '@docusaurus/useBaseUrl'; +import Redoc from '@site/src/components/redoc'; + +<Redoc specUrl={useBaseUrl("/spec/action-server.yml")} /> diff --git a/docs/docs/actions.mdx b/docs/docs/actions.mdx new file mode 100644 index 000000000000..634a31769558 --- /dev/null +++ b/docs/docs/actions.mdx @@ -0,0 +1,7 @@ +--- +id: actions +sidebar_label: Overview +title: Actions +--- + +<!-- TODO: add an overview of the different types of actions --> \ No newline at end of file diff --git a/docs/docs/architecture.mdx b/docs/docs/architecture.mdx new file mode 100644 index 000000000000..0bcf17175a6f --- /dev/null +++ b/docs/docs/architecture.mdx @@ -0,0 +1,39 @@ +--- +id: architecture +sidebar_label: Rasa Architecture +title: Rasa Architecture +description: Check the architecture to understand how Rasa uses machine learning, context and state of the conversation to predict the next action of the AI Assistant. +--- +import useBaseUrl from '@docusaurus/useBaseUrl'; + +## Message Handling + +This diagram shows the basic steps of how an assistant built with Rasa +responds to a message: + + + +<img alt="image" src={useBaseUrl("/img/rasa-message-processing.png")} /> + +The steps are: + +1. The message is received and passed to an `Interpreter`, which + converts it into a dictionary including the original text, the intent, + and any entities that were found. This part is handled by NLU. + +2. The `Tracker` is the object which keeps track of conversation state. + It receives the info that a new message has come in. + +3. The policy receives the current state of the tracker. + +4. The policy chooses which action to take next. + +5. The chosen action is logged by the tracker. + +6. A response is sent to the user. + +:::note +Messages can be text typed by a human, or structured input +like a button press. + +::: diff --git a/docs/docs/business-logic.mdx b/docs/docs/business-logic.mdx new file mode 100644 index 000000000000..ebc006fd2447 --- /dev/null +++ b/docs/docs/business-logic.mdx @@ -0,0 +1,321 @@ +--- +id: business-logic +sidebar_label: Handling Business Logic +title: Handling Business Logic +--- + + +A lot of conversational assistants have user goals that involve collecting a bunch of information +from the user before being able to do something for them. This is called slot filling. For +example, in the banking industry you may have a user goal of transferring money, where you +need to collect information about which account to transfer from, whom to transfer to and the +amount to transfer. This type of behavior can and should be handled in a rule based way, as it +is clear how this information should be collected. + +For this type of use case, we can use Forms and our FormPolicy. The [FormPolicy](./policies.mdx#form-policy) +works by predicting the form as the next action until all information is gathered from the user. + +As an example, we will build out the SalesForm from Sara. 
The user wants to contact +our sales team, and for this we need to gather the following pieces of information: + +* Their job + +* Their bot use case + +* Their name + +* Their email + +* Their budget + +* Their company + +We will start by defining the `SalesForm` as a new class in the file called `actions.py`. +The first method we need to define is the name, which like in a regular Action +returns the name that will be used in our stories: + +```python +from rasa_sdk.forms import FormAction + +class SalesForm(FormAction): + """Collects sales information and adds it to the spreadsheet""" + + def name(self): + return "sales_form" +``` + +Next we have to define the `required_slots` method which specifies which pieces of information to +ask for, i.e. which slots to fill. + +```python +@staticmethod +def required_slots(tracker): + return [ + "job_function", + "use_case", + "budget", + "person_name", + "company", + "business_email", + ] +``` + +Note: you can customize the required slots function not to be static. E.g. if the `job_function` is a +developer, you could add a `required_slot` about the users experience level with Rasa + +Once you've done that, you'll need to specify how the bot should ask for this information. This +is done by specifying `utter_ask_{slotname}` responses in your `domain.yml` file. For the above +we'll need to specify the following: + +```yaml +utter_ask_business_email: + - text: What's your business email? +utter_ask_company: + - text: What company do you work for? +utter_ask_budget: + - text: "What's your annual budget for conversational AI? 💸" +utter_ask_job_function: + - text: "What's your job? 🕴" +utter_ask_person_name: + - text: What's your name? +utter_ask_use_case: + - text: What's your use case? +``` + +We'll also need to define all these slots in our `domain.yml` file: + +```yaml +slots: + company: + type: unfeaturized + job_function: + type: unfeaturized + person_name: + type: unfeaturized + budget: + type: unfeaturized + business_email: + type: unfeaturized + use_case: + type: unfeaturized +``` + +Going back to our Form definition, we need to define the `submit` method as well, +which will do something with the information the user has provided once the form is complete: + +```python +def submit( + self, + dispatcher: CollectingDispatcher, + tracker: Tracker, + domain: Dict[Text, Any], +) -> List[Dict]: + + dispatcher.utter_message("Thanks for getting in touch, we'll contact you soon") + return [] +``` + +In this case, we only tell the user that we'll be in touch with them, however +usually you would send this information to an API or a database. See the [rasa-demo](https://github.com/RasaHQ/rasa-demo/blob/master/actions/actions.py#L148) +for an example of how to store this information in a spreadsheet. + +We'll need to add the form we just created to a new section in our `domain.yml` file: + +```yaml +forms: + - sales_form +``` + +We also need to create an intent to activate the form, as well as an intent for providing all the +information the form asks the user for. For the form activation intent, we can create an +intent called `contact_sales`. Add the following training data to your nlu file: + +```md +## intent:contact_sales +- I wanna talk to your sales people. 
+- I want to talk to your sales people +- I want to speak with sales +- Sales +- Please schedule a sales call +- Please connect me to someone from sales +- I want to get in touch with your sales guys +- I would like to talk to someone from your sales team +- sales please +``` + +You can view the full intent [here](https://github.com/RasaHQ/rasa-demo/blob/master/data/nlu/nlu.md#intentcontact_sales)) + +We will also create an intent called `inform` which covers any sort of information the user +provides to the bot. *The reason we put all this under one intent, is because there is no +real intent behind providing information, only the entity is important.* Add the following +data to your NLU file: + +```md +## intent:inform +- [100k](budget) +- [100k](budget) +- [240k/year](budget) +- [150,000 USD](budget) +- I work for [Rasa](company) +- The name of the company is [ACME](company) +- company: [Rasa Technologies](company) +- it's a small company from the US, the name is [Hooli](company) +- it's a tech company, [Rasa](company) +- [ACME](company) +- [Rasa Technologies](company) +- [maxmeier@firma.de](business_email) +- [bot-fan@bots.com](business_email) +- [maxmeier@firma.de](business_email) +- [bot-fan@bots.com](business_email) +- [my email is email@rasa.com](business_email) +- [engineer](job_function) +- [brand manager](job_function) +- [marketing](job_function) +- [sales manager](job_function) +- [growth manager](job_function) +- [CTO](job_function) +- [CEO](job_function) +- [COO](job_function) +- [John Doe](person_name) +- [Jane Doe](person_name) +- [Max Mustermann](person_name) +- [Max Meier](person_name) +- We plan to build a [sales bot](use_case) to increase our sales by 500%. +- we plan to build a [sales bot](use_case) to increase our revenue by 100%. +- a [insurance tool](use_case) that consults potential customers on the best life insurance to choose. +- we're building a [conversational assistant](use_case) for our employees to book meeting rooms. +``` + +:::note +Entities like `business_email` and `budget` would usually be handled by pretrained entity extractors +(e.g. [DucklingHTTPExtractor](./components/entity-extractors.mdx#ducklinghttpextractor) +or [SpacyEntityExtractor](./components/entity-extractors.mdx#spacyentityextractor)), but for this tutorial +we want to avoid any additional setup. + +::: + +The intents and entities will need to be added to your `domain.yml` file as well: + +```yaml +intents: + - greet + - bye + - thank + - faq + - contact_sales + - inform + +entities: + - company + - job_function + - person_name + - budget + - business_email + - use_case +``` + +A story for a form is very simple, as all the slot collection form happens inside the form, and +therefore doesn't need to be covered in your stories. You just need to write a single story showing when the form should be activated. For the sales form, add this story +to your `stories.md` file: + +```md +## sales form +* contact_sales + - sales_form <!--Run the sales_form action--> + - active_loop{"name": "sales_form"} <!--Activate the form--> + - active_loop{"name": null} <!--Deactivate the form--> +``` + +As a final step, let's add the FormPolicy to our config file: + +```yaml +policies: + - name: MemoizationPolicy + - name: TEDPolicy + - name: MappingPolicy + - name: FormPolicy +``` + +At this point, you already have a working form, so let's try it out. 
Make sure to uncomment the
+`action_endpoint` in your `endpoints.yml` to make Rasa aware of the action server that will run our form:
+
+```yaml
+action_endpoint:
+  url: "http://localhost:5055/webhook"
+```
+
+Then start the action server in a new terminal window:
+
+```bash
+rasa run actions
+```
+
+Then you can retrain and talk to your bot:
+
+```bash
+rasa train
+rasa shell
+```
+
+This simple form will work out of the box; however, you will likely want to add a bit
+more capability to handle different situations. One example of this is validating
+slots, to make sure the user provided the information correctly (read more about it
+[here](./forms.mdx#validating-user-input)); a small sketch of such a validation method is
+shown at the end of this section.
+
+Another example is that you may want to fill slots from things other than entities
+of the same name. E.g. for the “use case” slot in our form, we would expect
+the user to type a full sentence and not something that you could necessarily
+extract as an entity. In this case we can make use of the `slot_mappings` method,
+where you can describe how each required slot should be filled. Here we can
+use the `from_text` method to extract the user's whole message:
+
+```python
+def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict[Text, Any]]]]:
+    """A dictionary to map required slots to
+    - an extracted entity
+    - intent: value pairs
+    - a whole message
+    or a list of them, where a first match will be picked"""
+    return {"use_case": self.from_text(intent="inform")}
+```
+
+Now our bot will extract the full user message when asking for the use case slot,
+and we don't need to use the `use_case` entity defined before.
+
+All of the methods within a form can be customized to handle different branches in your
+business logic. Read more about this [here](./forms.mdx).
+However, you should make sure not to handle any unhappy paths inside the form. These
+should be handled by writing regular stories, so your model can learn this behavior.
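+
+As mentioned above, individual slots can be validated by defining a `validate_{slot_name}`
+method on the form. Below is a minimal sketch for the `business_email` slot, placed inside the
+`SalesForm` class alongside the other methods; the `utter_no_email` response is a hypothetical
+addition that you would also need to define in your domain:
+
+```python
+def validate_business_email(
+    self,
+    value: Text,
+    dispatcher: CollectingDispatcher,
+    tracker: Tracker,
+    domain: Dict[Text, Any],
+) -> Dict[Text, Any]:
+    """Very rough check that the provided value looks like an email address."""
+
+    if isinstance(value, str) and "@" in value:
+        return {"business_email": value}
+
+    # "utter_no_email" is a hypothetical response you would add to domain.yml.
+    dispatcher.utter_message(template="utter_no_email")
+    # Returning None for the slot makes the form ask for it again.
+    return {"business_email": None}
+```
+
+Returning `None` for a slot tells the form that validation failed, so the corresponding
+`utter_ask_{slotname}` question will be asked again on the next turn.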
+ +:::note +Here's a minimal checklist of files we modified to handle business logic using a form action: + +* `actions.py`: Define the form action, including the `required_slots`, `slot_mappings` and `submit` methods + +* `data/nlu.md`: + + * Add examples for an intent to activate the form + + * Add examples for an `inform` intent to fill the form + +* `domain.yml`: + + * Add all slots required by the form + + * Add `utter_ask_{slot}` responses for all required slots + + * Add your form action to the `forms` section + + * Add all intents and entities from your NLU training data + +* `data/stories.md`: Add a story for the form + +* `config.yml`: + + * Add the `FormPolicy` to your policies + + * Add entity extractors to your pipeline + +* `endpoints.yml`: Define the `action_endpoint` + +::: diff --git a/docs/docs/cdd.mdx b/docs/docs/cdd.mdx new file mode 100644 index 000000000000..2bf050c11612 --- /dev/null +++ b/docs/docs/cdd.mdx @@ -0,0 +1,5 @@ +--- +id: cdd +sidebar_label: Conversation-Driven Development +title: Conversation-Driven Development +--- diff --git a/docs/docs/cheatsheet.mdx b/docs/docs/cheatsheet.mdx new file mode 100644 index 000000000000..156277e3b5d8 --- /dev/null +++ b/docs/docs/cheatsheet.mdx @@ -0,0 +1,7 @@ +--- +id: cheatsheet +sidebar_label: Rasa Cheatsheet +title: Rasa Cheatsheet +--- + +<!-- TODO: rasa cheatsheet page --> diff --git a/docs/docs/chitchat-faqs.mdx b/docs/docs/chitchat-faqs.mdx new file mode 100644 index 000000000000..4fda0474cba8 --- /dev/null +++ b/docs/docs/chitchat-faqs.mdx @@ -0,0 +1,389 @@ +--- +id: chitchat-faqs +sidebar_label: Chitchat and FAQs +title: Chitchat and FAQs +--- + +import useBaseUrl from '@docusaurus/useBaseUrl'; + +<!-- TODO: Restructure all conversations patterns pages to not depend on one another --> + +After following the basics of [prototyping an assistant](./prototype-an-assistant.mdx), we'll +now walk through building a basic FAQ chatbot and then build a bot that can handle +contextual conversations. + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="build-faq-assistant"></a> + +FAQ assistants are the simplest assistants to build and a good place to get started. +These assistants allow the user to ask a simple question and get a response. We're going to +build a basic FAQ assistant using features of Rasa designed specifically for this type of assistant. + +In this section we're going to cover the following topics: + +* [Responding to simple intents](./chitchat-faqs.mdx#respond-with-memoization-policy) with the MemoizationPolicy + +* [Handling FAQs](./chitchat-faqs.mdx#faqs-response-selector) using the ResponseSelector + +We're going to use content from [Sara](https://github.com/RasaHQ/rasa-demo), the Rasa +assistant that, amongst other things, helps the user get started with the Rasa products. +You should [first install Rasa](installation.mdx) +and then [prototype an assistant](prototype-an-assistant.mdx) +to make sure you know the basics. + +To prepare for this tutorial, we're going to start a new Rasa project: + +```bash +rasa init +``` + +Let's remove the default content from this bot, so that the `data/nlu.yml`, `data/stories.yml` +and `domain.yml` files are empty. + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="respond-with-memoization-policy"></a> + +## Memoization Policy + +The MemoizationPolicy remembers examples from training stories for up to a `max_history` +of turns. 
One “turn” includes the message sent by the user and any actions the +assistant performed before waiting for the next message. For the purpose of a simple, +context-less FAQ bot, we only need to pay attention to the last message the user sent, +and therefore we'll set that to `1`. + +You can do this by editing your configuration file as follows +(you can remove `TEDPolicy` for now): + +```yaml title="config.yml" +policies: +- name: MemoizationPolicy + max_history: 1 +- name: MappingPolicy +``` + +:::note MappingPolicy +The `MappingPolicy` is there because it handles the logic of the `/restart` intent, +which allows you to clear the conversation history and start fresh. + +::: + +Now that we've defined our policies, we can add some stories for the `goodbye`, `thank` and `greet` +intents to our stories: + +```yaml title="data/stories.yml" +stories: + +- story: greet # name of the story + steps: + - intent: greet # intent of the user message + - action: utter_greet # reaction of the bot + +- story: thank + steps: + - intent: thank + - action: utter_noworries + +- story: goodbye + steps: + - intent: bye + - action: utter_bye +``` + +We'll also need to add the intents, actions and responses to our domain: + +```yml title="domain.yml" +intents: + - greet + - bye + - thank + +responses: + utter_noworries: + - text: No worries! + + utter_greet: + - text: Hi + + utter_bye: + - text: Bye! +``` + +Finally, we'll copy over some user message training data from Sara to train our +intents (more can be found [here](https://github.com/RasaHQ/rasa-demo/blob/master/data/nlu/nlu.md)): + + +```yaml title="data/nlu.yml" +nlu: +- intent: greet + examples: | + - Hi + - Hey + - Hi bot + - Hey bot + - Hello + - Good morning + - hi again + - hi folks + +- intent: bye + examples: | + - goodbye + - goodnight + - good bye + - good night + - see ya + - toodle-oo + - bye bye + - gotta go + - farewell + +- intent: thank + examples: | + - Thanks + - Thank you + - Thank you so much + - Thanks bot + - Thanks for that + - cheers +``` + +You can now train a first model and test the bot, by running the following commands: + +```bash +rasa train +rasa shell +``` + +This bot should now be able to reply to the intents we defined consistently, and in any order. + +For example: + +<img alt="Memoization Policy Conversation" src={useBaseUrl("/img/memoization_policy_convo.png")} /> + +While it's good to test the bot interactively, we should also add end to end test cases that +can later be included as part of a [CI/CD system](./setting-up-ci-cd). End-to-end [test conversations](./testing-your-assistant#end-to-end-testing) +include NLU data, so that both components of Rasa can be tested. +The file `tests/conversation_tests.md` contains example test conversations. Delete all the test conversations and replace +them with some test conversations for your assistant so far: + +```yaml title="tests/conversation_tests.yml" +e2e_tests: +- story: greet and goodybe + steps: + - user: | + Hi! 
+ intent: greet + - action: utter_greet + - user: | + Bye + intent: bye + - action: utter_bye + +- story: greet and thanks + steps: + - user: | + Hello there + intent: greet + - action: utter_greet + - user: | + thanks a bunch + intent: thank + - action: utter_noworries + +- story: greet and thanks and goodbye + steps: + - user: | + Hey + intent: greet + - action: utter_greet + - user: | + thanks you + intent: thank + - action: utter_noworries + - user: | + bye bye + intent: bye + - action: utter_bye +``` + +To test our model against the test file, run the command: + +```bash +rasa test --stories tests/conversation_tests.yml +``` + +The test command will produce a directory named `results`. It should contain a file +called `failed_stories.yml`, where any test cases that failed will be printed. It will +also specify whether it was an NLU or Core prediction that went wrong. As part of a +CI/CD pipeline, the test option `--fail-on-prediction-errors` can be used to throw +an exception that stops the pipeline. + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="faqs-response-selector"></a> + +## Response Selectors + +The [ResponseSelector](components/selectors.mdx#responseselector) NLU component +is designed to make it easier to handle conversation patterns like small talk and +FAQ messages in a simple manner. By using the `ResponseSelector`, you only need one +story to handle all FAQs, instead of adding new stories every time you want to +increase your bot's scope. + +People often ask Sara different questions surrounding the Rasa products, so let's +start with three intents: `ask_channels`, `ask_languages`, and `ask_rasax`. +We're going to copy over some user messages from the +[Sara training data](https://github.com/RasaHQ/rasa-demo/blob/master/data/nlu/nlu.md) +into our training data. It's important that these intents have an `faq/` prefix, so +they're recognized as the faq intent by the `ResponseSelector`: + +```yml title="data/nlu.yml" +nlu: +- intent: faq/ask_channels + examples: | + - What channels of communication does rasa support? + - what channels do you support? + - what chat channels does rasa uses + - channels supported by Rasa + - which messaging channels does rasa support? + +- intent: faq/ask_languages + examples: | + - what language does rasa support? + - which language do you support? + - which languages supports rasa + - can I use rasa also for another laguage? + - languages supported + +- intent: faq/ask_rasax + examples: | + - I want information about rasa x + - i want to learn more about Rasa X + - what is rasa x? + - Can you tell me about rasa x? + - Tell me about rasa x + - tell me what is rasa x +``` + +Next, we'll need to define the responses associated with these FAQs in a new +file: + +```yaml title="data/responses.yml" +responses: + faq/ask_channels: + - text: | + We have a comprehensive list of [supported connectors](https://rasa.com/docs/core/connectors/), but if + you don't see the one you're looking for, you can always create a custom connector by following + [this guide](./connectors/custom-connectors.mdx). + + faq/ask_languages: + - text: "You can use Rasa to build assistants in any language you want!" + + faq/ask_rasax: + - text: "Rasa X is a tool to learn from real conversations and improve your assistant. 
Read more [here](https://rasa.com/docs/rasa-x/)" +``` + +The `ResponseSelector` should already be at the end of your pipeline configuration: + +```yaml title="config.yml" {14-15} +language: en +pipeline: + - name: WhitespaceTokenizer + - name: RegexFeaturizer + - name: LexicalSyntacticFeaturizer + - name: CountVectorsFeaturizer + - name: CountVectorsFeaturizer + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: DIETClassifier + epochs: 100 + - name: EntitySynonymMapper + - name: ResponseSelector + epochs: 100 +``` + +Now that we've defined the message handling side, we need to make +the dialogue handling parts aware of these changes. First, we need to add the +new intents to our domain: + +```yaml title="domain.yml" +intents: + - greet + - bye + - thank + - faq +``` + +We'll also need to add a [retrieval action](./retrieval-actions.mdx), +which takes care of sending the response predicted from the `ResponseSelector` +back to the user, to the list of actions. These actions always have to start +with the `respond_` prefix: + +```yaml title="domain.yml" +actions: + - respond_faq +``` + +Next we'll write a story so that the dialogue engine knows which action to predict: + +```yml title="data/stories.yml" +stories: +- story: Some question from FAQ + steps: + - intent: faq + - action: respond_faq +``` + +This prediction is handled by the `MemoizationPolicy`, as we described earlier. + +After all of the changes are done, train a new model and test the modified FAQs: + +```bash +rasa train +rasa shell +``` + +At this stage it makes sense to add a few test cases for our conversations: + +```yaml title="tests/conversation_tests.yml" +e2e_tests: +- story: ask channels + steps: + - user: | + What messaging channels does Rasa support? + intent: faq + - action: respond_faq + +- story: ask languages + steps: + - user: | + Which languages can I build assistants in? + intent: faq + - action: respond_faq + +- story: ask rasa x + steps: + - user: | + What's Rasa X? + intent: faq + - action: respond_faq +``` + +You can read more in this [blog post](https://blog.rasa.com/response-retrieval-models/) and the +[Retrieval Actions](./retrieval-actions.mdx) page. + +Using the features we described in this tutorial, you can easily build a context-less assistant. + +:::note Checklist +Here's a minimal checklist of files we modified to build a basic FAQ assistant: + +* `data/nlu.yml`: Add NLU training data for `faq/` intents + +* `data/responses.yml`: Add responses associated with `faq/` intents + +* `config.yml`: Add `ResponseSelector` in your NLU pipeline + +* `domain.yml`: Add a retrieval action `respond_faq` and intent `faq` + +* `data/stories.yml`: Add a simple story for FAQs + +* `tests/conversation_tests.yml`: Add E2E test stories for your FAQs + +::: diff --git a/docs/docs/command-line-interface.mdx b/docs/docs/command-line-interface.mdx new file mode 100644 index 000000000000..afe619ba1fb2 --- /dev/null +++ b/docs/docs/command-line-interface.mdx @@ -0,0 +1,294 @@ +--- +id: command-line-interface +sidebar_label: Command Line Interface +title: Command Line Interface +description: Command line interface for open source chatbot framework Rasa. Learn how to train, test and run your machine learning-based conversational AI assistants +--- + +## Cheat Sheet + +The command line interface (CLI) gives you easy-to-remember commands for common tasks. 
+ +| Command | Effect | +|------------------------|------------------------------------------------------------------------------------------------------------------------------------------| +|`rasa init` |Creates a new project with example training data, actions, and config files. | +|`rasa train` |Trains a model using your NLU data and stories, saves trained model in `./models`. | +|`rasa interactive` |Starts an interactive learning session to create new training data by chatting. | +|`rasa shell` |Loads your trained model and lets you talk to your assistant on the command line. | +|`rasa run` |Starts a Rasa server with your trained model. See the [Model Storage](./model-storage.mdx) docs for details. | +|`rasa run actions` |Starts an action server using the Rasa SDK. | +|`rasa visualize` |Visualizes stories. | +|`rasa test` |Tests a trained Rasa model using your test NLU data and stories. | +|`rasa data split nlu` |Performs a split of your NLU data according to the specified percentages. | +|`rasa data convert nlu` |Converts NLU training data between different formats. | +|`rasa export` |Export conversations from a tracker store to an event broker. | +|`rasa x` |Launch Rasa X locally. | +|`rasa -h` |Shows all available commands. | + +## Create a new project + +A single command sets up a complete project for you with some example training data. + +```bash +rasa init +``` + +This creates the following files: + +```bash +. +├── __init__.py +├── actions.py +├── config.yml +├── credentials.yml +├── data +│   ├── nlu.md +│   └── stories.md +├── domain.yml +├── endpoints.yml +├── models +│ └── <timestamp>.tar.gz +└── tests + └── conversation_tests.md +``` + +The `rasa init` command will ask you if you want to train an initial model using this data. +If you answer no, the `models` directory will be empty. + +With this project setup, common commands are very easy to remember. +To train a model, type `rasa train`, to talk to your model on the command line, `rasa shell`, +to test your model type `rasa test`. + +## Train a Model + +The main command is: + +```bash +rasa train +``` + +This command trains a Rasa model that combines a Rasa NLU and a Rasa Core model. +If you only want to train an NLU or a Core model, you can run `rasa train nlu` or `rasa train core`. +However, Rasa will automatically skip training Core or NLU if the training data and config haven't changed. + +`rasa train` will store the trained model in the directory defined by `--out`. The name of the model +is per default `<timestamp>.tar.gz`. If you want to name your model differently, you can specify the name +using `--fixed-model-name`. + +The following arguments can be used to configure the training process: + +```text [rasa train --help] +``` + +:::note +Make sure training data for Core and NLU are present when training a model using `rasa train`. +If training data for only one model type is present, the command automatically falls back to +`rasa train nlu` or `rasa train core` depending on the provided training files. + +::: + +## Interactive Learning + +To start an interactive learning session with your assistant, run + +```bash +rasa interactive +``` + +If you provide a trained model using the `--model` argument, the interactive learning process +is started with the provided model. If no model is specified, `rasa interactive` will +train a new Rasa model with the data located in `data/` if no other directory was passed to the +`--data` flag. After training the initial model, the interactive learning session starts. 
+Training will be skipped if the training data and config haven't changed. + +During the interactive learning, Rasa will plot the current conversation +and a few similar conversations from the training data to help you +keep track of where you are. You can view the visualization at +[http://localhost:5005/visualization.html](http://localhost:5005/visualization.html) +as soon as you've started interactive learning. +This plotting can take some time; to skip the visualization, run `rasa interactive --skip-visualization`. + + +The full list of arguments that can be set for `rasa interactive` is: + +```text [rasa interactive --help] +``` + +## Talk to your Assistant + +To start a chat session with your assistant on the command line, run: + +```bash +rasa shell +``` + +The model that should be used to interact with your bot can be specified by `--model`. +If you start the shell with an NLU-only model, `rasa shell` allows +you to obtain the intent and entities of any text you type on the command line. +If your model includes a trained Core model, you can chat with your bot and see +what the bot predicts as a next action. +If you have trained a combined Rasa model but nevertheless want to see what your model +extracts as intents and entities from text, you can use the command `rasa shell nlu`. + +To increase the logging level for debugging, run: + +```bash +rasa shell --debug +``` + +:::note +In order to see the typical greetings and/or session start behavior you might see +in an external channel, you will need to explicitly send `/session_start` +as the first message. Otherwise, the session start behavior will begin as described in +[Session configuration](./domain.mdx#session-config). + +::: + +The full list of options for `rasa shell` is: + +```text [rasa shell --help] +``` + +## Start a Server + +To start a server running your Rasa model, run: + +```bash +rasa run +``` + +By default the Rasa server is using HTTP for its communication. To secure the communication with +SSL and run the server on HTTPS, you need to provide a valid certificate and the corresponding +private key file. You can specify these files as part of the `rasa run` command. +If you encrypted your keyfile with a password during creation, +you need to add the `--ssl-password` as well. + +```bash +rasa run --ssl-certificate myssl.crt --ssl-keyfile myssl.key --ssl-password mypassword +``` + +The following arguments can be used to configure your Rasa server: + +```text [rasa run --help] +``` + +For more information on the additional parameters, see [Model Storage](./model-storage.mdx). +See the Rasa [HTTP API](./http-api.mdx) docs for detailed documentation of all the endpoints. + +## Start an Action Server + +To run your action server run + +```bash +rasa run actions +``` + +The following arguments can be used to adapt the server settings: + +```text [rasa run actions --help] +``` + +## Visualize your Stories + +To open a browser tab with a graph showing your stories: + +```bash +rasa visualize +``` + +Normally, training stories in the directory `data` are visualized. If your stories are located +somewhere else, you can specify their location with `--stories`. + +Additional arguments are: + +```text [rasa visualize --help] +``` + +## Evaluating a Model on Test Data + +To evaluate your model on test data, run: + +```bash +rasa test +``` + +Specify the model to test using `--model`. 
+Check out more details in [Evaluating an NLU Model](./testing-your-assistant.mdx#evaluating-an-nlu-model) and [Evaluating a Core Model](./testing-your-assistant.mdx#evaluating-a-core-model).
+
+The following arguments are available for `rasa test`:
+
+```text [rasa test --help]
+```
+
+## Create a Train-Test Split
+
+To create a split of your NLU data, run:
+
+```bash
+rasa data split nlu
+```
+
+You can specify the training data, the fraction, and the output directory using the following arguments:
+
+```text [rasa data split nlu --help]
+```
+
+This command will attempt to keep the proportions of intents the same in train and test.
+If you have NLG data for retrieval actions, this will be saved to separate files:
+
+```bash
+ls train_test_split
+
+      nlg_test_data.md     test_data.json
+      nlg_training_data.md training_data.json
+```
+
+## Convert Data Between Markdown and JSON
+
+To convert NLU data from LUIS data format, WIT data format, Dialogflow data format, JSON, or Markdown
+to JSON or Markdown, run:
+
+```bash
+rasa data convert nlu
+```
+
+You can specify the input file, output file, and the output format with the following arguments:
+
+```text [rasa data convert nlu --help]
+```
+
+## Export Conversations to an Event Broker
+
+To export events from a tracker store using an event broker, run:
+
+```bash
+rasa export
+```
+
+You can specify the location of the environments file, the minimum and maximum
+timestamps of events that should be published, as well as the conversation IDs
+to publish.
+
+```text [rasa export --help]
+```
+
+## Start Rasa X
+
+Rasa X is a toolset that helps you leverage conversations to improve your assistant.
+You can find more information about it <a className="reference external" href="https://rasa.com/docs/rasa-x/" target="_blank">here</a>. You can start Rasa X locally by executing:
+
+```bash
+rasa x
+```
+
+To be able to start Rasa X, you need to have Rasa X local mode installed
+and you need to be in a Rasa project.
+
+:::note
+By default, Rasa X runs on port 5002. Using the argument `--rasa-x-port` allows you to change it to
+any other port.
+
+:::
+
+The following arguments are available for `rasa x`:
+
+```text [rasa x --help]
+```
diff --git a/docs/docs/components/custom-nlu-components.mdx b/docs/docs/components/custom-nlu-components.mdx
new file mode 100644
index 000000000000..c9001dd5f652
--- /dev/null
+++ b/docs/docs/components/custom-nlu-components.mdx
@@ -0,0 +1,50 @@
+---
+id: custom-nlu-components
+sidebar_label: Custom NLU Components
+title: Custom NLU Components
+description: Create custom components to create additional features like sentiment analysis to integrate with open source bot framework Rasa.
+---
+
+You can create a custom component to perform a specific task that NLU doesn't currently offer (for example, sentiment analysis).
+Below is the specification of the [`rasa.nlu.components.Component`](./components/custom-nlu-components.mdx#rasa.nlu.components.Component) class with the methods you'll need to implement.
+
+:::note
+There is a detailed tutorial on building custom components [here](https://blog.rasa.com/enhancing-rasa-nlu-with-custom-components/).
+
+:::
+
+You can add a custom component to your pipeline by adding the module path.
+So if you have a module called `sentiment`
+containing a `SentimentAnalyzer` class:
+
+```yaml
+pipeline:
+- name: "sentiment.SentimentAnalyzer"
+```
+
+Also be sure to read the section on the [Component Lifecycle](../tuning-your-model.mdx#component-lifecycle).
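+
+As a rough illustration of what such a `sentiment.py` module could contain, here is a sketch of
+a toy `SentimentAnalyzer`. The class name, the keyword heuristic, and the `sentiment` output key
+are purely illustrative, and the exact `Component` method signatures can differ between Rasa
+versions, so treat the skeleton referenced below as the authoritative template:
+
+```python
+from typing import Any, Dict, Text
+
+from rasa.nlu.components import Component
+
+
+class SentimentAnalyzer(Component):
+    """Toy component that attaches a sentiment label to each parsed message."""
+
+    defaults: Dict[Text, Any] = {}
+
+    def train(self, training_data, config=None, **kwargs: Any) -> None:
+        # A purely rule-based analyzer has nothing to learn from the NLU examples.
+        pass
+
+    def process(self, message, **kwargs: Any) -> None:
+        # Naive keyword heuristic, for illustration only.
+        text = (message.get("text") or "").lower()
+        sentiment = "positive" if "thanks" in text else "neutral"
+        # Attach the result so it appears in the parsed output of the message.
+        message.set("sentiment", sentiment, add_to_output=True)
+```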
+ +To get started, you can use this skeleton that contains the most important +methods that you should implement: + +```python (docs/sources/tests/nlu/example_component.py) +``` + +:::note +If you create a custom tokenizer you should implement the methods of `rasa.nlu.tokenizers.tokenizer.Tokenizer`. +The `train` and `process` methods are already implemented and you simply need to overwrite the `tokenize` +method. + +::: + +:::note +If you create a custom featurizer you can return two different kind of features: sequence features and sentence +features. The sequence features are a matrix of size `(number-of-tokens x feature-dimension)`, e.g. +the matrix contains a feature vector for every token in the sequence. +The sentence features are represented by a matrix of size `(1 x feature-dimension)`. + +::: + +## Component + +- SKIPPED CLASS DOCUMENTATION - diff --git a/docs/docs/components/entity-extractors.mdx b/docs/docs/components/entity-extractors.mdx new file mode 100644 index 000000000000..2860f8ceb9ea --- /dev/null +++ b/docs/docs/components/entity-extractors.mdx @@ -0,0 +1,451 @@ +--- +id: entity-extractors +sidebar_label: Entity Extractors +title: Entity Extractors +--- + + +Entity extractors extract entities, such as person names or locations, from the user message. + + +## MitieEntityExtractor + + +* **Short** + + MITIE entity extraction (using a [MITIE NER trainer](https://github.com/mit-nlp/MITIE/blob/master/mitielib/src/ner_trainer.cpp)) + + + +* **Outputs** + + `entities` + + + +* **Requires** + + [MitieNLP](../components/language-models.mdx#mitienlp) and `tokens` + + + +* **Output-Example** + + ```json + { + "entities": [{ + "value": "New York City", + "start": 20, + "end": 33, + "confidence": null, + "entity": "city", + "extractor": "MitieEntityExtractor" + }] + } + ``` + + + +* **Description** + + `MitieEntityExtractor` uses the MITIE entity extraction to find entities in a message. The underlying classifier + is using a multi class linear SVM with a sparse linear kernel and custom features. + The MITIE component does not provide entity confidence values. + + :::note + This entity extractor does not rely on any featurizer as it extracts features on its own. + + ::: + + + +* **Configuration** + + ```yaml + pipeline: + - name: "MitieEntityExtractor" + ``` + + +## SpacyEntityExtractor + + +* **Short** + + spaCy entity extraction + + + +* **Outputs** + + `entities` + + + +* **Requires** + + [SpacyNLP](../components/language-models.mdx#spacynlp) + + + +* **Output-Example** + + ```json + { + "entities": [{ + "value": "New York City", + "start": 20, + "end": 33, + "confidence": null, + "entity": "city", + "extractor": "SpacyEntityExtractor" + }] + } + ``` + + + +* **Description** + + Using spaCy this component predicts the entities of a message. spaCy uses a statistical BILOU transition model. + As of now, this component can only use the spaCy builtin entity extraction models and can not be retrained. + This extractor does not provide any confidence scores. + + You can test out spaCy's entity extraction models in this [interactive demo](https://explosion.ai/demos/displacy-ent). + Note that some spaCy models are highly case-sensitive. + +:::note +The `SpacyEntityExtractor` extractor does not provide a `confidence` level and will always return `null`. + +::: + +* **Configuration** + + Configure which dimensions, i.e. entity types, the spaCy component + should extract. 
A full list of available dimensions can be found in + the [spaCy documentation](https://spacy.io/api/annotation#section-named-entities). + Leaving the dimensions option unspecified will extract all available dimensions. + + ```yaml + pipeline: + - name: "SpacyEntityExtractor" + # dimensions to extract + dimensions: ["PERSON", "LOC", "ORG", "PRODUCT"] + ``` + + +## CRFEntityExtractor + + +* **Short** + + Conditional random field (CRF) entity extraction + + + +* **Outputs** + + `entities` + + + +* **Requires** + + `tokens` and `dense_features` (optional) + + + +* **Output-Example** + + ```json + { + "entities": [{ + "value": "New York City", + "start": 20, + "end": 33, + "entity": "city", + "confidence": 0.874, + "extractor": "CRFEntityExtractor" + }] + } + ``` + + + +* **Description** + + This component implements a conditional random fields (CRF) to do named entity recognition. + CRFs can be thought of as an undirected Markov chain where the time steps are words + and the states are entity classes. Features of the words (capitalization, POS tagging, + etc.) give probabilities to certain entity classes, as are transitions between + neighbouring entity tags: the most likely set of tags is then calculated and returned. + + + If you want to pass custom features, such as pre-trained word embeddings, to `CRFEntityExtractor`, you can + add any dense featurizer to the pipeline before the `CRFEntityExtractor`. + `CRFEntityExtractor` automatically finds the additional dense features and checks if the dense features are an + iterable of `len(tokens)`, where each entry is a vector. + A warning will be shown in case the check fails. + However, `CRFEntityExtractor` will continue to train just without the additional custom features. + In case dense features are present, `CRFEntityExtractor` will pass the dense features to `sklearn_crfsuite` + and use them for training. + + + +* **Configuration** + + `CRFEntityExtractor` has a list of default features to use. + However, you can overwrite the default configuration. + The following features are available: + + ``` + ============== ========================================================================================== + Feature Name Description + ============== ========================================================================================== + low Checks if the token is lower case. + upper Checks if the token is upper case. + title Checks if the token starts with an uppercase character and all remaining characters are + lowercased. + digit Checks if the token contains just digits. + prefix5 Take the first five characters of the token. + prefix2 Take the first two characters of the token. + suffix5 Take the last five characters of the token. + suffix3 Take the last three characters of the token. + suffix2 Take the last two characters of the token. + suffix1 Take the last character of the token. + pos Take the Part-of-Speech tag of the token (``SpacyTokenizer`` required). + pos2 Take the first two characters of the Part-of-Speech tag of the token + (``SpacyTokenizer`` required). + pattern Take the patterns defined by ``RegexFeaturizer``. + bias Add an additional "bias" feature to the list of features. + ============== ========================================================================================== + ``` + + As the featurizer is moving over the tokens in a user message with a sliding window, you can define features for + previous tokens, the current token, and the next tokens in the sliding window. 
+ You define the features as [before, token, after] array. + + Additional you can set a flag to determine whether to use the BILOU tagging schema or not. + + * `BILOU_flag` determines whether to use BILOU tagging or not. Default `True`. + + ```yaml + pipeline: + - name: "CRFEntityExtractor" + # BILOU_flag determines whether to use BILOU tagging or not. + "BILOU_flag": True + # features to extract in the sliding window + "features": [ + ["low", "title", "upper"], + [ + "bias", + "low", + "prefix5", + "prefix2", + "suffix5", + "suffix3", + "suffix2", + "upper", + "title", + "digit", + "pattern", + ], + ["low", "title", "upper"], + ] + # The maximum number of iterations for optimization algorithms. + "max_iterations": 50 + # weight of the L1 regularization + "L1_c": 0.1 + # weight of the L2 regularization + "L2_c": 0.1 + # Name of dense featurizers to use. + # If list is empty all available dense features are used. + "featurizers": [] + ``` + + :::note + If POS features are used (`pos` or `pos2`), you need to have `SpacyTokenizer` in your pipeline. + + ::: + + :::note + If `pattern` features are used, you need to have `RegexFeaturizer` in your pipeline. + + ::: + + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="ducklinghttpextractor"></a> + +## DucklingHTTPExtractor + + +* **Short** + + Duckling lets you extract common entities like dates, + amounts of money, distances, and others in a number of languages. + + + +* **Outputs** + + `entities` + + + +* **Requires** + + Nothing + + + +* **Output-Example** + + ```json + { + "entities": [{ + "end": 53, + "entity": "time", + "start": 48, + "value": "2017-04-10T00:00:00.000+02:00", + "confidence": 1.0, + "extractor": "DucklingHTTPExtractor" + }] + } + ``` + + + +* **Description** + + To use this component you need to run a duckling server. The easiest + option is to spin up a docker container using + `docker run -p 8000:8000 rasa/duckling`. + + Alternatively, you can [install duckling directly on your + machine](https://github.com/facebook/duckling#quickstart) and start the server. + + Duckling allows to recognize dates, numbers, distances and other structured entities + and normalizes them. + Please be aware that duckling tries to extract as many entity types as possible without + providing a ranking. For example, if you specify both `number` and `time` as dimensions + for the duckling component, the component will extract two entities: `10` as a number and + `in 10 minutes` as a time from the text `I will be there in 10 minutes`. In such a + situation, your application would have to decide which entity type is be the correct one. + The extractor will always return 1.0 as a confidence, as it is a rule + based system. + + The list of supported languages can be found [here](https://github.com/facebook/duckling/tree/master/Duckling/Dimensions). + + + +* **Configuration** + + Configure which dimensions, i.e. entity types, the duckling component + should extract. A full list of available dimensions can be found in + the [duckling documentation](https://duckling.wit.ai/). + Leaving the dimensions option unspecified will extract all available dimensions. 
+ + ```yaml + pipeline: + - name: "DucklingHTTPExtractor" + # url of the running duckling server + url: "http://localhost:8000" + # dimensions to extract + dimensions: ["time", "number", "amount-of-money", "distance"] + # allows you to configure the locale, by default the language is + # used + locale: "de_DE" + # if not set the default timezone of Duckling is going to be used + # needed to calculate dates from relative expressions like "tomorrow" + timezone: "Europe/Berlin" + # Timeout for receiving response from http url of the running duckling server + # if not set the default timeout of duckling http url is set to 3 seconds. + timeout : 3 + ``` + + +## DIETClassifier + + +* **Short** + + Dual Intent Entity Transformer (DIET) used for intent classification and entity extraction + + + +* **Description** + + You can find the detailed description of the [DIETClassifier](../components/intent-classifiers.mdx#dietclassifier) under the section + Intent Classifiers. + + + +## EntitySynonymMapper + + +* **Short** + + Maps synonymous entity values to the same value. + + + +* **Outputs** + + Modifies existing entities that previous entity extraction components found. + + + +* **Requires** + + An extractor from [Entity Extractors](../components/entity-extractors.mdx) + + + +* **Description** + + If the training data contains defined synonyms, this component will make sure that detected entity values will + be mapped to the same value. For example, if your training data contains the following examples: + + ```json + [ + { + "text": "I moved to New York City", + "intent": "inform_relocation", + "entities": [{ + "value": "nyc", + "start": 11, + "end": 24, + "entity": "city", + }] + }, + { + "text": "I got a new flat in NYC.", + "intent": "inform_relocation", + "entities": [{ + "value": "nyc", + "start": 20, + "end": 23, + "entity": "city", + }] + } + ] + ``` + + This component will allow you to map the entities `New York City` and `NYC` to `nyc`. The entity + extraction will return `nyc` even though the message contains `NYC`. When this component changes an + existing entity, it appends itself to the processor list of this entity. + + + +* **Configuration** + + ```yaml + pipeline: + - name: "EntitySynonymMapper" + ``` + + :::note + When using the `EntitySynonymMapper` as part of an NLU pipeline, it will need to be placed + below any entity extractors in the configuration file. + + ::: diff --git a/docs/docs/components/featurizers.mdx b/docs/docs/components/featurizers.mdx new file mode 100644 index 000000000000..873252146ff3 --- /dev/null +++ b/docs/docs/components/featurizers.mdx @@ -0,0 +1,547 @@ +--- +id: featurizers +sidebar_label: Text Featurizers +title: Text Featurizers +--- + +Text featurizers are divided into two different categories: sparse featurizers and dense featurizers. +Sparse featurizers are featurizers that return feature vectors with a lot of missing values, e.g. zeros. +As those feature vectors would normally take up a lot of memory, we store them as sparse features. +Sparse features only store the values that are non zero and their positions in the vector. +Thus, we save a lot of memory and are able to train on larger datasets. + +All featurizers can return two different kind of features: sequence features and sentence features. +The sequence features are a matrix of size `(number-of-tokens x feature-dimension)`. +The matrix contains a feature vector for every token in the sequence. +This allows us to train sequence models. 
+The sentence features are represented by a matrix of size `(1 x feature-dimension)`. +It contains the feature vector for the complete utterance. +The sentence features can be used in any bag-of-words model. +The corresponding classifier can therefore decide what kind of features to use. +Note: The `feature-dimension` for sequence and sentence features does not have to be the same. + +## MitieFeaturizer + + +* **Short** + + Creates a vector representation of user message and response (if specified) using the MITIE featurizer. + + + +* **Outputs** + + `dense_features` for user messages and responses + + + +* **Requires** + + [MitieNLP](../components/language-models.mdx#mitienlp) + + + +* **Type** + + Dense featurizer + + + +* **Description** + + Creates features for entity extraction, intent classification, and response classification using the MITIE + featurizer. + + :::note + NOT used by the `MitieIntentClassifier` component. But can be used by any component later in the pipeline + that makes use of `dense_features`. + + ::: + + + +* **Configuration** + + The sentence vector, i.e. the vector of the complete utterance, can be calculated in two different ways, either via + mean or via max pooling. You can specify the pooling method in your configuration file with the option `pooling`. + The default pooling method is set to `mean`. + + ```yaml + pipeline: + - name: "MitieFeaturizer" + # Specify what pooling operation should be used to calculate the vector of + # the complete utterance. Available options: 'mean' and 'max'. + "pooling": "mean" + ``` + + +## SpacyFeaturizer + + +* **Short** + + Creates a vector representation of user message and response (if specified) using the spaCy featurizer. + + + +* **Outputs** + + `dense_features` for user messages and responses + + + +* **Requires** + + [SpacyNLP](../components/language-models.mdx#spacynlp) + + + +* **Type** + + Dense featurizer + + + +* **Description** + + Creates features for entity extraction, intent classification, and response classification using the spaCy + featurizer. + + + +* **Configuration** + + The sentence vector, i.e. the vector of the complete utterance, can be calculated in two different ways, either via + mean or via max pooling. You can specify the pooling method in your configuration file with the option `pooling`. + The default pooling method is set to `mean`. + + ```yaml + pipeline: + - name: "SpacyFeaturizer" + # Specify what pooling operation should be used to calculate the vector of + # the complete utterance. Available options: 'mean' and 'max'. + "pooling": "mean" + ``` + + +## ConveRTFeaturizer + + +* **Short** + + Creates a vector representation of user message and response (if specified) using + [ConveRT](https://github.com/PolyAI-LDN/polyai-models) model. + + + +* **Outputs** + + `dense_features` for user messages and responses + + + +* **Requires** + + [ConveRTTokenizer](../components/tokenizers.mdx#converttokenizer) + + + +* **Type** + + Dense featurizer + + + +* **Description** + + Creates features for entity extraction, intent classification, and response selection. + It uses the [default signature](https://github.com/PolyAI-LDN/polyai-models#tfhub-signatures) to compute vector + representations of input text. + + :::note + Since `ConveRT` model is trained only on an English corpus of conversations, this featurizer should only + be used if your training data is in English language. + + ::: + + :::note + To use `ConveRTTokenizer`, install Rasa Open Source with `pip install rasa[convert]`. 
+ + ::: + + + +* **Configuration** + + ```yaml + pipeline: + - name: "ConveRTFeaturizer" + ``` + + +## LanguageModelFeaturizer + + +* **Short** + + Creates a vector representation of user message and response (if specified) using a pre-trained language model. + + + +* **Outputs** + + `dense_features` for user messages and responses + + + +* **Requires** + + [HFTransformersNLP](../components/language-models.mdx#lhftransformersnlp) and [LanguageModelTokenizer](../components/tokenizers.mdx#languagemodeltokenizer) + + + +* **Type** + + Dense featurizer + + + +* **Description** + + Creates features for entity extraction, intent classification, and response selection. + Uses the pre-trained language model specified in upstream [HFTransformersNLP](../components/language-models.mdx#lhftransformersnlp) component to compute vector + representations of input text. + + :::note + Please make sure that you use a language model which is pre-trained on the same language corpus as that of your + training data. + + ::: + + + +* **Configuration** + + Include [HFTransformersNLP](../components/language-models.mdx#lhftransformersnlp) and [LanguageModelTokenizer](../components/tokenizers.mdx#languagemodeltokenizer) components before this component. Use + [LanguageModelTokenizer](../components/tokenizers.mdx#languagemodeltokenizer) to ensure tokens are correctly set for all components throughout the pipeline. + + ```yaml + pipeline: + - name: "LanguageModelFeaturizer" + ``` + + +## RegexFeaturizer + + +* **Short** + + Creates a vector representation of user message using regular expressions. + + + +* **Outputs** + + `sparse_features` for user messages and `tokens.pattern` + + + +* **Requires** + + `tokens` + + + +* **Type** + + Sparse featurizer + + + +* **Description** + + Creates features for entity extraction and intent classification. + During training the `RegexFeaturizer` creates a list of regular expressions defined in the training + data format. + For each regex, a feature will be set marking whether this expression was found in the user message or not. + All features will later be fed into an intent classifier / entity extractor to simplify classification (assuming + the classifier has learned during the training phase, that this set feature indicates a certain intent / entity). + Regex features for entity extraction are currently only supported by the [CRFEntityExtractor](../components/entity-extractors.mdx#crfentityextractor) and the + [DIETClassifier](../components/intent-classifiers.mdx#dietclassifier) components! + + + +* **Configuration** + + Make the featurizer case insensitive by adding the `case_sensitive: False` option, the default being + `case_sensitive: True`. + + ```yaml + pipeline: + - name: "RegexFeaturizer" + # Text will be processed with case sensitive as default + "case_sensitive": True + ``` + + +## CountVectorsFeaturizer + + +* **Short** + + Creates bag-of-words representation of user messages, intents, and responses. + + + +* **Outputs** + + `sparse_features` for user messages, intents, and responses + + + +* **Requires** + + `tokens` + + + +* **Type** + + Sparse featurizer + + + +* **Description** + + Creates features for intent classification and response selection. + Creates bag-of-words representation of user message, intent, and response using + [sklearn's CountVectorizer](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html). + All tokens which consist only of digits (e.g. 
123 and 99 but not a123d) will be assigned to the same feature. + + + +* **Configuration** + + See [sklearn's CountVectorizer docs](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html) + for detailed description of the configuration parameters. + + This featurizer can be configured to use word or character n-grams, using the `analyzer` configuration parameter. + By default `analyzer` is set to `word` so word token counts are used as features. + If you want to use character n-grams, set `analyzer` to `char` or `char_wb`. + The lower and upper boundaries of the n-grams can be configured via the parameters `min_ngram` and `max_ngram`. + By default both of them are set to `1`. + + :::note + Option `char_wb` creates character n-grams only from text inside word boundaries; + n-grams at the edges of words are padded with space. + This option can be used to create [Subword Semantic Hashing](https://arxiv.org/abs/1810.07150). + + ::: + + :::note + For character n-grams do not forget to increase `min_ngram` and `max_ngram` parameters. + Otherwise the vocabulary will contain only single letters. + + ::: + + Handling Out-Of-Vocabulary (OOV) words: + + :::note + Enabled only if `analyzer` is `word`. + + ::: + + Since the training is performed on limited vocabulary data, it cannot be guaranteed that during prediction + an algorithm will not encounter an unknown word (a word that were not seen during training). + In order to teach an algorithm how to treat unknown words, some words in training data can be substituted + by generic word `OOV_token`. + In this case during prediction all unknown words will be treated as this generic word `OOV_token`. + + For example, one might create separate intent `outofscope` in the training data containing messages of + different number of `OOV_token` s and maybe some additional general words. + Then an algorithm will likely classify a message with unknown words as this intent `outofscope`. + + You can either set the `OOV_token` or a list of words `OOV_words`: + + * `OOV_token` set a keyword for unseen words; if training data contains `OOV_token` as words in some + messages, during prediction the words that were not seen during training will be substituted with + provided `OOV_token`; if `OOV_token=None` (default behavior) words that were not seen during + training will be ignored during prediction time; + + * `OOV_words` set a list of words to be treated as `OOV_token` during training; if a list of words + that should be treated as Out-Of-Vocabulary is known, it can be set to `OOV_words` instead of manually + changing it in training data or using custom preprocessor. + + :::note + This featurizer creates a bag-of-words representation by **counting** words, + so the number of `OOV_token` in the sentence might be important. + + ::: + + :::note + Providing `OOV_words` is optional, training data can contain `OOV_token` input manually or by custom + additional preprocessor. + Unseen words will be substituted with `OOV_token` **only** if this token is present in the training + data or `OOV_words` list is provided. + + ::: + + If you want to share the vocabulary between user messages and intents, you need to set the option + `use_shared_vocab` to `True`. In that case a common vocabulary set between tokens in intents and user messages + is build. 
+ + ```yaml + pipeline: + - name: "CountVectorsFeaturizer" + # Analyzer to use, either 'word', 'char', or 'char_wb' + "analyzer": "word" + # Set the lower and upper boundaries for the n-grams + "min_ngram": 1 + "max_ngram": 1 + # Set the out-of-vocabulary token + "OOV_token": "_oov_" + # Whether to use a shared vocab + "use_shared_vocab": False + ``` + + <details> + <summary> + The above configuration parameters are the ones you should configure to fit your model to your data. + However, additional parameters exist that can be adapted. + </summary> + + ``` + +-------------------+-------------------------+--------------------------------------------------------------+ + | Parameter | Default Value | Description | + +===================+=========================+==============================================================+ + | use_shared_vocab | False | If set to 'True' a common vocabulary is used for labels | + | | | and user message. | + +-------------------+-------------------------+--------------------------------------------------------------+ + | analyzer | word | Whether the features should be made of word n-gram or | + | | | character n-grams. Option 'char_wb' creates character | + | | | n-grams only from text inside word boundaries; | + | | | n-grams at the edges of words are padded with space. | + | | | Valid values: 'word', 'char', 'char_wb'. | + +-------------------+-------------------------+--------------------------------------------------------------+ + | strip_accents | None | Remove accents during the pre-processing step. | + | | | Valid values: 'ascii', 'unicode', 'None'. | + +-------------------+-------------------------+--------------------------------------------------------------+ + | stop_words | None | A list of stop words to use. | + | | | Valid values: 'english' (uses an internal list of | + | | | English stop words), a list of custom stop words, or | + | | | 'None'. | + +-------------------+-------------------------+--------------------------------------------------------------+ + | min_df | 1 | When building the vocabulary ignore terms that have a | + | | | document frequency strictly lower than the given threshold. | + +-------------------+-------------------------+--------------------------------------------------------------+ + | max_df | 1 | When building the vocabulary ignore terms that have a | + | | | document frequency strictly higher than the given threshold | + | | | (corpus-specific stop words). | + +-------------------+-------------------------+--------------------------------------------------------------+ + | min_ngram | 1 | The lower boundary of the range of n-values for different | + | | | word n-grams or char n-grams to be extracted. | + +-------------------+-------------------------+--------------------------------------------------------------+ + | max_ngram | 1 | The upper boundary of the range of n-values for different | + | | | word n-grams or char n-grams to be extracted. | + +-------------------+-------------------------+--------------------------------------------------------------+ + | max_features | None | If not 'None', build a vocabulary that only consider the top | + | | | max_features ordered by term frequency across the corpus. | + +-------------------+-------------------------+--------------------------------------------------------------+ + | lowercase | True | Convert all characters to lowercase before tokenizing. 
| + +-------------------+-------------------------+--------------------------------------------------------------+ + | OOV_token | None | Keyword for unseen words. | + +-------------------+-------------------------+--------------------------------------------------------------+ + | OOV_words | [] | List of words to be treated as 'OOV_token' during training. | + +-------------------+-------------------------+--------------------------------------------------------------+ + | alias | CountVectorFeaturizer | Alias name of featurizer. | + +-------------------+-------------------------+--------------------------------------------------------------+ + ``` + + </details> + + +## LexicalSyntacticFeaturizer + + +* **Short** + + Creates lexical and syntactic features for a user message to support entity extraction. + + + +* **Outputs** + + `sparse_features` for user messages + + + +* **Requires** + + `tokens` + + + +* **Type** + + Sparse featurizer + + + +* **Description** + + Creates features for entity extraction. + Moves with a sliding window over every token in the user message and creates features according to the + configuration (see below). As a default configuration is present, you don't need to specify a configuration. + + + +* **Configuration** + + You can configure what kind of lexical and syntactic features the featurizer should extract. + The following features are available: + + ``` + ============== ========================================================================================== + Feature Name Description + ============== ========================================================================================== + BOS Checks if the token is at the beginning of the sentence. + EOS Checks if the token is at the end of the sentence. + low Checks if the token is lower case. + upper Checks if the token is upper case. + title Checks if the token starts with an uppercase character and all remaining characters are + lowercased. + digit Checks if the token contains just digits. + prefix5 Take the first five characters of the token. + prefix2 Take the first two characters of the token. + suffix5 Take the last five characters of the token. + suffix3 Take the last three characters of the token. + suffix2 Take the last two characters of the token. + suffix1 Take the last character of the token. + pos Take the Part-of-Speech tag of the token (``SpacyTokenizer`` required). + pos2 Take the first two characters of the Part-of-Speech tag of the token + (``SpacyTokenizer`` required). + ============== ========================================================================================== + ``` + + As the featurizer is moving over the tokens in a user message with a sliding window, you can define features for + previous tokens, the current token, and the next tokens in the sliding window. + You define the features as a [before, token, after] array. + If you want to define features for the token before, the current token, and the token after, + your features configuration would look like this: + + ```yaml + pipeline: + - name: LexicalSyntacticFeaturizer + "features": [ + ["low", "title", "upper"], + ["BOS", "EOS", "low", "upper", "title", "digit"], + ["low", "title", "upper"], + ] + ``` + + This configuration is also the default configuration. + + :::note + If you want to make use of `pos` or `pos2` you need to add `SpacyTokenizer` to your pipeline. 
+ + ::: diff --git a/docs/docs/components/intent-classifiers.mdx b/docs/docs/components/intent-classifiers.mdx new file mode 100644 index 000000000000..58f9598a59b0 --- /dev/null +++ b/docs/docs/components/intent-classifiers.mdx @@ -0,0 +1,550 @@ +--- +id: intent-classifiers +sidebar_label: Intent Classifiers +title: Intent Classifiers +--- + +Intent classifiers assign one of the intents defined in the domain file to incoming user messages. + +## MitieIntentClassifier + + +* **Short** + + MITIE intent classifier (using a + [text categorizer](https://github.com/mit-nlp/MITIE/blob/master/examples/python/text_categorizer_pure_model.py)) + + + +* **Outputs** + + `intent` + + + +* **Requires** + + `tokens` for user message and [MitieNLP](../components/language-models.mdx#mitienlp) + + + +* **Output-Example** + + ```json + { + "intent": {"name": "greet", "confidence": 0.98343} + } + ``` + + + +* **Description** + + This classifier uses MITIE to perform intent classification. The underlying classifier + is using a multi-class linear SVM with a sparse linear kernel (see + [MITIE trainer code](https://github.com/mit-nlp/MITIE/blob/master/mitielib/src/text_categorizer_trainer.cpp#L222)). + + :::note + This classifier does not rely on any featurizer as it extracts features on its own. + + ::: + + + +* **Configuration** + + ```yaml + pipeline: + - name: "MitieIntentClassifier" + ``` + + +## SklearnIntentClassifier + + +* **Short** + + Sklearn intent classifier + + + +* **Outputs** + + `intent` and `intent_ranking` + + + +* **Requires** + + `dense_features` for user messages + + + +* **Output-Example** + + ```json + { + "intent": {"name": "greet", "confidence": 0.78343}, + "intent_ranking": [ + { + "confidence": 0.1485910906220309, + "name": "goodbye" + }, + { + "confidence": 0.08161531595656784, + "name": "restaurant_search" + } + ] + } + ``` + + + +* **Description** + + The sklearn intent classifier trains a linear SVM which gets optimized using a grid search. It also provides + rankings of the labels that did not “win”. The `SklearnIntentClassifier` needs to be preceded by a dense + featurizer in the pipeline. This dense featurizer creates the features used for the classification. + For more information about the algorithm itself, take a look at the + [GridSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) + documentation. + + + +* **Configuration** + + During the training of the SVM a hyperparameter search is run to find the best parameter set. + In the configuration you can specify the parameters that will get tried. + + ```yaml + pipeline: + - name: "SklearnIntentClassifier" + # Specifies the list of regularization values to + # cross-validate over for C-SVM. + # This is used with the ``kernel`` hyperparameter in GridSearchCV. + C: [1, 2, 5, 10, 20, 100] + # Specifies the kernel to use with C-SVM. + # This is used with the ``C`` hyperparameter in GridSearchCV. + kernels: ["linear"] + # Gamma parameter of the C-SVM. + "gamma": [0.1] + # We try to find a good number of cross folds to use during + # intent training, this specifies the max number of folds. + "max_cross_validation_folds": 5 + # Scoring function used for evaluating the hyper parameters. + # This can be a name or a function. + "scoring_function": "f1_weighted" + ``` + + +## KeywordIntentClassifier + + +* **Short** + + Simple keyword matching intent classifier, intended for small, short-term projects. 
+ + + +* **Outputs** + + `intent` + + + +* **Requires** + + Nothing + + + +* **Output-Example** + + ```json + { + "intent": {"name": "greet", "confidence": 1.0} + } + ``` + + + +* **Description** + + This classifier works by searching a message for keywords. + The matching is case sensitive by default and searches only for exact matches of the keyword-string in the user + message. + The keywords for an intent are the examples of that intent in the NLU training data. + This means the entire example is the keyword, not the individual words in the example. + + :::note + This classifier is intended only for small projects or to get started. If + you have few NLU training data, you can take a look at the recommended pipelines in + [Tuning Your Model](./tuning-your-model.mdx). + + ::: + + + +* **Configuration** + + ```yaml + pipeline: + - name: "KeywordIntentClassifier" + case_sensitive: True + ``` + + +## DIETClassifier + + +* **Short** + + Dual Intent Entity Transformer (DIET) used for intent classification and entity extraction + + + +* **Outputs** + + `entities`, `intent` and `intent_ranking` + + + +* **Requires** + + `dense_features` and/or `sparse_features` for user message and optionally the intent + + + +* **Output-Example** + + ```json + { + "intent": {"name": "greet", "confidence": 0.8343}, + "intent_ranking": [ + { + "confidence": 0.385910906220309, + "name": "goodbye" + }, + { + "confidence": 0.28161531595656784, + "name": "restaurant_search" + } + ], + "entities": [{ + "end": 53, + "entity": "time", + "start": 48, + "value": "2017-04-10T00:00:00.000+02:00", + "confidence": 1.0, + "extractor": "DIETClassifier" + }] + } + ``` + + + +* **Description** + + DIET (Dual Intent and Entity Transformer) is a multi-task architecture for intent classification and entity + recognition. The architecture is based on a transformer which is shared for both tasks. + A sequence of entity labels is predicted through a Conditional Random Field (CRF) tagging layer on top of the + transformer output sequence corresponding to the input sequence of tokens. + For the intent labels the transformer output for the complete utterance and intent labels are embedded into a + single semantic vector space. We use the dot-product loss to maximize the similarity with the target label and + minimize similarities with negative samples. + + If you want to learn more about the model, please take a look at our + [videos](https://www.youtube.com/playlist?list=PL75e0qA87dlG-za8eLI6t0_Pbxafk-cxb) where we explain the model + architecture in detail. + + :::note + If during prediction time a message contains **only** words unseen during training + and no Out-Of-Vocabulary preprocessor was used, an empty intent `None` is predicted with confidence + `0.0`. This might happen if you only use the [CountVectorsFeaturizer](../components/featurizers.mdx#countvectorsfeaturizer) with a `word` analyzer + as featurizer. If you use the `char_wb` analyzer, you should always get an intent with a confidence + value `> 0.0`. + + ::: + + + +* **Configuration** + + If you want to use the `DIETClassifier` just for intent classification, set `entity_recognition` to `False`. + If you want to do only entity recognition, set `intent_classification` to `False`. + By default `DIETClassifier` does both, i.e. `entity_recognition` and `intent_classification` are set to + `True`. + + You can define a number of hyperparameters to adapt the model. 
+ If you want to adapt your model, start by modifying the following parameters: + + * `epochs`: + This parameter sets the number of times the algorithm will see the training data (default: `300`). + One `epoch` is equals to one forward pass and one backward pass of all the training examples. + Sometimes the model needs more epochs to properly learn. + Sometimes more epochs don't influence the performance. + The lower the number of epochs the faster the model is trained. + + * `hidden_layers_sizes`: + This parameter allows you to define the number of feed forward layers and their output + dimensions for user messages and intents (default: `text: [], label: []`). + Every entry in the list corresponds to a feed forward layer. + For example, if you set `text: [256, 128]`, we will add two feed forward layers in front of + the transformer. The vectors of the input tokens (coming from the user message) will be passed on to those + layers. The first layer will have an output dimension of 256 and the second layer will have an output + dimension of 128. If an empty list is used (default behavior), no feed forward layer will be + added. + Make sure to use only positive integer values. Usually, numbers of power of two are used. + Also, it is usual practice to have decreasing values in the list: next value is smaller or equal to the + value before. + + * `embedding_dimension`: + This parameter defines the output dimension of the embedding layers used inside the model (default: `20`). + We are using multiple embeddings layers inside the model architecture. + For example, the vector of the complete utterance and the intent is passed on to an embedding layer before + they are compared and the loss is calculated. + + * `number_of_transformer_layers`: + This parameter sets the number of transformer layers to use (default: `2`). + The number of transformer layers corresponds to the transformer blocks to use for the model. + + * `transformer_size`: + This parameter sets the number of units in the transformer (default: `256`). + The vectors coming out of the transformers will have the given `transformer_size`. + + * `weight_sparsity`: + This parameter defines the fraction of kernel weights that are set to 0 for all feed forward layers + in the model (default: `0.8`). The value should be between 0 and 1. If you set `weight_sparsity` + to 0, no kernel weights will be set to 0, the layer acts as a standard feed forward layer. You should not + set `weight_sparsity` to 1 as this would result in all kernel weights being 0, i.e. the model is not able + to learn. + + <details> + <summary> + The above configuration parameters are the ones you should configure to fit your model to your data. + However, additional parameters exist that can be adapted. + </summary> + + ``` + +---------------------------------+------------------+--------------------------------------------------------------+ + | Parameter | Default Value | Description | + +=================================+==================+==============================================================+ + | hidden_layers_sizes | text: [] | Hidden layer sizes for layers before the embedding layers | + | | label: [] | for user messages and labels. The number of hidden layers is | + | | | equal to the length of the corresponding. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | share_hidden_layers | False | Whether to share the hidden layer weights between user | + | | | messages and labels. 
| + +---------------------------------+------------------+--------------------------------------------------------------+ + | transformer_size | 256 | Number of units in transformer. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | number_of_transformer_layers | 2 | Number of transformer layers. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | number_of_attention_heads | 4 | Number of attention heads in transformer. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | use_key_relative_attention | False | If 'True' use key relative embeddings in attention. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | use_value_relative_attention | False | If 'True' use value relative embeddings in attention. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | max_relative_position | None | Maximum position for relative embeddings. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | unidirectional_encoder | False | Use a unidirectional or bidirectional encoder. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | batch_size | [64, 256] | Initial and final value for batch sizes. | + | | | Batch size will be linearly increased for each epoch. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | batch_strategy | "balanced" | Strategy used when creating batches. | + | | | Can be either 'sequence' or 'balanced'. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | epochs | 300 | Number of epochs to train. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | random_seed | None | Set random seed to any 'int' to get reproducible results. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | learning_rate | 0.001 | Initial learning rate for the optimizer. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | embedding_dimension | 20 | Dimension size of embedding vectors. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | dense_dimension | text: 512 | Dense dimension for sparse features to use if no dense | + | | label: 20 | features are present. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | concat_dimension | text: 512 | Concat dimension for sequence and sentence features. | + | | label: 20 | | + +---------------------------------+------------------+--------------------------------------------------------------+ + | number_of_negative_examples | 20 | The number of incorrect labels. The algorithm will minimize | + | | | their similarity to the user input during training. 
| + +---------------------------------+------------------+--------------------------------------------------------------+ + | similarity_type | "auto" | Type of similarity measure to use, either 'auto' or 'cosine' | + | | | or 'inner'. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | loss_type | "softmax" | The type of the loss function, either 'softmax' or 'margin'. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | ranking_length | 10 | Number of top actions to normalize scores for loss type | + | | | 'softmax'. Set to 0 to turn off normalization. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | maximum_positive_similarity | 0.8 | Indicates how similar the algorithm should try to make | + | | | embedding vectors for correct labels. | + | | | Should be 0.0 < ... < 1.0 for 'cosine' similarity type. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | maximum_negative_similarity | -0.4 | Maximum negative similarity for incorrect labels. | + | | | Should be -1.0 < ... < 1.0 for 'cosine' similarity type. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | use_maximum_negative_similarity | True | If 'True' the algorithm only minimizes maximum similarity | + | | | over incorrect intent labels, used only if 'loss_type' is | + | | | set to 'margin'. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | scale_loss | False | Scale loss inverse proportionally to confidence of correct | + | | | prediction. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | regularization_constant | 0.002 | The scale of regularization. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | negative_margin_scale | 0.8 | The scale of how important it is to minimize the maximum | + | | | similarity between embeddings of different labels. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | weight_sparsity | 0.8 | Sparsity of the weights in dense layers. | + | | | Value should be between 0 and 1. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | drop_rate | 0.2 | Dropout rate for encoder. Value should be between 0 and 1. | + | | | The higher the value the higher the regularization effect. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | drop_rate_attention | 0.0 | Dropout rate for attention. Value should be between 0 and 1. | + | | | The higher the value the higher the regularization effect. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | use_sparse_input_dropout | True | If 'True' apply dropout to sparse input tensors. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | use_dense_input_dropout | True | If 'True' apply dropout to dense input tensors. 
| + +---------------------------------+------------------+--------------------------------------------------------------+ + | evaluate_every_number_of_epochs | 20 | How often to calculate validation accuracy. | + | | | Set to '-1' to evaluate just once at the end of training. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | evaluate_on_number_of_examples | 0 | How many examples to use for hold out validation set. | + | | | Large values may hurt performance, e.g. model accuracy. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | intent_classification | True | If 'True' intent classification is trained and intents are | + | | | predicted. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | entity_recognition | True | If 'True' entity recognition is trained and entities are | + | | | extracted. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | use_masked_language_model | False | If 'True' random tokens of the input message will be masked | + | | | and the model has to predict those tokens. It acts like a | + | | | regularizer and should help to learn a better contextual | + | | | representation of the input. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | tensorboard_log_directory | None | If you want to use tensorboard to visualize training | + | | | metrics, set this option to a valid output directory. You | + | | | can view the training metrics after training in tensorboard | + | | | via 'tensorboard --logdir <path-to-given-directory>'. | + +---------------------------------+------------------+--------------------------------------------------------------+ + | tensorboard_log_level | "epoch" | Define when training metrics for tensorboard should be | + | | | logged. Either after every epoch ('epoch') or for every | + | | | training step ('minibatch'). | + +---------------------------------+------------------+--------------------------------------------------------------+ + | featurizers | [] | List of featurizer names (alias names). Only features | + | | | coming from the listed names are used. If list is empty | + | | | all available features are used. | + +---------------------------------+------------------+--------------------------------------------------------------+ + ``` + + :::note + For `cosine` similarity `maximum_positive_similarity` and `maximum_negative_similarity` should + be between `-1` and `1`. + + ::: + + :::note + There is an option to use linearly increasing batch size. The idea comes from + [https://arxiv.org/abs/1711.00489](https://arxiv.org/abs/1711.00489). + In order to do it pass a list to `batch_size`, e.g. `"batch_size": [64, 256]` (default behavior). + If constant `batch_size` is required, pass an `int`, e.g. `"batch_size": 64`. + + ::: + + :::note + Parameter `maximum_negative_similarity` is set to a negative value to mimic the original + starspace algorithm in the case `maximum_negative_similarity = maximum_positive_similarity` + and `use_maximum_negative_similarity = False`. + See [starspace paper](https://arxiv.org/abs/1709.03856) for details. 
+
+  :::
+
+  </details>
+
+## FallbackClassifier
+
+* **Short**
+
+  Classifies a message with the intent `nlu_fallback` if the NLU intent classification
+  scores are ambiguous.
+
+* **Outputs**
+
+  `entities`, `intent` and `intent_ranking`
+
+* **Requires**
+
+  `intent` and `intent_ranking` output from a previous intent classifier
+
+* **Output-Example**
+
+  ```json
+
+  {
+    "intent": {"name": "nlu_fallback", "confidence": 1.0},
+    "intent_ranking": [
+      {
+        "confidence": 1.0,
+        "name": "nlu_fallback"
+      },
+      {
+        "confidence": 0.28161531595656784,
+        "name": "restaurant_search"
+      }
+    ],
+    "entities": [{
+      "end": 53,
+      "entity": "time",
+      "start": 48,
+      "value": "2017-04-10T00:00:00.000+02:00",
+      "confidence": 1.0,
+      "extractor": "DIETClassifier"
+    }]
+  }
+  ```
+
+* **Description**
+
+  The `FallbackClassifier` classifies a user message with the intent `nlu_fallback`
+  in case the previous intent classifier wasn't
+  able to classify an intent with a confidence greater than or equal to the `threshold`
+  of the `FallbackClassifier`. It can also predict the fallback intent in case the
+  confidence scores of the two top-ranked intents are closer to each other than the
+  `ambiguity_threshold`.
+
+  You can use the `FallbackClassifier` to implement a
+  [Fallback Action](./fallback-handoff.mdx#fallbackactions) which handles messages with uncertain
+  NLU predictions.
+
+  ```yaml
+  rules:
+
+  - rule: Ask the user to rephrase in case of low NLU confidence
+    steps:
+    - intent: nlu_fallback
+    - action: utter_please_rephrase
+  ```
+* **Configuration**
+
+  The `FallbackClassifier` will only add its prediction for the `nlu_fallback`
+  intent in case no other intent was predicted with a confidence greater than or
+  equal to `threshold`.
+
+  - `threshold`:
+    This parameter sets the threshold for predicting the `nlu_fallback` intent.
+    If no intent predicted by a previous
+    intent classifier has a confidence
+    level greater than or equal to `threshold`, the `FallbackClassifier` will add
+    a prediction of the `nlu_fallback` intent with a confidence `1.0`.
+  - `ambiguity_threshold`: If you configure an `ambiguity_threshold`, the
+    `FallbackClassifier` will also predict the `nlu_fallback` intent in case
+    the difference between the confidence scores of the two highest ranked intents is
+    smaller than the `ambiguity_threshold`.
diff --git a/docs/docs/components/language-models.mdx b/docs/docs/components/language-models.mdx
new file mode 100644
index 000000000000..2ebf18e29fe8
--- /dev/null
+++ b/docs/docs/components/language-models.mdx
@@ -0,0 +1,211 @@
+---
+id: language-models
+sidebar_label: Language Models
+title: Language Models
+---
+
+The following components load pre-trained models that are needed if you want to use pre-trained
+word vectors in your pipeline.
+
+
+## MitieNLP
+
+
+* **Short**
+
+  MITIE initializer
+
+
+
+* **Outputs**
+
+  Nothing
+
+
+
+* **Requires**
+
+  Nothing
+
+
+
+* **Description**
+
+  Initializes MITIE structures. Every MITIE component relies on this,
+  hence this should be put at the beginning
+  of every pipeline that uses any MITIE components.
+
+
+
+* **Configuration**
+
+  The MITIE library needs a language model file, which **must** be specified in
+  the configuration:
+
+  ```yaml
+  pipeline:
+  - name: "MitieNLP"
+    # language model to load
+    model: "data/total_word_feature_extractor.dat"
+  ```
+
+  For more information on where to get that file, head over to
+  [installing MITIE](../installation.mdx#install-mitie).
+
+
+  You can also pre-train your own word vectors from a language corpus using MITIE. 
To do so: + + 1. Get a clean language corpus (a Wikipedia dump works) as a set of text files. + + 2. Build and run [MITIE Wordrep Tool](https://github.com/mit-nlp/MITIE/tree/master/tools/wordrep) on your corpus. + This can take several hours/days depending on your dataset and your workstation. + You'll need something like 128GB of RAM for wordrep to run – yes, that's a lot: try to extend your swap. + + 3. Set the path of your new `total_word_feature_extractor.dat` as the `model` parameter in your + [configuration](./tuning-your-model.mdx#section-mitie-pipeline). + + For a full example of how to train MITIE word vectors, check out + [this blogpost](http://www.crownpku.com/2017/07/27/%E7%94%A8Rasa_NLU%E6%9E%84%E5%BB%BA%E8%87%AA%E5%B7%B1%E7%9A%84%E4%B8%AD%E6%96%87NLU%E7%B3%BB%E7%BB%9F.html) + of creating a MITIE model from a Chinese Wikipedia dump. + + + +## SpacyNLP + + +* **Short** + + spaCy language initializer + + + +* **Outputs** + + Nothing + + + +* **Requires** + + Nothing + + + +* **Description** + + Initializes spaCy structures. Every spaCy component relies on this, hence this should be put at the beginning + of every pipeline that uses any spaCy components. + + + +* **Configuration** + + You need to specify the language model to use. + By default the language configured in the pipeline will be used as the language model name. + If the spaCy model to be used has a name that is different from the language tag (`"en"`, `"de"`, etc.), + the model name can be specified using the configuration variable `model`. + The name will be passed to `spacy.load(name)`. + + ```yaml + pipeline: + - name: "SpacyNLP" + # language model to load + model: "en_core_web_md" + + # when retrieving word vectors, this will decide if the casing + # of the word is relevant. E.g. `hello` and `Hello` will + # retrieve the same vector, if set to `False`. For some + # applications and models it makes sense to differentiate + # between these two words, therefore setting this to `True`. + case_sensitive: False + ``` + + For more information on how to download the spaCy models, head over to + [installing SpaCy](../installation.mdx#install-spacy). + + In addition to SpaCy's pretrained language models, you can also use this component to + load fastText vectors, which are available for [hundreds of languages](https://github.com/facebookresearch/fastText/blob/master/docs/crawl-vectors.md). + If you want to incorporate a custom model you've found into spaCy, check out their page on + [adding languages](https://spacy.io/usage/adding-languages/). As described in the documentation, you need to + register your language model and link it to the language identifier, which will allow Rasa to load and use your new language + by passing in your language identifier as the `language` option. + + +## HFTransformersNLP + + +* **Short** + + HuggingFace's Transformers based pre-trained language model initializer + + + +* **Outputs** + + Nothing + + + +* **Requires** + + Nothing + + + +* **Description** + + Initializes specified pre-trained language model from HuggingFace's [Transformers library](https://huggingface.co/transformers/). The component applies language model specific tokenization and + featurization to compute sequence and sentence level representations for each example in the training data. 
+ Include [LanguageModelTokenizer](../components/tokenizers.mdx#languagemodeltokenizer) and [LanguageModelFeaturizer](../components/featurizers.mdx#languagemodelfeaturizer) to utilize the output of this + component for downstream NLU models. + + :::note + To use `HFTransformersNLP` component, install Rasa Open Source with `pip install rasa[transformers]`. + + ::: + + + +* **Configuration** + + You should specify what language model to load via the parameter `model_name`. See the below table for the + available language models. + Additionally, you can also specify the architecture variation of the chosen language model by specifying the + parameter `model_weights`. + The full list of supported architectures can be found + [here](https://huggingface.co/transformers/pretrained_models.html). + If left empty, it uses the default model architecture that original Transformers library loads (see table below). + + ``` + +----------------+--------------+-------------------------+ + | Language Model | Parameter | Default value for | + | | "model_name" | "model_weights" | + +----------------+--------------+-------------------------+ + | BERT | bert | bert-base-uncased | + +----------------+--------------+-------------------------+ + | GPT | gpt | openai-gpt | + +----------------+--------------+-------------------------+ + | GPT-2 | gpt2 | gpt2 | + +----------------+--------------+-------------------------+ + | XLNet | xlnet | xlnet-base-cased | + +----------------+--------------+-------------------------+ + | DistilBERT | distilbert | distilbert-base-uncased | + +----------------+--------------+-------------------------+ + | RoBERTa | roberta | roberta-base | + +----------------+--------------+-------------------------+ + ``` + + The following configuration loads the language model BERT: + + ```yaml + pipeline: + - name: HFTransformersNLP + # Name of the language model to use + model_name: "bert" + # Pre-Trained weights to be loaded + model_weights: "bert-base-uncased" + + # An optional path to a specific directory to download and cache the pre-trained model weights. + # The `default` cache_dir is the same as https://huggingface.co/transformers/serialization.html#cache-directory . + cache_dir: null + ``` diff --git a/docs/docs/components/selectors.mdx b/docs/docs/components/selectors.mdx new file mode 100644 index 000000000000..0723a97306c0 --- /dev/null +++ b/docs/docs/components/selectors.mdx @@ -0,0 +1,259 @@ +--- +id: selectors +sidebar_label: Selectors +title: Selectors +--- + +Selectors predict a bot response from a set of candidate responses. + + +## ResponseSelector + + +* **Short** + + Response Selector + + + +* **Outputs** + + A dictionary with key as `direct_response_intent` and value containing `response` and `ranking` + + + +* **Requires** + + `dense_features` and/or `sparse_features` for user messages and response + + + +* **Output-Example** + + ```json + { + "response_selector": { + "faq": { + "response": {"confidence": 0.7356462617, "name": "Supports 3.5, 3.6 and 3.7, recommended version is 3.6"}, + "ranking": [ + {"confidence": 0.7356462617, "name": "Supports 3.5, 3.6 and 3.7, recommended version is 3.6"}, + {"confidence": 0.2134543431, "name": "You can ask me about how to get started"} + ] + } + } + } + ``` + + + +* **Description** + + Response Selector component can be used to build a response retrieval model to directly predict a bot response from + a set of candidate responses. The prediction of this model is used by [Retrieval Actions](../retrieval-actions). 
+
+  It embeds user inputs and response labels into the same space and follows the exact same
+  neural network architecture and optimization as the [DIETClassifier](../components/intent-classifiers.mdx#dietclassifier).
+
+  :::note
+  If during prediction time a message contains **only** words unseen during training
+  and no Out-Of-Vocabulary preprocessor was used, an empty response `None` is predicted with confidence
+  `0.0`. This might happen if you only use the [CountVectorsFeaturizer](../components/featurizers.mdx#countvectorsfeaturizer) with a `word` analyzer
+  as featurizer. If you use the `char_wb` analyzer, you should always get a response with a confidence
+  value `> 0.0`.
+
+  :::
+
+
+
+* **Configuration**
+
+  The algorithm includes almost all the hyperparameters that [DIETClassifier](../components/intent-classifiers.mdx#dietclassifier) uses.
+  If you want to adapt your model, start by modifying the following parameters
+  (see the example configuration after this list):
+
+  * `epochs`:
+    This parameter sets the number of times the algorithm will see the training data (default: `300`).
+    One `epoch` is equal to one forward pass and one backward pass of all the training examples.
+    Sometimes the model needs more epochs to properly learn.
+    Sometimes more epochs don't influence the performance.
+    The lower the number of epochs the faster the model is trained.
+
+  * `hidden_layers_sizes`:
+    This parameter allows you to define the number of feed forward layers and their output
+    dimensions for user messages and intents (default: `text: [256, 128], label: [256, 128]`).
+    Every entry in the list corresponds to a feed forward layer.
+    For example, if you set `text: [256, 128]`, we will add two feed forward layers in front of
+    the transformer. The vectors of the input tokens (coming from the user message) will be passed on to those
+    layers. The first layer will have an output dimension of 256 and the second layer will have an output
+    dimension of 128. If an empty list is used, no feed forward layer will be
+    added.
+    Make sure to use only positive integer values. Usually, powers of two are used.
+    Also, it is common practice to use decreasing values in the list: each value should be smaller than
+    or equal to the value before.
+
+  * `embedding_dimension`:
+    This parameter defines the output dimension of the embedding layers used inside the model (default: `20`).
+    We are using multiple embeddings layers inside the model architecture.
+    For example, the vector of the complete utterance and the intent is passed on to an embedding layer before
+    they are compared and the loss is calculated.
+
+  * `number_of_transformer_layers`:
+    This parameter sets the number of transformer layers to use (default: `0`).
+    The number of transformer layers corresponds to the transformer blocks to use for the model.
+
+  * `transformer_size`:
+    This parameter sets the number of units in the transformer (default: `None`).
+    The vectors coming out of the transformers will have the given `transformer_size`.
+
+  * `weight_sparsity`:
+    This parameter defines the fraction of kernel weights that are set to 0 for all feed forward layers
+    in the model (default: `0.8`). The value should be between 0 and 1. If you set `weight_sparsity`
+    to 0, no kernel weights will be set to 0, the layer acts as a standard feed forward layer. You should not
+    set `weight_sparsity` to 1 as this would result in all kernel weights being 0, i.e. the model is not able
+    to learn. 
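+
+  For example, a configuration that only overrides `epochs` and `hidden_layers_sizes` could look like
+  this (the values shown here are purely illustrative, not tuned recommendations); any parameter that is
+  not listed keeps its default value:
+
+  ```yaml
+  pipeline:
+  - name: "ResponseSelector"
+    epochs: 100
+    hidden_layers_sizes:
+      text: [256, 128]
+      label: [256, 128]
+  ```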
+ + In addition, the component can also be configured to train a response selector for a particular retrieval intent. + The parameter `retrieval_intent` sets the name of the intent for which this response selector model is trained. + Default is `None`, i.e. the model is trained for all retrieval intents. + + <details> + <summary> + The above configuration parameters are the ones you should configure to fit your model to your data. + However, additional parameters exist that can be adapted. + </summary> + + ``` + +---------------------------------+-------------------+--------------------------------------------------------------+ + | Parameter | Default Value | Description | + +=================================+===================+==============================================================+ + | hidden_layers_sizes | text: [256, 128] | Hidden layer sizes for layers before the embedding layers | + | | label: [256, 128] | for user messages and labels. The number of hidden layers is | + | | | equal to the length of the corresponding. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | share_hidden_layers | False | Whether to share the hidden layer weights between user | + | | | messages and labels. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | transformer_size | None | Number of units in transformer. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | number_of_transformer_layers | 0 | Number of transformer layers. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | number_of_attention_heads | 4 | Number of attention heads in transformer. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | use_key_relative_attention | False | If 'True' use key relative embeddings in attention. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | use_value_relative_attention | False | If 'True' use value relative embeddings in attention. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | max_relative_position | None | Maximum position for relative embeddings. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | unidirectional_encoder | False | Use a unidirectional or bidirectional encoder. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | batch_size | [64, 256] | Initial and final value for batch sizes. | + | | | Batch size will be linearly increased for each epoch. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | batch_strategy | "balanced" | Strategy used when creating batches. | + | | | Can be either 'sequence' or 'balanced'. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | epochs | 300 | Number of epochs to train. 
| + +---------------------------------+-------------------+--------------------------------------------------------------+ + | random_seed | None | Set random seed to any 'int' to get reproducible results. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | learning_rate | 0.001 | Initial learning rate for the optimizer. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | embedding_dimension | 20 | Dimension size of embedding vectors. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | dense_dimension | text: 512 | Dense dimension for sparse features to use if no dense | + | | label: 512 | features are present. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | concat_dimension | text: 512 | Concat dimension for sequence and sentence features. | + | | label: 512 | | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | number_of_negative_examples | 20 | The number of incorrect labels. The algorithm will minimize | + | | | their similarity to the user input during training. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | similarity_type | "auto" | Type of similarity measure to use, either 'auto' or 'cosine' | + | | | or 'inner'. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | loss_type | "softmax" | The type of the loss function, either 'softmax' or 'margin'. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | ranking_length | 10 | Number of top actions to normalize scores for loss type | + | | | 'softmax'. Set to 0 to turn off normalization. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | maximum_positive_similarity | 0.8 | Indicates how similar the algorithm should try to make | + | | | embedding vectors for correct labels. | + | | | Should be 0.0 < ... < 1.0 for 'cosine' similarity type. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | maximum_negative_similarity | -0.4 | Maximum negative similarity for incorrect labels. | + | | | Should be -1.0 < ... < 1.0 for 'cosine' similarity type. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | use_maximum_negative_similarity | True | If 'True' the algorithm only minimizes maximum similarity | + | | | over incorrect intent labels, used only if 'loss_type' is | + | | | set to 'margin'. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | scale_loss | True | Scale loss inverse proportionally to confidence of correct | + | | | prediction. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | regularization_constant | 0.002 | The scale of regularization. 
| + +---------------------------------+-------------------+--------------------------------------------------------------+ + | negative_margin_scale | 0.8 | The scale of how important is to minimize the maximum | + | | | similarity between embeddings of different labels. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | weight_sparsity | 0.8 | Sparsity of the weights in dense layers. | + | | | Value should be between 0 and 1. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | drop_rate | 0.2 | Dropout rate for encoder. Value should be between 0 and 1. | + | | | The higher the value the higher the regularization effect. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | drop_rate_attention | 0.0 | Dropout rate for attention. Value should be between 0 and 1. | + | | | The higher the value the higher the regularization effect. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | use_sparse_input_dropout | False | If 'True' apply dropout to sparse input tensors. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | use_dense_input_dropout | False | If 'True' apply dropout to dense input tensors. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | evaluate_every_number_of_epochs | 20 | How often to calculate validation accuracy. | + | | | Set to '-1' to evaluate just once at the end of training. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | evaluate_on_number_of_examples | 0 | How many examples to use for hold out validation set. | + | | | Large values may hurt performance, e.g. model accuracy. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | use_masked_language_model | False | If 'True' random tokens of the input message will be masked | + | | | and the model should predict those tokens. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | retrieval_intent | None | Name of the intent for which this response selector model is | + | | | trained. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | tensorboard_log_directory | None | If you want to use tensorboard to visualize training | + | | | metrics, set this option to a valid output directory. You | + | | | can view the training metrics after training in tensorboard | + | | | via 'tensorboard --logdir <path-to-given-directory>'. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | tensorboard_log_level | "epoch" | Define when training metrics for tensorboard should be | + | | | logged. Either after every epoch ("epoch") or for every | + | | | training step ("minibatch"). | + +---------------------------------+-------------------+--------------------------------------------------------------+ + | featurizers | [] | List of featurizer names (alias names). Only features | + | | | coming from the listed names are used. 
If list is empty | + | | | all available features are used. | + +---------------------------------+-------------------+--------------------------------------------------------------+ + ``` + + :::note + For `cosine` similarity `maximum_positive_similarity` and `maximum_negative_similarity` should + be between `-1` and `1`. + + ::: + + :::note + There is an option to use linearly increasing batch size. The idea comes from + [https://arxiv.org/abs/1711.00489](https://arxiv.org/abs/1711.00489). + In order to do it pass a list to `batch_size`, e.g. `"batch_size": [64, 256]` (default behavior). + If constant `batch_size` is required, pass an `int`, e.g. `"batch_size": 64`. + + ::: + + :::note + Parameter `maximum_negative_similarity` is set to a negative value to mimic the original + starspace algorithm in the case `maximum_negative_similarity = maximum_positive_similarity` + and `use_maximum_negative_similarity = False`. + See [starspace paper](https://arxiv.org/abs/1709.03856) for details. + + ::: + </details> diff --git a/docs/docs/components/tokenizers.mdx b/docs/docs/components/tokenizers.mdx new file mode 100644 index 000000000000..284a8f6203d0 --- /dev/null +++ b/docs/docs/components/tokenizers.mdx @@ -0,0 +1,281 @@ +--- +id: tokenizers +sidebar_label: Tokenizers +title: Tokenizers +--- + +Tokenizers split text into tokens. +If you want to split intents into multiple labels, e.g. for predicting multiple intents or for +modeling hierarchical intent structure, use the following flags with any tokenizer: + +* `intent_tokenization_flag` indicates whether to tokenize intent labels or not. Set it to `True`, so that intent + labels are tokenized. + +* `intent_split_symbol` sets the delimiter string to split the intent labels, default is underscore + (`_`). + + +## WhitespaceTokenizer + + +* **Short** + + Tokenizer using whitespaces as a separator + + + +* **Outputs** + + `tokens` for user messages, responses (if present), and intents (if specified) + + + +* **Requires** + + Nothing + + + +* **Description** + + Creates a token for every whitespace separated character sequence. + + + +* **Configuration** + + ```yaml + pipeline: + - name: "WhitespaceTokenizer" + # Flag to check whether to split intents + "intent_tokenization_flag": False + # Symbol on which intent should be split + "intent_split_symbol": "_" + # Regular expression to detect tokens + "token_pattern": None + ``` + + +## JiebaTokenizer + + +* **Short** + + Tokenizer using Jieba for Chinese language + + + +* **Outputs** + + `tokens` for user messages, responses (if present), and intents (if specified) + + + +* **Requires** + + Nothing + + + +* **Description** + + Creates tokens using the Jieba tokenizer specifically for Chinese + language. It will only work for the Chinese language. + + :::note + To use `JiebaTokenizer` you need to install Jieba with `pip install jieba`. + + ::: + + + +* **Configuration** + + User's custom dictionary files can be auto loaded by specifying the files' directory path via `dictionary_path`. + If the `dictionary_path` is `None` (the default), then no custom dictionary will be used. 
+ + ```yaml + pipeline: + - name: "JiebaTokenizer" + dictionary_path: "path/to/custom/dictionary/dir" + # Flag to check whether to split intents + "intent_tokenization_flag": False + # Symbol on which intent should be split + "intent_split_symbol": "_" + # Regular expression to detect tokens + "token_pattern": None + ``` + + +## MitieTokenizer + + +* **Short** + + Tokenizer using MITIE + + + +* **Outputs** + + `tokens` for user messages, responses (if present), and intents (if specified) + + + +* **Requires** + + [MitieNLP](../components/language-models.mdx#mitienlp) + + + +* **Description** + + Creates tokens using the MITIE tokenizer. + + + +* **Configuration** + + ```yaml + pipeline: + - name: "MitieTokenizer" + # Flag to check whether to split intents + "intent_tokenization_flag": False + # Symbol on which intent should be split + "intent_split_symbol": "_" + # Regular expression to detect tokens + "token_pattern": None + ``` + + +## SpacyTokenizer + + +* **Short** + + Tokenizer using spaCy + + + +* **Outputs** + + `tokens` for user messages, responses (if present), and intents (if specified) + + + +* **Requires** + + [SpacyNLP](../components/language-models.mdx#spacynlp) + + + +* **Description** + + Creates tokens using the spaCy tokenizer. + + + +* **Configuration** + + ```yaml + pipeline: + - name: "SpacyTokenizer" + # Flag to check whether to split intents + "intent_tokenization_flag": False + # Symbol on which intent should be split + "intent_split_symbol": "_" + # Regular expression to detect tokens + "token_pattern": None + ``` + + +## ConveRTTokenizer + + +* **Short** + + Tokenizer using [ConveRT](https://github.com/PolyAI-LDN/polyai-models#convert) model. + + + +* **Outputs** + + `tokens` for user messages, responses (if present), and intents (if specified) + + + +* **Requires** + + Nothing + + + +* **Description** + + Creates tokens using the ConveRT tokenizer. Must be used whenever the [ConveRTFeaturizer](../components/featurizers.mdx#convertfeaturizer) is used. + + :::note + Since `ConveRT` model is trained only on an English corpus of conversations, this tokenizer should only + be used if your training data is in English language. + + ::: + + :::note + To use `ConveRTTokenizer`, install Rasa Open Source with `pip install rasa[convert]`. + + ::: + + + +* **Configuration** + + ```yaml + pipeline: + - name: "ConveRTTokenizer" + # Flag to check whether to split intents + "intent_tokenization_flag": False + # Symbol on which intent should be split + "intent_split_symbol": "_" + # Regular expression to detect tokens + "token_pattern": None + ``` + + +## LanguageModelTokenizer + + +* **Short** + + Tokenizer from pre-trained language models + + + +* **Outputs** + + `tokens` for user messages, responses (if present), and intents (if specified) + + + +* **Requires** + + [HFTransformersNLP](../components/language-models.mdx#hftransformersnlp) + + + +* **Description** + + Creates tokens using the pre-trained language model specified in upstream [HFTransformersNLP](../components/language-models.mdx#hftransformersnlp) component. + Must be used whenever the [LanguageModelFeaturizer](../components/featurizers.mdx#languagemodelfeaturizer) is used. 
+ + + +* **Configuration** + + ```yaml + pipeline: + - name: "LanguageModelTokenizer" + # Flag to check whether to split intents + "intent_tokenization_flag": False + # Symbol on which intent should be split + "intent_split_symbol": "_" + ``` diff --git a/docs/docs/connectors/cisco-webex-teams.mdx b/docs/docs/connectors/cisco-webex-teams.mdx new file mode 100644 index 000000000000..76a3e2360f6d --- /dev/null +++ b/docs/docs/connectors/cisco-webex-teams.mdx @@ -0,0 +1,56 @@ +--- +id: cisco-webex-teams +sidebar_label: Cisco Webex Teams +title: Cisco Webex Teams +description: Build a Rasa Chat Bot on Cisco Webex +--- + +You first have to create a cisco webex app to get credentials. +Once you have them you can add these to your `credentials.yml`. + +## Getting Credentials + +**How to get the Cisco Webex Teams credentials:** + +You need to set up a bot. Check out the Cisco Webex for Developers +[documentation](https://developer.webex.com/docs/bots) for information +about how to create your bot. + +After you have created the bot through Cisco Webex Teams, you need to create a +room in Cisco Webex Teams. Then add the bot in the room the same way you would +add a person in the room. + +You need to note down the room ID for the room you created. This room ID will +be used in `room` variable in the `credentials.yml` file. + +Please follow this link below to find the room ID +`https://developer.webex.com/endpoint-rooms-get.html` + +## Running on Cisco Webex + +If you want to connect to the `webexteams` input channel using the run +script, e.g. using: + +```bash +rasa run +``` + +you need to supply a `credentials.yml` with the following content: + +```yaml +webexteams: + access_token: "YOUR-BOT-ACCESS-TOKEN" + room: "YOUR-CISCOWEBEXTEAMS-ROOM-ID" +``` + +The endpoint for receiving Cisco Webex Teams messages is +`http://localhost:5005/webhooks/webexteams/webhook`, replacing +the host and port with the appropriate values. This is the URL +you should add in the OAuth & Permissions section. + +:::note +If you do not set the `room` keyword +argument, messages will by delivered back to +the user who sent them. + +::: diff --git a/docs/docs/connectors/custom-connectors.mdx b/docs/docs/connectors/custom-connectors.mdx new file mode 100644 index 000000000000..a86b45599a17 --- /dev/null +++ b/docs/docs/connectors/custom-connectors.mdx @@ -0,0 +1,75 @@ +--- +id: custom-connectors +sidebar_label: Custom Connectors +title: Custom Connectors +description: Deploy and Run a Rasa Chat Bot on a custom chat interface +--- + +You can also implement your own custom channel. You can +use the `rasa.core.channels.rest.RestInput` class as a template. +The methods you need to implement are `blueprint` and `name`. The method +needs to create a sanic blueprint that can be attached to a sanic server. + +This allows you to add REST endpoints to the server that the external +messaging service can call to deliver messages. + +Your blueprint should have at least the two routes: `health` on `/`, +and `receive` on the HTTP route `/webhook`. + +The `name` method defines the url prefix. E.g. if your component is +named `myio`, the webhook you can use to attach the external service is: +`http://localhost:5005/webhooks/myio/webhook` (replacing the hostname +and port with your values). 
+ +To send a message, you would run a command like: + +```bash +curl -XPOST http://localhost:5005/webhooks/myio/webhook \ + -d '{"sender": "user1", "message": "hello"}' \ + -H "Content-type: application/json" +``` + +where `myio` is the name of your component. + +If you need to use extra information from your front end in your custom +actions, you can add this information in the `metadata` dict of your user +message. This information will accompany the user message through the rasa +server into the action server when applicable, where you can find it stored in +the `tracker`. Message metadata will not directly affect NLU classification +or action prediction. If you want to change the way metadata is extracted for an +existing channel, you can overwrite the function `get_metadata`. The return value +of this method will be passed to the `UserMessage`. + +Here are all the attributes of `UserMessage`: + +- SKIPPED CLASS DOCUMENTATION -In your implementation of the `receive` endpoint, you need to make +sure to call `on_new_message(UserMessage(text, output, sender_id))`. +This will tell Rasa Core to handle this user message. The `output` +is an output channel implementing the `OutputChannel` class. You can +either implement the methods for your particular chat channel (e.g. there +are methods to send text and images) or you can use the +`CollectingOutputChannel` to collect the bot responses Core +creates while the bot is processing your messages and return +them as part of your endpoint response. This is the way the `RestInput` +channel is implemented. For examples on how to create and use your own output +channel, take a look at the implementations of the other +output channels, e.g. the `SlackBot` in `rasa.core.channels.slack`. + +To use a custom channel, you need to supply a credentials configuration file +`credentials.yml` with the command line argument `--credentials`. +This credentials file has to contain the module path of your custom channel and +any required configuration parameters. For example, this could look like: + +```yaml +mypackage.MyIO: + username: "user_name" + another_parameter: "some value" +``` + +Here is an example implementation for an input channel that receives the messages, +hands them over to Rasa Core, collects the bot utterances, and returns +these bot utterances as the json response to the webhook call that +posted the message to the channel: + +```python (docs/sources/rasa/core/channels/rest.py) +``` diff --git a/docs/docs/connectors/facebook-messenger.mdx b/docs/docs/connectors/facebook-messenger.mdx new file mode 100644 index 000000000000..5086decc14d6 --- /dev/null +++ b/docs/docs/connectors/facebook-messenger.mdx @@ -0,0 +1,137 @@ +--- +id: facebook-messenger +sidebar_label: Facebook Messenger +title: Facebook Messenger +description: Build a Rasa Chat Bot on Facebook Messenger +--- + +## Facebook Setup + +You first need to set up a facebook page and app to get credentials to connect to +Facebook Messenger. Once you have them you can add these to your `credentials.yml`. + +### Getting Credentials + +**How to get the Facebook credentials:** +You need to set up a Facebook app and a page. + +1. To create the app head over to + [Facebook for Developers](https://developers.facebook.com/) + and click on **My Apps** → **Add New App**. + +2. Go onto the dashboard for the app and under **Products**, + find the **Messenger** section and click **Set Up**. Scroll down to + **Token Generation** and click on the link to create a new page for your + app. + +3. 
Create your page and select it in the dropdown menu for the + **Token Generation**. The shown **Page Access Token** is the + `page-access-token` needed later on. + +4. Locate the **App Secret** in the app dashboard under **Settings** → **Basic**. + This will be your `secret`. + +5. Use the collected `secret` and `page-access-token` in your + `credentials.yml`, and add a field called `verify` containing + a string of your choice. Start `rasa run` with the + `--credentials credentials.yml` option. + +6. Set up a **Webhook** and select at least the **messaging** and + **messaging_postback** subscriptions. Insert your callback URL, which will + look like `https://<YOUR_HOST>/webhooks/facebook/webhook`. Insert the + **Verify Token**, which has to match the `verify` + entry in your `credentials.yml`. +
+For more detailed steps, visit the +[Messenger docs](https://developers.facebook.com/docs/graph-api/webhooks). +
+### Running On Facebook Messenger +
+If you want to connect to Facebook using the run script, e.g. using: +
+```bash +rasa run +``` +
+you need to supply a `credentials.yml` with the following content: +
+```yaml +facebook: + verify: "rasa-bot" + secret: "3e34709d01ea89032asdebfe5a74518" + page-access-token: "EAAbHPa7H9rEBAAuFk4Q3gPKbDedQnx4djJJ1JmQ7CAqO4iJKrQcNT0wtD" +``` +
+The endpoint for receiving Facebook Messenger messages is +`http://localhost:5005/webhooks/facebook/webhook`, replacing +the host and port with the appropriate values. This is the URL +you should add in the configuration of the webhook. +
+## Supported response attachments +
+In addition to typical text, image, and custom responses, the Facebook Messenger +channel supports the following additional response template attachments: +
+* [Buttons](https://developers.facebook.com/docs/messenger-platform/send-messages/buttons) + are structured the same as other Rasa buttons. The Facebook API limits the number of + buttons you can send in a message to 3. If more than 3 buttons are provided in a + message, Rasa will ignore all provided buttons. +
+* [Quick Replies](https://developers.facebook.com/docs/messenger-platform/send-messages/quick-replies) + provide a way to present a set of up to 13 buttons in-conversation that contain a + title and optional image, and appear prominently above the composer. You can also + use quick replies to request a person's email address or phone number. +
+ ```yaml + utter_fb_quick_reply_example: + - text: Hello World! + quick_replies: + - title: Text quick reply + payload: /example_intent + - title: Image quick reply + payload: /example_intent + image_url: http://example.com/img/red.png + # below are Facebook provided quick replies + # the title and payload will be filled + # with the user's information from their profile + - content_type: user_email + title: + payload: + - content_type: user_phone_number + title: + payload: + ``` +
+:::note +Both Quick Reply and Button titles in Facebook Messenger have a character limit of +20. Titles longer than 20 characters will be truncated. +
+::: +
+* [Elements](https://developers.facebook.com/docs/messenger-platform/send-messages/template/generic) + provide a way to create a horizontally scrollable list of up to 10 content elements that + integrate buttons, images, and more alongside text in a single message. +
+ ```yaml + utter_fb_element_example: + - text: Hello World!
+ elements: + - title: Element Title 1 + subtitle: Subtitles are supported + buttons: # note the button limit still applies here + - title: Example button A + payload: /example_intent + - title: Example button B + payload: /example_intent + - title: Example button C + payload: /example_intent + - title: Element Title 2 + image_url: http://example.com/img/red.png + buttons: + - title: Example button D + payload: /example_intent + - title: Example button E + payload: /example_intent + - title: Example button F + payload: /example_intent + ``` diff --git a/docs/docs/connectors/hangouts.mdx b/docs/docs/connectors/hangouts.mdx new file mode 100644 index 000000000000..475de1967e3f --- /dev/null +++ b/docs/docs/connectors/hangouts.mdx @@ -0,0 +1,67 @@ +--- +id: hangouts +sidebar_label: Google Hangouts Chat +title: Google Hangouts Chat +description: Build a Rasa Chat Bot on Google Hangouts Chat +--- +
+## Hangouts Chat Setup +
+This channel works similarly to the standard Rasa REST channel. For each request from the channel, your bot will +send one response. The response will be displayed to the user either as text or a so-called card (for +more information, see the Cards section). +
+In order to connect your Rasa bot to Google Hangouts Chat, you first need to create a project in +Google Developer Console that includes the Hangouts API. There you can specify your bot's endpoint +and also obtain your project id, which determines the scope for the OAuth2 authorization in case you +want to use OAuth2. The Hangouts Chat API sends a Bearer token with every request, but it is up to +the bot to actually verify the token, hence the channel also works without this. +For more information, see the official Google resources [https://developers.google.com/hangouts/chat](https://developers.google.com/hangouts/chat). +
+It is possible to implement asynchronous communication between Hangouts Chat and the bot, but due +to the usually synchronous nature of Rasa bots, this functionality is not included in this channel. +
+### Running On Hangouts Chat +
+If you want to connect to Hangouts Chat using the run script, e.g. using: +
+```bash +rasa run +``` +
+you don't need to supply a `credentials.yml`. +
+If you want to use OAuth2, simply put the project id obtained from the Google Developer Console into it: +
+```yaml +hangouts: + project_id: "12345678901" +``` +
+The endpoint for receiving Hangouts Chat messages is +`http://localhost:5005/webhooks/hangouts/webhook`, replacing +the host and port with the appropriate values. Hangouts Chat only forwards +messages to endpoints via `https`, so take appropriate measures to add +it to your setup. +
+### Cards and Interactive Cards +
+There are two ways in which Hangouts Chat will display bot messages, either as text or card. For each received +request, your bot will send all messages in one response. If one of those messages is a card (e.g. an image), +all other messages are converted to card format as well. +
+Interactive cards trigger the `CARD_CLICKED` event for user interactions, e.g. when a button is clicked. When +creating an interactive card, e.g. via `dispatcher.utter_button_message()` in your `actions.py`, you can +specify a payload for each button that is going to be returned with the `CARD_CLICKED` event and extracted +by the `HangoutsInput` channel (for example +`buttons=[{"text":"Yes!", "payload":"/affirm"}, {"text":"Nope.", "payload":"/deny"}]`). +Updating cards is not yet supported.
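
As a sketch, a custom action that produces such an interactive card could look like this (the action name and button titles are made up for illustration; the button payloads follow the example above):

```python
from typing import Any, Dict, List, Text

from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher


class ActionAskConfirmation(Action):
    def name(self) -> Text:
        return "action_ask_confirmation"

    def run(
        self,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> List[Dict[Text, Any]]:
        # Hangouts Chat renders a message with buttons as an interactive card;
        # each payload is returned with the CARD_CLICKED event when clicked
        dispatcher.utter_button_message(
            "Did that answer your question?",
            buttons=[
                {"text": "Yes!", "payload": "/affirm"},
                {"text": "Nope.", "payload": "/deny"},
            ],
        )
        return []
```

If your version of the SDK has deprecated `utter_button_message`, `dispatcher.utter_message(text=..., buttons=...)` achieves the same thing.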
+ +For more detailed information on cards, visit the +[Hangouts docs](https://developers.google.com/hangouts/chat/reference). + +### Other Hangouts Chat Events + +Except for `MESSAGE` and `CARD_CLICKED`, Hangouts Chat knows two other event types, `ADDED_TO_SPACE` and +`REMOVED_FROM_SPACE`, which are triggered when your bot is added or removed from a direct message or chat room +space. The default intent names for these events can be modified in the `HangoutsInput` constructor method. diff --git a/docs/docs/connectors/mattermost.mdx b/docs/docs/connectors/mattermost.mdx new file mode 100644 index 000000000000..f85e51371311 --- /dev/null +++ b/docs/docs/connectors/mattermost.mdx @@ -0,0 +1,66 @@ +--- +id: mattermost +sidebar_label: Mattermost +title: Mattermost +description: Build a Rasa Chat Bot on Mattermost +--- + +You first have to create a mattermost app to get credentials. +Once you have them you can add these to your `credentials.yml`. + +## Getting Credentials + +Mattermost now uses bot accounts for better security. So you can use their guide to create +your bot to get your token required for the credentials.yml file. + +For more information on creating a bot account please see +[Bot Creation](https://docs.mattermost.com/developer/bot-accounts.html#bot-account-creation). + +For information on converting existing user account into bot account please see +[User Conversion](https://docs.mattermost.com/developer/bot-accounts.html#how-do-i-convert-an-existing-account-to-a-bot-account). + +**How to set up the outgoing webhook:** + +1. To create the Mattermost outgoing webhook, login to your Mattermost + team site and go to **Main Menu > Integrations > Outgoing Webhooks**. + +2. Click **Add outgoing webhook**. + +3. Fill out the details including the channel you want the bot in. + You will need to ensure the **trigger words** section is set up + with `@yourbotname` so that the bot doesn't trigger on everything + that is said. + +4. The **Content Type** must be set to `application/json`. + +5. Make sure **trigger when** is set to value + **first word matches a trigger word exactly**. + +6. The callback url needs to be either your localhost address for Rasa, or your ngrok url where you + have your webhook running in Core or your public address, e.g. + `http://test.example.com/webhooks/mattermost/webhook` or `http://localhost:5005/webhooks/mattermost/webhook`. + +For more detailed steps, visit the +[Mattermost docs](https://docs.mattermost.com/guides/developer.html). + +## Running on Mattermost + +If you want to connect to the Mattermost input channel using the +run script, e.g. using: + +```bash +rasa run +``` + +you need to supply a `credentials.yml` with the following content: + +```yaml +mattermost: + url: "https://chat.example.com/api/v4" + token: "xxxxx" # the token for the bot account from creating the bot step. + webhook_url: "https://server.example.com/webhooks/mattermost/webhook" +``` + +The endpoint for receiving Mattermost channel messages +is `/webhooks/mattermost/webhook`, the same as `webhook_url` here. You should +add this url also in the Mattermost outgoing webhook. 
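
If you want to verify the bot token before adding it to `credentials.yml`, one option is to call Mattermost's `/users/me` endpoint with it. This is just a quick sanity check and assumes the `requests` library is available; replace the URL and token with your own values:

```python
import requests

MATTERMOST_URL = "https://chat.example.com/api/v4"  # same value as `url` in credentials.yml
BOT_TOKEN = "xxxxx"  # the token for the bot account

# a valid token returns the bot user's profile; an invalid one returns 401
response = requests.get(
    f"{MATTERMOST_URL}/users/me",
    headers={"Authorization": f"Bearer {BOT_TOKEN}"},
)
print(response.status_code, response.json().get("username"))
```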
diff --git a/docs/docs/connectors/microsoft-bot-framework.mdx b/docs/docs/connectors/microsoft-bot-framework.mdx new file mode 100644 index 000000000000..26532d249c10 --- /dev/null +++ b/docs/docs/connectors/microsoft-bot-framework.mdx @@ -0,0 +1,26 @@ +--- +id: microsoft-bot-framework +sidebar_label: Microsoft Bot Framework +title: Microsoft Bot Framework +description: Build a Rasa Chat Bot on Microsoft Bot Framework +--- + +You first have to create a Microsoft app to get credentials. +Once you have them you can add these to your `credentials.yml`. + +## Running on Microsoft Bot Framework + +If you want to connect to the botframework input channel using the +run script, e.g. using: + +```bash +rasa run +``` + +you need to supply a `credentials.yml` with the following content: + +```yaml +botframework: + app_id: "MICROSOFT_APP_ID" + app_password: "MICROSOFT_APP_PASSWORD" +``` diff --git a/docs/docs/connectors/rocketchat.mdx b/docs/docs/connectors/rocketchat.mdx new file mode 100644 index 000000000000..dd7576dfc88d --- /dev/null +++ b/docs/docs/connectors/rocketchat.mdx @@ -0,0 +1,52 @@ +--- +id: rocketchat +sidebar_label: RocketChat +title: RocketChat +description: Build a Rasa Chat Bot on Rocketchat +--- + +## Getting Credentials + +**How to set up Rocket.Chat:** + +1. Create a user that will be used to post messages, and set its + credentials at credentials file. + +2. Create a Rocket.Chat outgoing webhook by logging in as admin to + Rocket.Chat and going to + **Administration > Integrations > New Integration**. + +3. Select **Outgoing Webhook**. + +4. Set **Event Trigger** section to value **Message Sent**. + +5. Fill out the details, including the channel you want the bot + listen to. Optionally, it is possible to set the + **Trigger Words** section with `@yourbotname` so that the bot + doesn't trigger on everything that is said. + +6. Set your **URLs** section to the Rasa URL where you have your + webhook running in Core or your public address with + `/webhooks/rocketchat/webhook`, e.g. + `http://test.example.com/webhooks/rocketchat/webhook`. + +For more information on the Rocket.Chat Webhooks, see the +[Rocket.Chat Guide](https://rocket.chat/docs/administrator-guides/integrations/). + +## Running on RocketChat + +If you want to connect to the Rocket.Chat input channel using the run +script, e.g. using: + +```bash +rasa run +``` + +you need to supply a `credentials.yml` with the following content: + +```yaml +rocketchat: + user: "yourbotname" + password: "YOUR_PASSWORD" + server_url: "https://demo.rocket.chat" +``` diff --git a/docs/docs/connectors/slack.mdx b/docs/docs/connectors/slack.mdx new file mode 100644 index 000000000000..08b0c1fce44d --- /dev/null +++ b/docs/docs/connectors/slack.mdx @@ -0,0 +1,85 @@ +--- +id: slack +sidebar_label: Slack +title: Slack +description: Build a Rasa Chat Bot on Slack +--- + +You first have to create a Slack app to get credentials. +Once you have them you can add these to your `credentials.yml`. + +## Getting Credentials + +**How to get the Slack credentials:** You need to set up a Slack app. + +1. To create the app go to [https://api.slack.com/apps](https://api.slack.com/apps) and click + on **Create New App**. + +2. Activate the following features: + +* Interactivity & Shortcuts > Interactivity (if your bot uses any [interactive components](https://api.slack.com/reference/block-kit/interactive-components) , e.g. 
buttons) + +* Add your Rasa request URL `http://<host>:<port>/webhooks/slack/webhook`, replacing + the host and port with the appropriate values that point to your Rasa X or Rasa Open Source deployment. + +* Event subscriptions > Subscribe to bot events: `message.channels`, `message.groups`, `message.im`, `message.mpim` + +* App Home > Always Show My Bot as Online + +1. Get the `Bot User OAuth Access Token` from the OAuth & Permissions page. Click `Install App to Workspace` + and allow access to your workspace. You will need + to provide this value in your credentials later in these instructions. It should start + with `xoxb`. + +2. In the “OAuth & Permissions > Redirect URLs” enter the endpoint for receiving Slack messages. This is + the same URL you entered above for Interactivity & Shortcuts - `http://<host>:<port>/webhooks/slack/webhook`. + +3. Go to the “Event Subscriptions” section, turn on the “Enable Events” and add the endpoint here also. + +For more detailed steps, visit the +[Slack API docs](https://api.slack.com/events-api). + +## Running on Slack + +If you want to connect to the slack input channel using the run +script, e.g. using: + +```bash +rasa run +``` + +Do not forget to run the action server if this is required by your bot, +e.g. using: + +```bash +rasa run actions +``` + +You need to supply a `credentials.yml` with the following content: + +* The `slack_channel` can be a channel or an individual person that the bot should listen to for communications, in + addition to the default behavior of listening for direct messages and app mentions, i.e. “@app_name”. To get the channel + id, right click on the channel choose Copy Link and the id will be the last component in the URL. + +* Use the entry for `Bot User OAuth Access Token` in the + “OAuth & Permissions” tab as your `slack_token`. It should start + with `xoxb`. + +* Optional: You can add a `proxy` through which to route outgoing traffic to the Slack API. Only HTTP proxies are currently supported. + +```yaml +slack: + slack_token: "xoxb-286425452756-safjasdf7sl38KLls" + slack_channel: "C011GR5D33F" + proxy: "http://myProxy.online" # Proxy Server to route your traffic through. This configuration is optional. + slack_retry_reason_header: "x-slack-retry-reason" # Slack HTTP header name indicating reason that slack send retry request. This configuration is optional. + slack_retry_number_header: "x-slack-retry-num" # Slack HTTP header name indicating the attempt number. This configuration is optional. + errors_ignore_retry: None # Any error codes given by Slack included in this list will be ignored. Error codes are listed [here](https://api.slack.com/events-api#errors). + use_threads: False # If set to True, bot responses will appear as a threaded message in Slack. This configuration is optional and set to False by default. +``` + +The endpoint for receiving slack messages is +`http://localhost:5005/webhooks/slack/webhook`, replacing +the host and port with the appropriate values. This is the URL +you should add in the “OAuth & Permissions” section as well as +the “Event Subscriptions”. diff --git a/docs/docs/connectors/telegram.mdx b/docs/docs/connectors/telegram.mdx new file mode 100644 index 000000000000..292927267ff9 --- /dev/null +++ b/docs/docs/connectors/telegram.mdx @@ -0,0 +1,43 @@ +--- +id: telegram +sidebar_label: Telegram +title: Telegram +description: Build a Rasa Chat Bot on Telegram +--- + +You first have to create a Telegram bot to get credentials. 
+Once you have them you can add these to your `credentials.yml`. + +## Getting Credentials + +**How to get the Telegram credentials:** +You need to set up a Telegram bot. + +1. To create the bot, go to [Bot Father](https://web.telegram.org/#/im?p=@BotFather), + enter `/newbot` and follow the instructions. + +2. At the end you should get your `access_token` and the username you + set will be your `verify`. + +3. If you want to use your bot in a group setting, it's advisable to + turn on group privacy mode by entering `/setprivacy`. Then the bot + will only listen when a user's message starts with `/bot`. + +For more information, check out the [Telegram HTTP API](https://core.telegram.org/bots/api). + +## Running on Telegram + +If you want to connect to telegram using the run script, e.g. using: + +```bash +rasa run +``` + +you need to supply a `credentials.yml` with the following content: + +```yaml +telegram: + access_token: "490161424:AAGlRxinBRtKGb21_rlOEMtDFZMXBl6EC0o" + verify: "your_bot" + webhook_url: "https://your_url.com/webhooks/telegram/webhook" +``` diff --git a/docs/docs/connectors/twilio.mdx b/docs/docs/connectors/twilio.mdx new file mode 100644 index 000000000000..ebdbf56c380e --- /dev/null +++ b/docs/docs/connectors/twilio.mdx @@ -0,0 +1,60 @@ +--- +id: twilio +sidebar_label: Twilio +title: Twilio +description: Deploy a Rasa Open Source assistant through text message or WhatsApp via the Twilio connector +--- + +You can use the Twilio connector to deploy an assistant that is available over text message. + +## Getting Credentials + +You first have to create a Twilio app to get credentials. +Once you have them you can add these to your `credentials.yml`. + +**How to get the Twilio credentials:** +You need to set up a Twilio account. + +1. Once you have created a Twilio account, you need to create a new + project. The basic important product to select here + is `Programmable SMS`. + +2. Once you have created the project, navigate to the Dashboard of + `Programmable SMS` and click on `Get Started`. Follow the + steps to connect a phone number to the project. + +3. Now you can use the `Account SID`, `Auth Token`, and the phone + number you purchased in your `credentials.yml`. + +4. Configure your webhook URL by navigating to + [Phone Numbers](https://www.twilio.com/console/phone-numbers/incoming) in the Twilio + dashboard and selecting your phone number. Find the `Messaging` section and add + your webhook URL (e.g. `https://<host>:<port>/webhooks/twilio/webhook`, + replacing the host and port with your running Rasa X or Rasa Open Source server) + to the `A MESSAGE COMES IN` setting. + +For more information, see the [Twilio REST API](https://www.twilio.com/docs/iam/api). + +### Connecting to WhatsApp + +You can deploy a Rasa Open Source assistant to WhatsApp through Twilio. However, to do so, you have +to have a [WhatsApp Business](https://www.whatsapp.com/business/) profile. Associate +your Whatsapp Business profile with the phone number you purchased through Twilio to +access the [Twilio API for WhatsApp](https://www.twilio.com/docs/whatsapp/api). + +According to the [Twilio API documentation](https://www.twilio.com/docs/whatsapp/api#using-phone-numbers-with-whatsapp), +the phone number you use should be prefixed with whatsapp: in the `credentials.yml` described below. 
+ +## Applying the Credentials + +Add the Twilio credentials to your `credentials.yml`: + +```yaml +twilio: + account_sid: "ACbc2dxxxxxxxxxxxx19d54bdcd6e41186" + auth_token: "e231c197493a7122d475b4xxxxxxxxxx" + twilio_number: "+440123456789" # if using WhatsApp: "whatsapp:+440123456789" +``` + +Make sure to restart your Rasa Open Source server or container to make changes to +which connectors are available. diff --git a/docs/docs/connectors/your-own-website.mdx b/docs/docs/connectors/your-own-website.mdx new file mode 100644 index 000000000000..c2b5e3a1ff90 --- /dev/null +++ b/docs/docs/connectors/your-own-website.mdx @@ -0,0 +1,124 @@ +--- +id: your-own-website +sidebar_label: Your Own Website +title: Your Own Website +description: Deploy and Run a Rasa Chat Bot on a Website +--- + +If you just want an easy way for users to test your bot, the best option +is usually the chat interface that ships with Rasa X, where you can [invite users +to test your bot](https://rasa.com/docs/rasa-x/user-guide/share-assistant/#share-your-bot). + +If you already have an existing website and want to add a Rasa assistant to it, +you can use [Chatroom](https://github.com/scalableminds/chatroom), a widget which you can incorporate into your existing webpage by adding a HTML snippet. +Alternatively, you can also build your own chat widget. + +## Websocket Channel + +The SocketIO channel uses websockets and is real-time. You need to supply +a `credentials.yml` with the following content: + +```yaml +socketio: + user_message_evt: user_uttered + bot_message_evt: bot_uttered + session_persistence: true/false +``` + +The first two configuration values define the event names used by Rasa Core +when sending or receiving messages over socket.io. + +By default, the socketio channel uses the socket id as `sender_id`, which causes +the session to restart at every page reload. `session_persistence` can be +set to `true` to avoid that. In that case, the frontend is responsible +for generating a session id and sending it to the Rasa Core server by +emitting the event `session_request` with `{session_id: [session_id]}` +immediately after the `connect` event. + +The example [Webchat](https://github.com/botfront/rasa-webchat) +implements this session creation mechanism (version >= 0.5.0). + +## REST Channels + +The `RestInput` and `CallbackInput` channels can be used for custom integrations. +They provide a URL where you can post messages and either receive response messages +directly, or asynchronously via a webhook. + +### RestInput + +The `rest` channel will provide you with a REST endpoint to post messages +to and in response to that request will send back the bots messages. +Here is an example on how to connect the `rest` input channel +using the run script: + +```bash +rasa run +``` + +you need to ensure your `credentials.yml` has the following content: + +```yaml +rest: + # you don't need to provide anything here - this channel doesn't + # require any credentials +``` + +After connecting the `rest` input channel, you can post messages to +`POST /webhooks/rest/webhook` with the following format: + +```json +{ + "sender": "Rasa", + "message": "Hi there!" +} +``` + +The response to this request will include the bot responses, e.g. 
+ +```json +[ + {"text": "Hey Rasa!"}, {"image": "http://example.com/image.jpg"} +] +``` + +### CallbackInput + +The `callback` channel behaves very much like the `rest` input, +but instead of directly returning the bot messages to the HTTP +request that sends the message, it will call a URL you can specify +to send bot messages. + +Here is an example on how to connect the +`callback` input channel using the run script: + +```bash +rasa run +``` + +you need to supply a `credentials.yml` with the following content: + +```yaml +callback: + # URL to which Core will send the bot responses + url: "http://localhost:5034/bot" +``` + +After connecting the `callback` input channel, you can post messages to +`POST /webhooks/callback/webhook` with the following format: + +```json +{ + "sender": "Rasa", + "message": "Hi there!" +} +``` + +The response will simply be `success`. Once Core wants to send a +message to the user, it will call the URL you specified with a `POST` +and the following `JSON` body: + +```json +[ + {"text": "Hey Rasa!"}, {"image": "http://example.com/image.jpg"} +] +``` diff --git a/docs/docs/contextual-conversations.mdx b/docs/docs/contextual-conversations.mdx new file mode 100644 index 000000000000..dfc800642ec6 --- /dev/null +++ b/docs/docs/contextual-conversations.mdx @@ -0,0 +1,120 @@ +--- +id: contextual-conversations +sidebar_label: Complex Contextual Conversations +title: Complex Contextual Conversations +--- + +Not every user goal you define will fall under the category of business logic. For the +other cases you will need to use stories and context to help the user achieve their goal. + +If we take the example of the “getting started” skill from Sara, we want to give them +different information based on whether they've built an AI assistant before and are +migrating from a different tool etc. This can be done quite simply with stories and +the concept of [max history](./policies.mdx#max-history). + +```md + ## new to rasa + built a bot before + * how_to_get_started + - utter_getstarted + - utter_first_bot_with_rasa + * affirm + - action_set_onboarding + - slot{"onboarding": true} + - utter_built_bot_before + * affirm + - utter_ask_migration + * deny + - utter_explain_rasa_components + - utter_rasa_components_details + - utter_ask_explain_nlucorex + * affirm + - utter_explain_nlu + - utter_explain_core + - utter_explain_x + - utter_direct_to_step2 + + ## not new to rasa + core + * how_to_get_started + - utter_getstarted + - utter_first_bot_with_rasa + * deny + - action_set_onboarding + - slot{"onboarding": false} + - utter_ask_which_product + * how_to_get_started{"product": "core"} + - utter_explain_core + - utter_anything_else +``` + +The above example mostly leverages intents to guide the flow, however you can also +guide the flow with entities and slots. For example, if the user gives you the +information that they're new to Rasa at the beginning, you may want to skip this +question by storing this information in a slot. + +```md +* how_to_get_started{"user_type": "new"} + - slot{"user_type":"new"} + - action_set_onboarding + - slot{"onboarding": true} + - utter_getstarted_new + - utter_built_bot_before +``` + +For this to work, keep in mind that the slot has to be featurized in your `domain.yml` +file. This time we can use the `text` slot type, as we only care about whether the +[slot was set or not](./domain.mdx#slots/). 
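
The stories above also rely on a custom action, `action_set_onboarding`, to record the user's answer in the `onboarding` slot. A minimal sketch of such an action (a hypothetical implementation, not Sara's actual code) could look like this:

```python
from typing import Any, Dict, List, Text

from rasa_sdk import Action, Tracker
from rasa_sdk.events import SlotSet
from rasa_sdk.executor import CollectingDispatcher


class ActionSetOnboarding(Action):
    def name(self) -> Text:
        return "action_set_onboarding"

    def run(
        self,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> List[Dict[Text, Any]]:
        # "onboarding" is True if the user affirmed that this is
        # their first assistant built with Rasa
        intent = tracker.latest_message.get("intent", {}).get("name")
        return [SlotSet("onboarding", intent == "affirm")]
```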
+ +## AugmentedMemoizationPolicy + +To make your bot more robust to interjections, you can replace the MemoizationPolicy +with the AugmentedMemoizationPolicy. It works the same way as the MemoizationPolicy, +but if no exact match is found it additionally has a mechanism that forgets a certain +amount of steps in the conversation history to find a match in your stories (read more +[here](policies.mdx#augmented-memoization-policy)) + +## Using ML to generalise + +Aside from the more rule-based policies we described above, Core also has some ML +policies you can use. These come in as an additional layer in your policy configuration, +and only jump in if the user follows a path that you have not anticipated. **It is important +to understand that using these policies does not mean letting go of control over your +assistant.** If a rule based policy is able to make a prediction, that prediction will +always have a higher priority (read more [here](./policies.mdx#action-selection)) and predict the next action. The +ML based policies give your assistant the chance not to fail, whereas if they are not +used your assistant will definitely fail, like in state machine based dialogue systems. + +These types of unexpected user behaviors are something our [TEDPolicy](https://blog.rasa.com/unpacking-the-ted-policy-in-rasa-open-source/) deals with +very well. It can learn to bring the user back on track after some +interjections during the main user goal the user is trying to complete. For example, +in the conversation below (extracted from a conversation on [Rasa X](https://rasa.com/docs/rasa-x/user-guide/review-conversations/)): + +```md +## Story from conversation with a2baab6c83054bfaa8d598459c659d2a on November 28th 2019 +* greet + - action_greet_user + - slot{"shown_privacy":true} +* ask_whoisit + - action_chitchat +* ask_whatspossible + - action_chitchat +* telljoke + - action_chitchat +* how_to_get_started{"product":"x"} + - slot{"product":"x"} + - utter_explain_x + - utter_also_explain_nlucore +* affirm + - utter_explain_nlu + - utter_explain_core + - utter_direct_to_step2 +``` + +Here we can see the user has completed a few chitchat tasks first, and then ultimately +asks how they can get started with Rasa X. The TEDPolicy correctly predicts that +Rasa X should be explained to the user, and then also takes them down the getting started +path, without asking all the qualifying questions first. + +Since the ML policy generalized well in this situation, it makes sense to add this story +to your training data to continuously improve your bot and help the ML generalize even +better in future. [Rasa X](https://rasa.com/docs/rasa-x/) is a tool that can help +you improve your bot and make it more contextual. diff --git a/docs/docs/conversation-driven-development.mdx b/docs/docs/conversation-driven-development.mdx new file mode 100644 index 000000000000..f6508a08d25e --- /dev/null +++ b/docs/docs/conversation-driven-development.mdx @@ -0,0 +1,71 @@ +--- +id: conversation-driven-development +sidebar_label: Conversation-Driven Development +title: Conversation-Driven Development +description: Find out about best practices for conversational AI using Conversation-Driven Development. +--- + +## What is CDD? + +Conversation-Driven Development (CDD) is the process of listening to your users and using those insights to improve your AI assistant. It is the overarching best practice approach for chatbot development. 
+ +Developing great AI assistants is challenging because users will always say something you didn't anticipate. The principle behind CDD is that in every conversation users are telling you—in their own words—exactly what they want. By practicing CDD at every stage of bot development, you orient your assistant towards real user language and behavior. + +CDD includes the following actions: + +* **Share** your assistant with users as soon as possible +* **Review** conversations on a regular basis +* **Annotate** messages and use them as NLU training data +* **Test** that your assistant always behaves as you expect +* **Track** when your assistant fails and measure its performance over time +* **Fix** how your assistant handles unsuccessful conversations + +CDD is not a linear process; you'll circle back to the same actions over and over as you develop and improve your bot. + +Read more about these actions and the concept of CDD [here](https://blog.rasa.com/conversation-driven-development-a-better-approach-to-building-ai-assistants/). + +You can also check out [Rasa X](https://rasa.com/docs/rasa-x/), a purpose-built tool for CDD. + +## CDD in early stages of development + +If you're at the earliest stage of bot development, it might seem like CDD has no role to play - after all, you have no conversations yet! However, there are CDD actions you can take at the very beginning of bot development: + +1. See the best practices for [NLU data](generating-nlu-data.mdx) and [Stories](writing-stories.mdx) for details on creating training data with CDD in mind. +2. Give your bot to test users early on. + + CDD is all about listening to your users, so the earlier you find some, the better. + + Test users can be anyone who doesn't already know how your bot works from the inside. People on the bot development team should not be test users, since they know exactly what the bot can and can't do. Don't overinstruct your test users; they should have only as much knowledge of the bot's domain as your end users will have. + +3. Set up a CI/CD pipeline. + + CDD leads to frequent, smaller updates to your bot as you gather insights from bot conversations. [Setting up a CI/CD pipeline](setting-up-ci-cd.mdx) early on in development will enable you to act quickly on what you see in conversations. + + +At this stage, you can [install Rasa X in local mode](https://rasa.com/docs/rasa-x/installation-and-setup/installation-guide/#local-mode) to make it easier to share your bot with test users, collect conversations, and apply NLU and Story best practices based on the conversations you collect. + + +## CDD with a bot in production + +Once your bot is in production, you'll have more conversations to gain insights from. Then you can fully apply CDD actions. +At this stage, you can [install Rasa X on a server](https://rasa.com/docs/rasa-x/installation-and-setup/installation-guide/#helm-chart) +to both deploy your bot and enable CDD with a bot in production. + +* **Review**: Look in conversations for what users are really asking for. + + Your test users had at least some instruction about what the bot was intended to do; real users often either have no idea, or ignore instructions given to them. You can't cater to every unexpected user behavior, but you can try to address the main friction points you notice. Here are some things you could consider looking for: + + * Look at conversations where an “out_of_scope” intent or fallback behavior occurred. 
These could indicate a potential new skill, or just a misclassified user utterance. + * Look for user frustration, such as requests for transfer to a human. +
+* **Annotate**: Continue to follow [best practices for NLU](generating-nlu-data.mdx) as you add new user utterances from real conversations to your training data. Be careful not to overfit your NLU model to utterances like those already in your training data. This can happen when you continuously add user utterances that were already predicted correctly and with high confidence to your training data. To avoid overfitting and help your model generalize to more diverse user utterances, add only user utterances that the model previously predicted incorrectly or with low confidence. +
+* **Test:** Add successful user conversations to your [test conversations](testing-your-assistant.mdx). Doing this consistently will help ensure you don't introduce regressions as you make other fixes to your bot. +
+* **Track:** Look for clues to success and failure to help you track your bot's performance. +
+ Some metrics are external to your bot. For example, if you are building a bot to relieve demand on a customer service call center, one metric for success could be the reduction in traffic to the call center. Others you can get directly from conversations, such as whether a user reaches a certain action that represents achieving the user goal. +
+ Automatically tracked metrics are by nature proxy metrics; the only way to get a true measure of success would be to individually review and rate every single conversation with your bot. While this clearly isn't realistic, just keep in mind that no metric is a perfect representation of your bot's performance, so don't rely only on metrics to see where your bot needs improvement. +
+* **Fix:** Continue to follow [best practices for Stories](writing-stories.mdx) as you expand and improve your bot's skills. Let user demand guide which skills you add and which fixes you make. Make smaller changes frequently rather than making big changes only once in a while. This will help you gauge the effectiveness of changes you're making, since you'll get user feedback more frequently. Your [CI/CD pipeline](setting-up-ci-cd.mdx) should allow you to do so with confidence. diff --git a/docs/docs/custom-actions.mdx b/docs/docs/custom-actions.mdx new file mode 100644 index 000000000000..bd25ae9ef6b2 --- /dev/null +++ b/docs/docs/custom-actions.mdx @@ -0,0 +1,86 @@ +--- +id: custom-actions +sidebar_label: Custom Actions +title: Custom Actions +--- +
+An action can run any code you want. Custom actions can turn on the lights, +add an event to a calendar, check a user's bank balance, or anything +else you can imagine. +
+The `Action` class is the base class for any custom action. It has two methods +that both need to be overwritten, `name()` and `run()`.
+ +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="custom-action-example"></a> +
+In a restaurant bot, if the user says “show me a Mexican restaurant”, +your bot could execute the action `ActionCheckRestaurants`, +which might look like this: +
+```python +from typing import Any, Dict, List, Text + +from rasa_sdk import Action, Tracker +from rasa_sdk.events import SlotSet +from rasa_sdk.executor import CollectingDispatcher + +class ActionCheckRestaurants(Action): + def name(self) -> Text: + return "action_check_restaurants" + + def run(self, + dispatcher: CollectingDispatcher, + tracker: Tracker, + domain: Dict[Text, Any]) -> List[Dict[Text, Any]]: + + cuisine = tracker.get_slot('cuisine') + q = "select * from restaurants where cuisine='{0}' limit 1".format(cuisine) + # `db` is assumed to be a database client you have set up elsewhere + result = db.query(q) + + return [SlotSet("matches", result if result is not None else [])] +``` +
+You should add the action name `action_check_restaurants` to +the actions in your domain file. The action's `run()` method receives +three arguments. You can access the values of slots and the latest message +sent by the user using the `tracker` object, and you can send messages +back to the user with the `dispatcher` object, by calling +`dispatcher.utter_message`. +
+Details of the `run()` method: + +
+#### Action.run +``async Action.run(dispatcher, tracker, domain):`` +
+Execute the side effects of this action. + +
+* **Parameters** +
+ * **dispatcher** – the dispatcher which is used to + send messages back to the user. Use + `dispatcher.utter_message()` or any other + `rasa_sdk.executor.CollectingDispatcher` + method. +
+ * **tracker** – the state tracker for the current + user. You can access slot values using + `tracker.get_slot(slot_name)`, the most recent user message + is `tracker.latest_message.text` and any other + `rasa_sdk.Tracker` property. +
+ * **domain** – the bot's domain + + +
+* **Returns** +
+ A list of `rasa_sdk.events.Event` instances that is +
+ returned through the endpoint + + + +
+* **Return type** +
+ `List`[`Dict`[`str`, `Any`]] + diff --git a/docs/docs/default-actions.mdx b/docs/docs/default-actions.mdx new file mode 100644 index 000000000000..a7c3f69dd65a --- /dev/null +++ b/docs/docs/default-actions.mdx @@ -0,0 +1,95 @@ +--- +id: default-actions +sidebar_label: Default Actions +title: Default Actions +--- +
+<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="default-actions"></a> +
+## Default Actions +
+The available default actions are: +
+| | | +|---------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------| +|`action_listen` |Stop predicting more actions and wait for user input. | +|`action_restart` |Reset the whole conversation. Can be triggered during a conversation by entering `/restart` if the [Mapping Policy](./policies.mdx#mapping-policy) is included in the policy configuration. | +|`action_session_start` |Start a new conversation session. Take all set slots, mark the beginning of a new conversation session and re-apply the existing `SlotSet` events.
This action is triggered automatically after an inactivity period defined by the `session_expiration_time` parameter in the domain's [Session configuration](./domain.mdx#session-config). Can be triggered manually during a conversation by entering `/session_start`. All conversations begin with an `action_session_start`. | +|`action_default_fallback` |Undo the last user message (as if the user did not send it and the bot did not react) and utter a message that the bot did not understand. See [Fallback Policy](./policies.mdx#fallback-policy). | +|`action_deactivate_form` |Deactivate the active form and reset the requested slot. See also See also [Handling unhappy paths](./forms.mdx#writing-stories--rules-for-unhappy-form-paths). | +|`action_revert_fallback_events` |Revert events that occurred during the [TwoStageFallbackPolicy](./policies.mdx#two-stage-fallback-policy). | +|`action_default_ask_affirmation` |Ask the user to affirm their intent. It is suggested to overwrite this default action with a custom action to have more meaningful prompts. | +|`action_default_ask_rephrase` |Ask the user to rephrase their intent. | +|`action_back` |Undo the last user message (as if the user did not send it and the bot did not react). Can be triggered during a conversation by entering `/back` if the MappingPolicy is included in the policy configuration. | + +All the default actions can be overridden. To do so, add the action name +to the list of actions in your domain: + +```yaml +actions: +- action_default_ask_affirmation +``` + +Rasa will then call your action endpoint and treat it as every other +custom action. + +## Customizing Default Actions + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="custom-session-start"></a> + +In order to overwrite a default action, write a custom action whose `name()` method +returns the same name as the default action in the table above. +Explicitly add this action to the `actions:` section of your domain file to tell Rasa +to use your custom action instead of the default action when that action name is called. + +### Customizing the session start action + +The default behavior of the session start action is to take all existing slots and to +carry them over into the next session. Let's say you do not want to carry over all +slots, but only a user's name and their phone number. 
To do that, you'd override the +`action_session_start` with a custom action that might look like this: + +```python +from typing import Text, List, Dict, Any + +from rasa_sdk import Action, Tracker +from rasa_sdk.events import SlotSet, SessionStarted, ActionExecuted, EventType +from rasa_sdk.executor import CollectingDispatcher + + +class ActionSessionStart(Action): + def name(self) -> Text: + return "action_session_start" + + @staticmethod + def fetch_slots(tracker: Tracker) -> List[EventType]: + """Collect slots that contain the user's name and phone number.""" + + slots = [] + + for key in ("name", "phone_number"): + value = tracker.get_slot(key) + if value is not None: + slots.append(SlotSet(key=key, value=value)) + + return slots + + async def run( + self, + dispatcher: CollectingDispatcher, + tracker: Tracker, + domain: Dict[Text, Any], + ) -> List[EventType]: + + # the session should begin with a `session_started` event + events = [SessionStarted(metadata=self.metadata)] + + # any slots that should be carried over should come after the + # `session_started` event + events.extend(self.fetch_slots(tracker)) + + # an `action_listen` should be added at the end as a user message follows + events.append(ActionExecuted("action_listen")) + + return events +``` diff --git a/docs/docs/docker/building-in-docker.mdx b/docs/docs/docker/building-in-docker.mdx new file mode 100644 index 000000000000..92f02f2d8260 --- /dev/null +++ b/docs/docs/docker/building-in-docker.mdx @@ -0,0 +1,277 @@ +--- +id: building-in-docker +sidebar_label: Building a Rasa Assistant in Docker +title: Building a Rasa Assistant in Docker +description: Learn how to build a Rasa assistant in Docker. +--- +<!-- this file is version specific, do not use `@site/...` syntax --> +import variables from '../variables.json'; + +If you don't have a Rasa project yet, you can build one in Docker without having to install Rasa Open Source +on your local machine. If you already have a model you're satisfied with, see +[Deploying Your Rasa Assistant](../how-to-deploy.mdx#deploying-your-rasa-assistant) to learn how to deploy your model. + +## Installing Docker + +If you're not sure if you have Docker installed, you can check by running: + +```bash +docker -v +# Docker version 18.09.2, build 6247962 +``` + +If Docker is installed on your machine, the output should show you your installed +versions of Docker. If the command doesn't work, you'll have to install Docker. +See [Docker Installation](https://docs.docker.com/install/) for details. + +## Setting up your Rasa Project + +Just like starting a project from scratch, you'll use the `rasa init` command to create a project. +The only difference is that you'll be running Rasa inside a Docker container, using +the image `rasa/rasa`. To initialize your project, run: + +<pre><code parentName="pre" className="language-bash"> +{`docker run -v $(pwd):/app rasa/rasa:${variables.release}-full init --no-prompt`}</code></pre> + +What does this command mean? + +<ul> + <li><inlineCode>-v $(pwd):/app</inlineCode> mounts your current working directory to the working directory + in the Docker container. This means that files you create on your computer will be + visible inside the container, and files created in the container will + get synced back to your computer.</li> + <li><inlineCode>rasa/rasa</inlineCode> is the name of the docker image to run. 
'{variables.release}-full' is the name of the tag, + which specifies the version and dependencies.</li> + + <li>the Docker image has the <inlineCode>rasa</inlineCode> command as its entrypoint, which means you don't + have to type <inlineCode>rasa init</inlineCode>, just <inlineCode>init</inlineCode> is enough.</li> +</ul> + +Running this command will produce a lot of output. What happens is: + +* a Rasa project is created + +* an initial model is trained using the project's training data. + +To check that the command completed correctly, look at the contents of your working directory: + +```bash +ls -1 +``` + +The initial project files should all be there, as well as a `models` directory that contains your trained model. + +:::note +If you run into permission errors, it may be because the `rasa/rasa` images +run as user `1001` as a best practice, to avoid giving the container `root` permissions. +Hence, all files created by these containers will be owned by user `1001`. See the [Docker documentation](https://docs.docker.com/edge/engine/reference/commandline/run/) +if you want to run the containers as a different user. + +::: + +## Talking to Your Assistant + +To talk to your newly-trained assistant, run this command: + +<pre><code parentName="pre" className="language-bash"> +{`docker run -it -v $(pwd):/app rasa/rasa:${variables.release}-full shell`}</code></pre> + +This will start a shell where you can chat to your assistant. +Note that this command includes the flags `-it`, which means that you are running +Docker interactively, and you are able to give input via the command line. +For commands which require interactive input, like `rasa shell` and `rasa interactive`, +you need to pass the `-it` flags. + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="model-training-docker"></a> + +## Training a Model + +If you edit the NLU or Core training data or edit the `config.yml` file, you'll need to +retrain your Rasa model. You can do so by running: + +<pre><code parentName="pre" className="language-bash"> +{`docker run -v $(pwd):/app rasa/rasa:${variables.release}-full train --domain domain.yml --data data --out models`}</code></pre> + +Here's what's happening in that command: + +<ul> + <li><inlineCode>-v $(pwd):/app</inlineCode>: Mounts your project directory into the Docker + container so that Rasa can train a model on your training data</li> + + <li>rasa/rasa:{variables.release}-full: Use the Rasa image with the tag '{variables.release}-full'</li> + + <li><inlineCode>train</inlineCode>: Execute the <inlineCode>rasa train</inlineCode> command within the container. For more + information see <a href="/command-line-interface">Command Line Interface</a>.</li> +</ul> + +In this case, we've also passed values for the location of the domain file, training +data, and the models output directory to show how these can be customized. +You can also leave these out, since we are passing the default values. + +## Customizing your Model + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="choosing-a-tag"></a> + +### Choosing a Tag + +<p> + All <inlineCode>rasa/rasa</inlineCode> image tags start with a version number. The current version is {variables.release}. 
The tags are: +</p> +
+* `{version}` +
+* `{version}-full` +
+* `{version}-spacy-en` +
+* `{version}-spacy-de` +
+* `{version}-mitie-en` +
+The `{version}-full` tag includes all possible pipeline dependencies, allowing you to change your `config.yml` +as you like without worrying about missing dependencies. The plain `{version}` tag includes all the +dependencies you need to run the default pipeline created by `rasa init`. +
+To keep images as small as possible, we also publish different tags of the `rasa/rasa` image +with different dependencies installed. See [Tuning Your Model](../tuning-your-model.mdx) for more dependency information +specific to your pipeline. For example, if you are using components with pre-trained word vectors from spaCy or +MITIE, you should choose the corresponding tag. +
+If your model has a dependency that is not included in any of the tags (for example, a different spaCy language model), +you can build a docker image that extends the `rasa/rasa` image. +
+:::note +You can see a list of all the versions and tags of the Rasa Open Source +Docker image on [DockerHub](https://hub.docker.com/r/rasa/rasa/). +
+::: +
+:::caution +The `latest` tags correspond to the current master build. These tags are not recommended for use, +as they are not guaranteed to be stable. +
+::: +
+### Adding Custom Components +
+If you are using a custom NLU component or policy in your `config.yml`, you have to add the module file to your +Docker container. You can do this by either mounting the file or by including it in your +own custom image (e.g. if the custom component or policy has extra dependencies). Make sure +that your module is in the Python module search path by setting the +environment variable `PYTHONPATH=$PYTHONPATH:<directory of your module>`. +
+### Adding Custom Actions +
+To create more sophisticated assistants, you will want to use [Custom Actions](../actions.mdx#custom-actions). +Continuing the example from above, you might want to add an action which tells +the user a joke to cheer them up. +
+Start by creating the custom actions in a directory `actions` in your working directory: +
+```bash +mkdir actions +mv actions.py actions/actions.py +# Rasa SDK expects a python module. +# Therefore, make sure that you have this file in the directory. +touch actions/__init__.py +``` +
+Then build a custom action using the Rasa SDK by editing `actions/actions.py`, for example: +
+```python +import requests +from rasa_sdk import Action + + +class ActionJoke(Action): + def name(self): + return "action_joke" + + def run(self, dispatcher, tracker, domain): + request = requests.get('http://api.icndb.com/jokes/random').json() # make an api call + joke = request['value']['joke'] # extract a joke from returned json response + dispatcher.utter_message(text=joke) # send the message back to the user + return [] +``` +
+In `data/stories.md`, replace `utter_cheer_up` with the custom action `action_joke` +to tell your bot to use this new action. +
+In `domain.yml`, add a section for custom actions, including your new action: +
+```yaml +actions: + - action_joke +``` +
+After updating your domain and stories, you have to retrain your model: +
+<pre><code parentName="pre" className="language-bash"> +{`docker run -v $(pwd):/app rasa/rasa:${variables.release}-full train`}</code></pre> +
+Your actions will run on a separate server from your Rasa server.
First create a network to connect the two containers: + +```bash +docker network create my-project +``` + +You can then run the actions with the following command: + +<pre><code parentName="pre" className="language-bash"> +{`docker run -d -v $(pwd)/actions:/app/actions --net my-project --name action-server rasa/rasa-sdk:${variables.rasa_sdk_version}`}</code></pre> + + +Here's what's happening in that command: + +* `-d`: Runs the container in detached mode so that you can run the rasa container in the same window. + +* `-v $(pwd):/app`: Mounts your project directory into the Docker + container so that the action server can run the code in the `actions` folder + +* `net my-project`: Run the server on a specific network so that the rasa container can find it + +* `--name action-server`: Gives the server a specific name for the rasa server to reference + +* <code>rasa/rasa-sdk:{variables.rasa_sdk_version}</code> : Uses the Rasa SDK image with the tag {`${variables.rasa_sdk_version}`} + +Because the action server is running in detached mode, if you want to stop the container, +do it with `docker stop action-server`. You can also run `docker ps` at any time to see all +of your currently running containers. + +To instruct the Rasa server to use the action server, you have to tell Rasa its location. +Add this endpoint to your `endpoints.yml`, referencing the `--name` you gave the server: + +```yaml +action_endpoint: + url: "http://action-server:5055/webhook" +``` + +Now you can talk to your bot again via the `shell` command: + +<pre><code parentName="pre" className="language-bash"> +{`docker run -it -v $(pwd):/app -p 5005:5005 --net my-project rasa/rasa:${variables.release}-full shell`}</code></pre> + +:::note +If you stop and restart the `action-server` container, you might see an error like this: + +``` +docker: Error response from daemon: Conflict. The container name "/action-server" is +already in use by container "f7ffc625e81ad4ad54cf8704e6ad85123c71781ca0a8e4b862f41c5796c33530". +You have to remove (or rename) that container to be able to reuse that name. +``` + +If that happens, it means you have a (stopped) container with the name already. You can remove it via: + +```bash +docker rm action-server +``` + +::: + +## Deploying your Assistant + +Work on your bot until you have a minimum viable assistant that can handle your happy paths. After +that, you'll want to deploy your model to get feedback from real test users. To do so, you can deploy the +model you created with Rasa X via one of our [recommended deployment methods](../how-to-deploy.mdx#recommended-deployment-methods). +Or, you can do a [Rasa-only deployment in Docker Compose](./deploying-in-docker-compose.mdx#deploying-rasa-in-docker-compose). 
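
However you deploy, a quick way to smoke-test that the assistant is reachable is to post a message to its REST webhook. This is just a sketch; it assumes you start the Rasa container with `run` instead of `shell`, publish port 5005 (`-p 5005:5005`), and have the `rest` channel enabled in your `credentials.yml`:

```python
import requests

# send a test message to the REST channel of the running container
response = requests.post(
    "http://localhost:5005/webhooks/rest/webhook",
    json={"sender": "test_user", "message": "Hi there!"},
)

# each entry is one bot response, e.g. {"recipient_id": "test_user", "text": "..."}
for bot_message in response.json():
    print(bot_message)
```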
diff --git a/docs/docs/docker/deploying-in-docker-compose.mdx b/docs/docs/docker/deploying-in-docker-compose.mdx new file mode 100644 index 000000000000..d61bd4b3d5c2 --- /dev/null +++ b/docs/docs/docker/deploying-in-docker-compose.mdx @@ -0,0 +1,138 @@ +--- +id: deploying-in-docker-compose +sidebar_label: Deploying a Rasa Open Source Assistant in Docker Compose +title: Deploying a Rasa Open Source Assistant in Docker Compose +description: Use Docker Compose to deploy a Rasa Open Source assistant +--- +<!-- this file is version specific, do not use `@site/...` syntax --> +import variables from '../variables.json'; + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="deploying-rasa-in-docker-compose"></a> + +If you would like to deploy your assistant without Rasa X, you can do so by deploying it in Docker Compose. +To deploy Rasa X and your assistant together, see the [Recommended Deployment Methods](../how-to-deploy.mdx#recommended-deployment-methods). + +## Installing Docker + +If you're not sure if you have Docker installed, you can check by running: + +```bash +docker -v && docker-compose -v +# Docker version 18.09.2, build 6247962 +# docker-compose version 1.23.2, build 1110ad01 +``` + +If Docker is installed on your machine, the output should show you your installed +versions of Docker and Docker Compose. If the command doesn't work, you'll have to +install Docker. +See [Docker Installation](https://docs.docker.com/install/) for details. + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="docker-compose-configuring-channels"></a> + +## Configuring Channels + +To run your AI assistant in production, don't forget to configure your required +[Messaging and Voice Channels](../messaging-and-voice-channels.mdx) in `credentials.yml`. For example, to add a +REST channel, uncomment this section in the `credentials.yml`: + +```yaml +rest: + # you don't need to provide anything here - this channel doesn't + # require any credentials +``` + +The REST channel will open your bot up to incoming requests at the `/webhooks/rest/webhook` endpoint. + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="running-multiple-services"></a> + +## Using Docker Compose to Run Multiple Services + +Docker Compose provides an easy way to run multiple containers together without +having to run multiple commands or configure networks. This is essential when you +want to deploy an assistant that also has an action server. + +Start by creating a file called `docker-compose.yml`: + +```bash +touch docker-compose.yml +``` + +Add the following content to the file: + +<pre><code parentName="pre" className="language-yaml"> +{`version: '3.0' +services: + rasa: + image: rasa/rasa:${variables.release}-full + ports: + - 5005:5005 + volumes: + - ./:/app + command: + - run`}</code></pre> + +The file starts with the version of the Docker Compose specification that you +want to use. +Each container is declared as a `service` within the `docker-compose.yml`. +The first service is the `rasa` service, which runs your Rasa server. + +To add the action server, add the image of your action server code. To learn how to deploy +an action server image, see [Building an Action Server Image](../how-to-deploy.mdx#building-an-action-server-image). 
+ +<pre><code parentName="pre" className="language-yaml"> +{`version: '3.0' +services: + rasa: + image: rasa/rasa:${variables.release}-full + ports: + - 5005:5005 + volumes: + - ./:/app + command: + - run + app: + image: <image:tag> + expose: 5055`}</code></pre> + +The `expose: 5055` is what allows the `rasa` service to reach the `app` service on that port. +To instruct the `rasa` service to send its action requests to that endpoint, add it to your `endpoints.yml`: + +```yaml +action_endpoint: + url: http://app:5055/webhook +``` + +To run the services configured in your `docker-compose.yml`, execute: + +```bash +docker-compose up +``` + +You should then be able to interact with your bot via requests to port 5005, on the webhook endpoint that +corresponds to a [configured channel](./deploying-in-docker-compose.mdx#docker-compose-configuring-channels): + +```bash +curl -XPOST http://localhost:5005/webhooks/rest/webhook \ + -H "Content-type: application/json" \ + -d '{"sender": "test", "message": "hello"}' +``` + +## Configuring a Tracker Store + +By default, all conversations are saved in memory. This means that all +conversations are lost as soon as you restart the Rasa server. +If you want to persist your conversations, you can use a different +[Tracker Store](../tracker-stores.mdx). + +To add a tracker store to a Docker Compose deployment, you need to add a new +service to your `docker-compose.yml` and modify the `endpoints.yml` to add +the new tracker store, pointing to your new service. More information about how +to do so can be found in the tracker store documentation: + +* [SQLTrackerStore](../tracker-stores.mdx#sql-tracker-store) + +* [RedisTrackerStore](../tracker-stores.mdx#redistrackerstore) + +* [MongoTrackerStore](../tracker-stores.mdx#mongotrackerstore) + +* [Custom Tracker Store](../tracker-stores.mdx#custom-tracker-store) diff --git a/docs/docs/domain.mdx b/docs/docs/domain.mdx new file mode 100644 index 000000000000..6c4bbc99da21 --- /dev/null +++ b/docs/docs/domain.mdx @@ -0,0 +1,460 @@ +--- +id: domain +sidebar_label: Domain +title: Domain +--- + +<!-- TODO: make sure all available options are documented for each parameter. --> + +The `Domain` defines the universe in which your assistant operates. +It specifies the `intents`, `entities`, `slots`, `responses` and `actions` +your bot should know about. It also defines a `session_config` to configure +conversation sessions. + +As an example, the domain created by `rasa init` has the following yaml definition: +<!-- TODO: use an example with all of the pieces! e.g. formbot? --> + +```yml (docs/sources/rasa/cli/initial_project/domain.yml) +``` + +## Intents + +<!-- TODO: intro info about intents --> + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="use-entities"></a> + +### Ignoring entities for certain intents + +If you want all entities to be ignored for certain intents, you can +add the `use_entities: []` parameter to the intent in your domain +file like this: + +```yaml +intents: + - greet: + use_entities: [] +``` + +To ignore some entities or explicitly take only certain entities +into account you can use this syntax: + +```yaml +intents: +- greet: + use_entities: + - name + - first_name + ignore_entities: + - location + - age +``` + +This means that excluded entities for those intents will be unfeaturized and therefore +will not impact the next action predictions. This is useful when you have +an intent where you don't care about the entities being picked up.
If you list +your intents as normal without this parameter, the entities will be +featurized as normal. + +:::note +If you really want these entities not to influence action prediction, we +suggest you make the slots with the same name of type `unfeaturized`. + +::: + +## Entities + +The `entities` section lists all entities +extracted by any [entity extractor](./components/entity-extractors.mdx) in your +NLU pipeline. + +For example: + +```yaml +entities: + - PERSON # entity extracted by SpacyEntityExtractor + - time # entity extracted by DucklingHTTPExtractor + - membership_type # custom entity extracted by CRFEntityExtractor + - priority # custom entity extracted by CRFEntityExtractor +``` + +## Slots + +Slots are your bot's memory. They act as a key-value store +which can be used to store information the user provided (e.g. their home city) +as well as information gathered about the outside world (e.g. the result of a +database query). + +Most of the time, you want slots to influence how the dialogue progresses. +There are different slot types for different behaviors. + +For example, if your user has provided their home city, you might +have a `text` slot called `home_city`. If the user asks for the +weather, and you *don't* know their home city, you will have to ask +them for it. A `text` slot only tells Rasa Core whether the slot +has a value. The specific value of a `text` slot (e.g. Bangalore +or New York or Hong Kong) doesn't make any difference. + +If you just want to store some data, but don't want it to affect the flow +of the conversation, use an `unfeaturized` slot. +If the value itself is important, use the [slot type](./domain.mdx#slot-types) that +fits the type of behavior you want in your stories. + +Define your slot in your domain according to its slot type, following one +of the examples below. + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="slot-types"></a> + +### Slot Types + +<!-- TODO: make this a nice table instead. --> + +#### Text Slot + +* **Option** + + `text` + +* **Use For** + + User preferences where you only care whether or not they've + been specified. + + + +* **Example** + + ```yaml + slots: + cuisine: + type: text + ``` + + + +* **Description** + + Results in the feature of the slot being set to `1` if any value is set. + Otherwise the feature will be set to `0` (no value is set). + + +#### Boolean Slot + + +* **Option** + + `bool` + +* **Use For** + + True or False + + + +* **Example** + + ```yaml + slots: + is_authenticated: + type: bool + ``` + + + +* **Description** + + Checks if the slot is set and if it is `True` + + +#### Categorical Slot + +* **Option** + + `categorical` + +* **Use For** + + Slots which can take one of N values + + + +* **Example** + + ```yaml + slots: + risk_level: + type: categorical + values: + - low + - medium + - high + ``` + + + +* **Description** + + Creates a one-hot encoding describing which of the `values` matched. + A default value `__other__` is automatically added to the user-defined + values. All values encountered which are not explicitly defined in the + domain are mapped to `__other__` for featurization. The value + `__other__` should not be used as a user-defined value; if it is, it + will still behave as the default to which all unseen values are mapped.
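+To make the featurization concrete, here is a small illustration (plain Python, not part of the Rasa API) of how a categorical slot value could be mapped to a one-hot vector over the user-defined values plus the automatic `__other__` bucket:
+
+```python
+# Illustration only: one-hot featurization of a categorical slot with an
+# automatic __other__ bucket for values that are not defined in the domain.
+values = ["low", "medium", "high", "__other__"]
+
+
+def featurize(value):
+    # Any value that is not explicitly defined falls into the __other__ bucket.
+    bucket = value if value in values[:-1] else "__other__"
+    return [1.0 if v == bucket else 0.0 for v in values]
+
+
+print(featurize("medium"))   # [0.0, 1.0, 0.0, 0.0]
+print(featurize("extreme"))  # [0.0, 0.0, 0.0, 1.0]
+```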
+ + +#### Float Slot + + +* **Option** + + `float` + +* **Use For** + + Continuous values + + + +* **Example** + + ```yaml + slots: + temperature: + type: float + min_value: -100.0 + max_value: 100.0 + ``` + + + +* **Defaults** + + `max_value=1.0`, `min_value=0.0` + + + +* **Description** + + All values below `min_value` will be treated as `min_value`, the same + happens for values above `max_value`. Hence, if `max_value` is set to + `1`, there is no difference between the slot values `2` and `3.5` in + terms of featurization (e.g. both values will influence the dialogue in + the same way and the model can not learn to differentiate between them). + + +#### List Slot + + +* **Option** + + `list` + +* **Use For** + + Lists of values + + + +* **Example** + + ```yaml + slots: + shopping_items: + type: list + ``` + + + +* **Description** + + The feature of this slot is set to `1` if a value with a list is set, + where the list is not empty. If no value is set, or the empty list is the + set value, the feature will be `0`. The **length of the list stored in + the slot does not influence the dialogue**. + + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="unfeaturized-slot"></a> + +#### Unfeaturized Slot + +* **Option** + + `unfeaturized` + +* **Use For** + + Data you want to store which shouldn't influence the dialogue flow + + + +* **Example** + + ```yaml + slots: + internal_user_id: + type: unfeaturized + ``` + + + +* **Description** + + There will not be any featurization of this slot, hence its value does + not influence the dialogue flow and is ignored when predicting the next + action the bot should run. + + +#### Custom Slot Types + +Maybe your restaurant booking system can only handle bookings +for up to 6 people. In this case you want the *value* of the +slot to influence the next selected action (and not just whether +it's been specified). You can do this by defining a custom slot class. + +In the code below, we define a slot class called `NumberOfPeopleSlot`. +The featurization defines how the value of this slot gets converted to a vector +to our machine learning model can deal with. +Our slot has three possible “values”, which we can represent with +a vector of length `2`. + +| | | +|--------|----------------| +|`(0,0)` |not yet set | +|`(1,0)` |between 1 and 6 | +|`(0,1)` |more than 6 | + +```python +from rasa.core.slots import Slot + +class NumberOfPeopleSlot(Slot): + + def feature_dimensionality(self): + return 2 + + def as_feature(self): + r = [0.0] * self.feature_dimensionality() + if self.value: + if self.value <= 6: + r[0] = 1.0 + else: + r[1] = 1.0 + return r +``` + +Now we also need some training stories, so that Rasa Core +can learn from these how to handle the different situations: + +```story +# story1 +... +* inform{"people": "3"} + - action_book_table +... +# story2 +* inform{"people": "9"} + - action_explain_table_limit +``` + +### Slot Auto-fill + +If your NLU model picks up an entity, and your domain contains a +slot with the same name, the slot will be set automatically. For example: + +```story +# story_01 +* greet{"name": "Ali"} + - slot{"name": "Ali"} + - utter_greet +``` + +In this case, you don't have to include the `- slot{}` part in the +story, because it is automatically picked up. 
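+For auto-fill to apply, the domain needs an entity and a slot that share the same name. A minimal sketch (the `name` entity and slot are only examples):
+
+```yaml
+entities:
+  - name
+
+slots:
+  name:
+    type: text
+```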
+ +To disable this behavior for a particular slot, you can set the +`auto_fill` attribute to `False` in the domain file: + +```yaml +slots: + name: + type: text + auto_fill: False +``` + +### Initial slot values + +You can provide an initial value for a slot in your domain file: + +```yaml +slots: + num_fallbacks: + type: float + initial_value: 0 +``` + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="domain-responses"></a> + + +## Responses + +Responses are actions that simply send a message to a user without running any custom code or +returning events. These responses can be defined directly in the domain file and can include +rich content such as buttons and attachments. For more information on responses, +see [Responses](./responses) + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="session-config"></a> + +## Actions + +[Actions](./actions) are the things your bot can actually do. +For example, an action could: + +* respond to a user, + +* make an external API call, + +* query a database, or + +* just about anything! + +All custom actions should be listed in your domain, except responses which need not be listed +under `actions:` as they are already listed under `responses:`. + +## Session configuration + +A conversation session represents the dialogue between the assistant and the user. +Conversation sessions can begin in three ways: + +1. the user begins the conversation with the assistant, + +2. the user sends their first message after a configurable period of inactivity, or + +3. a manual session start is triggered with the `/session_start` intent message. + +You can define the period of inactivity after which a new conversation +session is triggered in the domain under the `session_config` key. +`session_expiration_time` defines the time of inactivity in minutes after which a +new session will begin. `carry_over_slots_to_new_session` determines whether +existing set slots should be carried over to new sessions. + +The default session configuration looks as follows: + +```yaml +session_config: + session_expiration_time: 60 # value in minutes, 0 means infinitely long + carry_over_slots_to_new_session: true # set to false to forget slots between sessions +``` + +This means that if a user sends their first message after 60 minutes of inactivity, a +new conversation session is triggered, and that any existing slots are carried over +into the new session. Setting the value of `session_expiration_time` to 0 means +that sessions will not end (note that the `action_session_start` action will still +be triggered at the very beginning of conversations). + +:::note +A session start triggers the default action `action_session_start`. Its default +implementation moves all existing slots into the new session. Note that all +conversations begin with an `action_session_start`. Overriding this action could +for instance be used to initialize the tracker with slots from an external API +call, or to start the conversation with a bot message. The docs on +[Customizing the session start action](./default-actions.mdx#custom-session-start) shows you how to do that. + +::: diff --git a/docs/docs/event-brokers.mdx b/docs/docs/event-brokers.mdx new file mode 100644 index 000000000000..442456b698dd --- /dev/null +++ b/docs/docs/event-brokers.mdx @@ -0,0 +1,252 @@ +--- +id: event-brokers +sidebar_label: Event Brokers +title: Event Brokers +description: Find out how open source chatbot framework Rasa allows you to stream events to a message broker. 
+--- + +An event broker allows you to connect your running assistant to other services that process the data coming +in from conversations. For example, you could [connect your live assistant to +Rasa X](https://rasa.com/docs/rasa-x/installation-and-setup/deploy#connect-rasa-deployment/) +to review and annotate conversations or forward messages to an external analytics +service. The event broker publishes messages to a message streaming service, +also known as a message broker, to forward Rasa [Events](./events.mdx) from the Rasa server to other services. + +## Format + +All events are streamed to the broker as serialized dictionaries every time +the tracker updates its state. An example event emitted from the `default` +tracker looks like this: + +```json +{ + "sender_id": "default", + "timestamp": 1528402837.617099, + "event": "bot", + "text": "what your bot said", + "data": "some data about e.g. attachments", + "metadata": { + "a key": "a value" + } +} +``` + +The `event` field takes the event's `type_name` (for more on event +types, check out the [Events](./events.mdx) docs). + +## Pika Event Broker + +The example implementation we're going to show you here uses +[Pika](https://pika.readthedocs.io), the Python client library for +[RabbitMQ](https://www.rabbitmq.com). + +### Adding a Pika Event Broker Using the Endpoint Configuration + +You can instruct Rasa to stream all events to your Pika event broker by adding an `event_broker` section to your +`endpoints.yml`: + +```yml (docs/sources/data/test_endpoints/event_brokers/pika_endpoint.yml) +``` + +Rasa will automatically start streaming events when you restart the Rasa server. + +### Adding a Pika Event Broker in Python + +Here is how you add it using Python code: + +```python +from rasa.core.brokers.pika import PikaEventBroker +from rasa.core.tracker_store import InMemoryTrackerStore + +pika_broker = PikaEventBroker('localhost', + 'username', + 'password', + queues=['rasa_events']) + +tracker_store = InMemoryTrackerStore(domain=domain, event_broker=pika_broker) +``` + +### Implementing a Pika Event Consumer + +You need to have a RabbitMQ server running, as well as another application +that consumes the events. This consumer needs to implement Pika's +`start_consuming()` method with a `callback` action. Here's a simple +example: + +```python +import json +import pika + + +def _callback(ch, method, properties, body): + # Do something useful with your incoming message body here, e.g. + # saving it to a database + print('Received event {}'.format(json.loads(body))) + +if __name__ == '__main__': + + # RabbitMQ credentials with username and password + credentials = pika.PlainCredentials('username', 'password') + + # Pika connection to the RabbitMQ host - typically 'rabbit' in a + # docker environment, or 'localhost' in a local environment + connection = pika.BlockingConnection( + pika.ConnectionParameters('rabbit', credentials=credentials)) + + # start consumption of channel + channel = connection.channel() + channel.basic_consume(_callback, + queue='rasa_events', + no_ack=True) + channel.start_consuming() +``` + +## Kafka Event Broker + +It is possible to use [Kafka](https://kafka.apache.org/) as the main broker for your +events. In this example, we are going to use the [python-kafka](https://kafka-python.readthedocs.io/en/master/usage.html) library, a Kafka client written in Python.
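+Before wiring Kafka into Rasa, it can help to confirm that your broker is reachable. A minimal sketch using `kafka-python` (this assumes a broker listening on `localhost:9092` and that the `kafka-python` package is installed):
+
+```python
+# Quick connectivity check, independent of Rasa: send one test message to the
+# topic that Rasa will later publish events to.
+from kafka import KafkaProducer
+
+producer = KafkaProducer(bootstrap_servers="localhost:9092")
+producer.send("rasa_events", b'{"event": "connectivity_test"}')
+producer.flush()
+producer.close()
+```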
+ +### Adding a Kafka Event Broker Using the Endpoint Configuration + +You can instruct Rasa to stream all events to your Kafka event broker by adding an `event_broker` section to your +`endpoints.yml`. + +Using the `SASL_PLAINTEXT` protocol, the endpoints file must have the following entries: + +```yml (docs/sources/data/test_endpoints/event_brokers/kafka_plaintext_endpoint.yml) +``` + +If using the SSL protocol, the endpoints file should look like: + +```yml (docs/sources/data/test_endpoints/event_brokers/kafka_ssl_endpoint.yml) +``` + +### Adding a Kafka Broker in Python + +The code below shows an example of how to instantiate a Kafka producer in your script. + +```python +from rasa.core.brokers.kafka import KafkaEventBroker +from rasa.core.tracker_store import InMemoryTrackerStore + +kafka_broker = KafkaEventBroker(host='localhost:9092', + topic='rasa_events') + +tracker_store = InMemoryTrackerStore(domain=domain, event_broker=kafka_broker) +``` + +The host variable can be either a list of broker addresses or a single one. +If only one broker address is available, the client will connect to it and +request the cluster metadata. +Therefore, the remaining brokers in the cluster can be discovered +automatically through the data served by the first connected broker. + +To pass more than one broker address as an argument, they must be passed in a +list of strings, e.g.: + +```python +kafka_broker = KafkaEventBroker(host=['kafka_broker_1:9092', + 'kafka_broker_2:2030', + 'kafka_broker_3:9092'], + topic='rasa_events') +``` + +### Authentication and Authorization + +Rasa's Kafka producer accepts two types of security protocols - `SASL_PLAINTEXT` and `SSL`. + +For development environments, or if the broker servers and clients are located +on the same machine, you can use simple authentication with `SASL_PLAINTEXT`. +By using this protocol, the credentials and messages exchanged between the clients and servers +will be sent in plaintext. Thus, this is not the most secure approach, but since it's simple +to configure, it is useful for simple cluster configurations. +The `SASL_PLAINTEXT` protocol requires the setup of the `username` and `password` +previously configured in the broker server. + +```python +kafka_broker = KafkaEventBroker(host='kafka_broker:9092', + sasl_plain_username='kafka_username', + sasl_plain_password='kafka_password', + security_protocol='SASL_PLAINTEXT', + topic='rasa_events') +``` + +If the clients or the brokers in the Kafka cluster are located on different +machines, it's important to use the SSL protocol to ensure encryption of data and client +authentication. After generating valid certificates for the brokers and the +clients, the path to the certificate and key generated for the producer must +be provided as arguments, as well as the CA's root certificate. + +```python +kafka_broker = KafkaEventBroker(host='kafka_broker:9092', + ssl_cafile='CARoot.pem', + ssl_certfile='certificate.pem', + ssl_keyfile='key.pem', + ssl_check_hostname=True, + security_protocol='SSL', + topic='rasa_events') +``` + +If the `ssl_check_hostname` parameter is enabled, the clients will verify +whether the broker's hostname matches the certificate. It's used on client connections +and inter-broker connections to prevent man-in-the-middle attacks. + +### Implementing a Kafka Event Consumer + +The parameters used to create a Kafka consumer are the same as those used to create the producer, +according to the security protocol being used.
The following implementation shows an example: + +```python +from kafka import KafkaConsumer +import json + +consumer = KafkaConsumer('rasa_events', + bootstrap_servers=['localhost:29093'], + value_deserializer=lambda m: json.loads(m.decode('utf-8')), + security_protocol='SSL', + ssl_check_hostname=False, + ssl_cafile='CARoot.pem', + ssl_certfile='certificate.pem', + ssl_keyfile='key.pem') + +for message in consumer: + print(message.value) +``` + +## SQL Event Broker + +It is possible to use an SQL database as an event broker. Connections to databases are established using +[SQLAlchemy](https://www.sqlalchemy.org/), a Python library which can interact with many +different types of SQL databases, such as [SQLite](https://sqlite.org/index.html), +[PostgreSQL](https://www.postgresql.org/) and more. The default Rasa installation allows connections to SQLite +and PostgreSQL databases. To see other options, please see the +[SQLAlchemy documentation on SQL dialects](https://docs.sqlalchemy.org/en/13/dialects/index.html). + +### Adding a SQL Event Broker Using the Endpoint Configuration + +To instruct Rasa to save all events to your SQL event broker, add an `event_broker` section to your +`endpoints.yml`. For example, a valid SQLite configuration +could look like the following: + +```yaml +event_broker: + type: SQL + dialect: sqlite + db: events.db +``` + +PostgreSQL databases can be used as well: + +```yaml +event_broker: + type: SQL + url: 127.0.0.1 + port: 5432 + dialect: postgresql + username: myuser + password: mypassword + db: mydatabase +``` + +With this configuration applied, Rasa will create a table called `events` on the database, +where all events will be added. diff --git a/docs/docs/events.mdx b/docs/docs/events.mdx new file mode 100644 index 000000000000..cc21f6b714b0 --- /dev/null +++ b/docs/docs/events.mdx @@ -0,0 +1,21 @@ +--- +id: events +sidebar_label: Events +title: Events +description: Use events in open source library Rasa Core to support functionalities like resetting slots, scheduling a reminder or pausing a conversation. +--- + +TODO: This page needs to be rewritten for 2.0! +Ideally it will be sourced from the SDK repo instead (?) +We want to show the user, most importantly, how to use the +events from the SDK. Also helpful would be the JSON form of +the events that make it back to rasa (for example if you +write your own SDK) + +All of that info can be found here: [https://github.com/RasaHQ/rasa-sdk/blob/master/rasa_sdk/events.py](https://github.com/RasaHQ/rasa-sdk/blob/master/rasa_sdk/events.py) + +An action's `run()` method returns a list of events. For more information on +the different types of events, see [Events](./events). There is an example of a `SlotSet` event +[above](./actions.mdx#custom-action-example). The action itself will automatically be added to the +tracker as an `ActionExecuted` event. If the action should not trigger any +other events, it should return an empty list.
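+As a minimal sketch (using the Rasa SDK; the action and slot names here are only examples), a custom action that returns events could look like this:
+
+```python
+from typing import Any, Dict, List, Text
+
+from rasa_sdk import Action, Tracker
+from rasa_sdk.events import SlotSet
+from rasa_sdk.executor import CollectingDispatcher
+
+
+class ActionSetExampleSlot(Action):
+    def name(self) -> Text:
+        return "action_set_example_slot"
+
+    def run(
+        self,
+        dispatcher: CollectingDispatcher,
+        tracker: Tracker,
+        domain: Dict[Text, Any],
+    ) -> List[Dict[Text, Any]]:
+        # Every event returned here is applied to the conversation tracker;
+        # return an empty list if no other events should be triggered.
+        return [SlotSet("example_slot", "example value")]
+```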
diff --git a/docs/docs/fallback-handoff.mdx b/docs/docs/fallback-handoff.mdx new file mode 100644 index 000000000000..bbc03e94495f --- /dev/null +++ b/docs/docs/fallback-handoff.mdx @@ -0,0 +1,310 @@ +--- +id: fallback-handoff +sidebar_label: Fallback and Human Handoff +title: Fallback and Human Handoff +--- + +import useBaseUrl from '@docusaurus/useBaseUrl'; + +<!-- TODO: add info about human handoff --> + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="failing-gracefully"></a> + +Even if you design your bot perfectly, users will inevitably say things to your +assistant that you did not anticipate. In these cases, your assistant will fail, +and it's important you ensure it does so gracefully. + +## Fallback policy + +One of the most common failures is low NLU confidence, which is handled very nicely with +the TwoStageFallbackPolicy. You can enable it by adding the following to your configuration file, + +```yaml +policies: + - name: TwoStageFallbackPolicy + nlu_threshold: 0.8 +``` + +and adding the `out_of_scope` intent to your `domain.yml` file: + +```yaml +intents: +- out_of_scope +``` + +When the nlu confidence falls below the defined threshold, the bot will prompt the user to +rephrase their message. If the bot isn't able to get their message three times, there +will be a final action where the bot can e.g. hand off to a human. + +To try this out, retrain your model and send a message like “order me a pizza” to your bot: + +```bash +rasa train +rasa shell +``` + +There are also a bunch of ways in which you can customize this policy. In Sara, our demo bot, +we've customized it to suggest intents to the user within a certain confidence range to make +it easier for the user to give the bot the information it needs. + +This is done by customizing the action `ActionDefaultAskAffirmation` as shown in the +[Sara rasa-demo action server](https://github.com/RasaHQ/rasa-demo/blob/master/actions/actions.py#L443) +We define some intent mappings to make it more intuitive to the user what an intent means. + + +## Fallback Actions + +Sometimes you want to revert to a fallback action, such as replying, +`"Sorry, I didn't understand that"`. You can handle fallback cases by adding appropriate +rules. Rasa Open Source comes with two default implementations for handling these +fallbacks. +In addition, you can also use [Custom Actions](./actions.mdx#custom-actions) to run any +custom code. + +### Handling Low NLU Confidence + +Although Rasa's [Intent Classifier](./components/intent-classifiers.mdx) will +generalize to unseen messages, some +messages might receive a low classification confidence. +To handle messages with low confidence, we recommend adding the +[FallbackClassifier](./components/intent-classifiers.mdx#fallbackclassifier) to your NLU pipeline. +The [FallbackClassifier](./components/intent-classifiers.mdx#fallbackclassifier) will +predict an intent `nlu_fallback` when all other intent predictions fall below +the configured confidence threshold. + +#### Writing Stories / Rules for Messages with Low Confidence + +When you add the [FallbackClassifier](./components/intent-classifiers.mdx#fallbackclassifier) to +your NLU pipeline, you can treat +messages with low classification confidence as any other intent. 
The following +[Rule](./rules.mdx) will ask the user to rephrase when they send a message that is +classified with low confidence: + +```yaml +rules: +- rule: Ask the user to rephrase whenever they send a message with low NLU confidence + steps: + - intent: nlu_fallback + - action: utter_please_rephrase +``` + +Using [Rules](./rules.mdx) or [Stories](./stories.mdx) you can implement any desired +fallback behavior. + +#### Two-Stage-Fallback + +The `Two-Stage-Fallback` handles low NLU confidence in multiple stages +by trying to disambiguate the user input. + +##### Requirements + +* Add the [RulePolicy](./policies.mdx#rule-policy) to your policy configuration + before using the `Two-Stage-Fallback` +* Before using the `Two-Stage-Fallback`, make sure to add the + `out_of_scope` intent to your [Domain](./domain.mdx). + When users send messages with + the intent `out_of_scope` during the fallback (e.g. by pressing a button), + Rasa Open Source will know that the users denied the given intent suggestions. + +##### Usage + +- If an NLU prediction has a low confidence score, the user is asked to affirm + the classification of the intent. (Default action: + `action_default_ask_affirmation`) + + - If they affirm by sending a message with high NLU confidence (e.g. by pressing + a button), the story continues as if the intent was classified + with high confidence from the beginning. + - If they deny by sending a message with the intent `out_of_scope`, the user is + asked to rephrase their message. + +- Rephrasing (default action: `action_default_ask_rephrase`) + + - If the classification of the rephrased intent was confident, the story + continues as if the user had this intent from the beginning. + - If the rephrased intent was not classified with high confidence, the user + is asked to affirm the classified intent. + +- Second affirmation (default action: `action_default_ask_affirmation`) + + - If they affirm by sending a message with high NLU confidence (e.g. by pressing + a button), the story continues as if the user had this intent from the beginning. + - If the user denies by sending a message with the intent `out_of_scope`, the + original intent is classified as the specifies `deny_suggestion_intent_name`, + and an ultimate fallback action `fallback_nlu_action_name` is + triggered (e.g. a handoff to a human). + +Rasa Open Source provides default implementations for +`action_default_ask_affirmation` and `action_default_ask_rephrase`. +The default implementation of `action_default_ask_rephrase` utters +the response `utter_ask_rephrase`, so make sure to specify this +response in your domain file. +The implementation of both actions can be overwritten with +[Custom Actions](./actions.mdx#custom-actions). + +To use the `Two-Stage-Fallback` for messages with low NLU confidence, add the +following [Rule](./rules.mdx) to your training data. This rule will make sure that the +`Two-Stage-Fallback` will be activated whenever a message is received with +low classification confidence. + +```yaml +rules: +- rule: Implementation of the Two-Stage-Fallback + steps: + - intent: nlu_fallback + - action: two_stage_fallback + - active_loop: two_stage_fallback +``` + +### Handling Low Core Confidence + +As users might send unexpected messages, +it is possible that their behavior will lead them down unknown conversation paths. +Rasa's machine learning policies such as the [TED Policy](./policies.mdx#ted-policy) +are optimized to handle these unknown paths. 
+ +To handle cases where even the machine learning policies can't predict the +next action with high confidence, make sure to add the +[Rule Policy](./policies.mdx#rule-policy) to your +policy configuration. The [Rule Policy](./policies.mdx#rule-policy) will predict a +default action if no [Policy](./policies.mdx) has a next action prediction with +confidence above a configurable threshold. + +You can configure the action that is run in case of low Core confidence as well as +the corresponding confidence threshold as follows: + +```yaml +policies: +- name: RulePolicy + # Confidence threshold for the `core_fallback_action_name` to apply. + # The action will apply if no other action was predicted with + # a confidence >= core_fallback_threshold + core_fallback_threshold: 0.4 + core_fallback_action_name: "action_default_fallback" + enable_fallback_prediction: True +``` + +:::note + +If you do not want the `Rule Policy` to predict a default action in case of low Core +confidence, specify `enable_fallback_prediction: False` in the configuration of the +policy. +::: + + +`action_default_fallback` is a default action in Rasa Open Source that sends the +`utter_default` response to the user. Make sure to specify +the `utter_default` in your domain file. It will also revert to the +state of the conversation before the user message that caused the +fallback, so it will not influence the prediction of future actions. + +You can also create your own custom action to use as a fallback (see +[Custom Actions](./actions.mdx#custom-actions) for more info on custom actions). +The following snippet is an implementation of a custom action which does the same as +`action_default_fallback` but dispatches a different template +`my_custom_fallback_template`: + +```python +from typing import Any, Text, Dict, List + +from rasa_sdk import Action, Tracker +from rasa_sdk.events import UserUtteranceReverted +from rasa_sdk.executor import CollectingDispatcher + +class ActionDefaultFallback(Action): + """Executes the fallback action and goes back to the previous state + of the dialogue""" + + def name(self) -> Text: + return "action_default_fallback" + + async def run( + self, + dispatcher: CollectingDispatcher, + tracker: Tracker, + domain: Dict[Text, Any], + ) -> List[Dict[Text, Any]]: + dispatcher.utter_message(template="my_custom_fallback_template") + + # Revert user message which led to fallback. + return [UserUtteranceReverted()] +``` + + + +<img alt="Intent Mappings" src={useBaseUrl("/img/intent_mappings.png")} width="240" /> + +## Out of scope intent + +It is good practice to also handle questions you know your users may ask, but for which you haven't necessarily implemented a user goal yet. + +You can define an `out_of_scope` intent to handle generic out of scope requests, like “I'm hungry” and have +the bot respond with a default message like “Sorry, I can't handle that request”: + +```md +* out_of_scope + utter_out_of_scope +``` + +We'll need to add NLU data for the `out_of_scope` intent as well: + +```md +## intent:out_of_scope +- I want to order food +- What is 2 + 2? +- Who's the US President? +- I need a job +``` + +And finally we'll add a response to our `domain.yml` file: + +```yaml +responses: + utter_out_of_scope: + - text: Sorry, I can't handle that request.
+``` + +We can now re-train, and test this addition + +```bash +rasa train +rasa shell +``` + +Going one step further, if you observe your users asking for certain things, that you'll +want to turn into a user goal in future, you can handle these as separate intents, to let +the user know you've understood their message, but don't have a solution quite yet. E.g., +let's say the user asks “I want to apply for a job at Rasa”, we can then reply with +“I understand you're looking for a job, but I'm afraid I can't handle that skill yet.” + +```md +* ask_job + utter_job_not_handled +``` + +:::note +Here's a minimal checklist of files we modified to help our assistant fail gracefully: + +* `data/nlu.md`: + + * Add training data for the `out_of_scope` intent & any specific out of scope intents that you want to handle seperately + +* `data/stories.md`: + + * Add stories for any specific out of scope intents + +* `domain.yml`: + + * Add the `out_of_scope` intent & any specific out of scope intents + + * Add an `utter_out_of_scope` response & responses for any specific out of scope intents + +* `actions.py`: + + * Customize `ActionDefaultAskAffirmation` to suggest intents for the user to choose from + +* `config.yml`: + + * Add the TwoStageFallbackPolicy to the `policies` section + +::: diff --git a/docs/docs/forms.mdx b/docs/docs/forms.mdx new file mode 100644 index 000000000000..87ca6b517e0e --- /dev/null +++ b/docs/docs/forms.mdx @@ -0,0 +1,465 @@ +--- +id: forms +sidebar_label: Forms +title: Forms +description: Follow a rule-based process of information gathering using FormActions in open source bot framework Rasa. +--- + +One of the most common conversation patterns is to collect a few pieces of +information from a user in order to do something (book a restaurant, call an +API, search a database, etc.). This is also called **slot filling**. + +## Usage + +To use forms with Rasa Open Source you need to make sure that the +[Rule Policy](./policies.mdx#rule-policy) is added to your policy configuration. +For example: + +```yaml +policies: +- ... # other policies +- name: RulePolicy +``` + +### Defining a Form + +Define a form by adding it to the `forms` section in your [Domain](./domain.mdx). +The name of the form is also the name of the action which you can use in +[Stories](./stories.mdx) or [Rules](./rules.mdx) to handle form executions. Further you +need to define [Slot Mappings](./forms.mdx#slot-mappings) for each slot which your +form should fill. +You can specify one or more slot mappings for each slot to be filled. + +The following example of a form `your_form` will fill only one slot +`age` from an extracted entity `age`. + +```yaml +forms: + your_form: + age: + - type: from_entity + entity: age +``` + +Once the form action gets called for the first time, the form gets activated and will +prompt the user for the next required slot value. It does this by +looking for a [response](./responses.mdx) called +`utter_ask_{form_name}_{slot_name}` or `utter_ask_{slot_name}` if the former isn't +found. Make sure to define these responses in your domain file for +each required slot. + +### Activating a Form + +To activate a form you need to add a [Story](./stories.mdx) or [Rule](./rules.mdx), +which describes when the assistant should run the form. 
In the case a specific intent +triggering a form, you can for example use the following rule: + +```yaml +- rule: Activate form + steps: + - intent: intent_which_activates_form + - action: your_form + - active_loop: your_form +``` + +:::note +The `active_loop: your_form` step indicates that the form should be activated after +`your_form` was run. +::: + +### Deactivating a Form + +A form will automatically deactivate itself once all required slots are filled. +You can describe your assistant's behavior for the end of a form with a rule or a story. +If you don't add an applicable story or rule, the assistant will automatically listen +for the next user message after the form is finished. +The following example runs the utterance `utter_all_slots_filled` as soon as the form +`your_form` filled all required slots. + +```yaml +- rule: Submit form + condition: + # Condition that form is active. + - active_loop: your_form + steps: + # Form is deactivated + - action: your_form + - active_loop: null + - slot_was_set: + - requested_slot: null + # The action we want to run when the form is submitted. + - action: utter_all_slots_filled +``` + +Users might want to break out of a form early. Please see +[Writing Stories / Rules for Unhappy Form Paths](./forms.mdx#writing-stories--rules-for-unhappy-form-paths) on how to +write stories or rules for this case. + +### Slot Mappings + +Rasa Open Source comes with four predefined functions to fill the slots of a form +based on the latest user message. Please see +[Custom Slot Mappings](./forms.mdx#custom-slot-mappings) if you need a custom function +to extract the required information. + +#### from_entity + +The `from_entity` mapping fills slots based on extracted entities. +It will look for an entity called `entity_name` to fill a slot `slot_name`. +If `intent_name` is `None`, the slot will be filled regardless of intent name. +Otherwise, the slot will only be filled if the user's intent is `intent_name`. + +If `role_name` and/or `group_name` are provided, the role/group +label of the entity also needs to match the given values. The slot mapping will not +apply if the intent of the message is `excluded_intent`. Note that you can +also define lists of intents for the parameters `intent` and `not_intent`. + +```yaml +forms: + your_form: + slot_name: + - type: from_entity + entity: entity_name + role: role_name + group: group name + intent: intent_name + not_intent: excluded_intent +``` + +#### from_text + +The `from_text` mapping will use the text of the next user utterance to fill the slot +`slot_name`. If `intent_name` is `None`, the slot will be filled regardless of intent name. +Otherwise, the slot will only be filled if the user's intent is `intent_name`. + +The slot mapping will not apply if the intent of the message is `excluded_intent`. +Note that you can define lists of intents for the parameters `intent` and `not_intent`. + +```yaml +forms: + your_form: + slot_name: + - type: from_text + intent: intent_name + not_intent: excluded_intent +``` + +#### from_intent + +The `from_intent` mapping will fill slot `slot_name` with value `my_value` if +user intent is `intent_name` or `None`. The slot mapping will not +apply if the intent of the message is `excluded_intent`. Note that you can +also define lists of intents for the parameters `intent` and `not_intent`. + +:::note +The slot mapping will not apply during the initial activation of the form. To fill +a slot based on the intent that activated the form, use the `from_trigger_intent` +mapping. 
+::: + +```yaml +forms: + your_form: + slot_name: + - type: from_intent + value: my_value + intent: intent_name + not_intent: excluded_intent +``` + +#### from_trigger_intent + +The `from_trigger_intent` mapping will fill slot `slot_name` with value `my_value` +if the form was activated by a user message with intent `intent_name`. +The slot mapping will not apply if the intent of the message is +`excluded_intent`. Note that you can +also define lists of intents for the parameters `intent` and `not_intent`. + +```yaml +forms: + your_form: + slot_name: + - type: from_trigger_intent + value: my_value + intent: intent_name + not_intent: excluded_intent +``` + +### Writing Stories / Rules for Unhappy Form Paths + +Your users will not always respond with the information you ask of them. +Typically, users will ask questions, make chitchat, change their mind, or otherwise +stray from the happy path. The way this works with forms is that a form will raise +an `ActionExecutionRejection` if the user didn't provide the requested information. +You need to handle events that might cause `ActionExecutionRejection` errors +with rules or stories. For example, if you expect your users to chitchat with your bot, +you could add a story like this: + +```yaml +- rule: Example of an unhappy path + condition: + # Condition that form is active. + - active_loop: your_form + steps: + # This unhappy path handles the case of an intent `chitchat`. + - intent: chitchat + - action: utter_chitchat + # Return to form after handling the `chitchat` intent + - action: your_form + - active_loop: your_form +``` + +In some situations, users may change their mind in the middle of the form action +and decide not to go forward with their initial request. In cases like this, the +assistant should stop asking for the requested slots. You can handle such situations +gracefully using a default action `action_deactivate_form` which will deactivate +the form and reset the requested slot. An example story of such conversation could +look as follows: + +```yaml +- rule: Example of an unhappy path + condition: + # Condition that form is active. + - active_loop: your_form + steps: + - intent: stop + - action: utter_ask_continue + - intent: stop + - action: action_deactivate_form + - active_loop: null +``` + +It is **strongly** recommended that you build these rules or stories using +[interactive learning](./writing-stories.mdx#using-interactive-learning). +If you write these rules / stories by hand you will likely miss important +things. + +## Advanced Usage + +Forms are fully customizable using [Custom Actions](./actions.mdx#custom-actions). + +### Validating Form Input + +After extracting a slot value from user input, you can validate the extracted slots. +By default Rasa Open Source only validates if any slot was filled after requesting +a slot. If nothing is extracted from the user’s utterance for any of the required slots, +an `ActionExecutionRejection` error will be raised, meaning the action execution was +rejected and therefore Rasa Open Source will fall back onto a different policy to +predict another action. + +You can implement a [Custom Action](./actions.mdx#custom-actions) `validate_{form_name}` +to validate any extracted slots. Make sure to add this action to the `actions` +section of your domain: + +```yaml +actions: +- ... # other actions +- validate_your_form +``` + +When the form is executed it will run your custom action. In your custom action +you can either + +- validate already extracted slots. 
You can retrieve them from the tracker by running + `tracker.get_extracted_slots`. +- use [Custom Slot Mappings](./forms.mdx#slot-mappings) to extract slot values. + +After validating the extracted slots, return `SlotSet` events for them. If you want +to mark a slot as invalid, return a `SlotSet` event which sets the value to `None`. +Note that if you don't return a `SlotSet` for an extracted slot, Rasa Open Source +will assume that the value is valid. + +The following example shows the implementation of a custom action +which validates that every extracted slot is valid. + +```python +from typing import Dict, Text, List, Any + +from rasa_sdk import Tracker +from rasa_sdk.events import EventType +from rasa_sdk.executor import CollectingDispatcher +from rasa_sdk import Action +from rasa_sdk.events import SlotSet + + +class ValidateSlots(Action): + def name(self) -> Text: + return "validate_your_form" + + def run( + self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict + ) -> List[EventType]: + extracted_slots: Dict[Text, Any] = tracker.get_extracted_slots() + + validation_events = [] + + for slot_name, slot_value in extracted_slots.items(): + # Check if slot is valid. + if self.is_valid(slot_value): + validation_events.append(SlotSet(slot_name, slot_value)) + else: + # Return a `SlotSet` event with value `None` to indicate that this + # slot still needs to be filled. + validation_events.append(SlotSet(slot_name, None)) + + return validation_events + + def is_valid(self, slot_value: Any) -> bool: + # Implementation of the validate function. + ... +``` + +### Custom Slot Mappings + +If none of the predefined [Slot Mappings](./forms.mdx#slot-mappings) fit your use +case, you can use the +[Custom Action](./actions.mdx#custom-actions) `validate_{form_name}` to write your own +extraction code. Rasa Open Source will trigger this function when the form is run. + +Make sure your custom action returns `SlotSet` events for every extracted value. +The following example shows the implementation of a custom slot mapping which sets +a slot based on the length of the last user message. + +```python +from typing import Dict, Text, List + +from rasa_sdk import Tracker +from rasa_sdk.events import EventType +from rasa_sdk.executor import CollectingDispatcher +from rasa_sdk import Action +from rasa_sdk.events import SlotSet + + +class ValidateSlots(Action): + def name(self) -> Text: + return "validate_your_form" + + def run( + self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict + ) -> List[EventType]: + text_of_last_user_message = tracker.latest_message.get("text") + + return [SlotSet("user_message_length", len(text_of_last_user_message))] +``` + +### Requesting Extra Slots + +If you make frequent changes to the required slots and don't want to retrain your +assistant when your form changes, you can also use a +[Custom Action](./actions.mdx#custom-actions) `validate_{form_name}` to define +which slots should be requested. Rasa Open Source will run your custom action whenever +the form validates user input. Set the slot `requested_slot` to the name of the slot +which should be extracted next. If all desired slots are filled, set `requested_slot` +to `None`. + +The following example shows the implementation of a custom action which requests +the three slots `last_name`, `first_name`, and `city`.
+ +```python +from typing import Dict, Text, List + +from rasa_sdk import Tracker +from rasa_sdk.events import EventType +from rasa_sdk.executor import CollectingDispatcher +from rasa_sdk import Action +from rasa_sdk.events import SlotSet + + +class ValidateSlots(Action): + def name(self) -> Text: + return "validate_your_form" + + def run( + self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict + ) -> List[EventType]: + required_slots = ["last_name", "first_name", "city"] + + for slot_name in required_slots: + if tracker.slots.get(slot_name) is None: + # The slot is not filled yet. Request the user to fill this slot next. + return [SlotSet("requested_slot", slot_name)] + + # All slots are filled. + return [SlotSet("requested_slot", None)] +``` + +### The requested_slot slot + +The slot `requested_slot` is automatically added to the domain as an +unfeaturized slot. If you want to make it featurized, you need to add it +to your domain file as a categorical slot. You might want to do this if you +want to handle your unhappy paths differently, depending on what slot is +currently being asked from the user. For example, your users might respond +to one of the bot's questions with another question, like *why do you need to know that?* +The response to this `explain` intent depends on where we are in the story. +In the restaurant case, your stories would look something like this: + +```story +## explain cuisine slot +* request_restaurant + - restaurant_form + - active_loop{"name": "restaurant_form"} + - slot{"requested_slot": "cuisine"} +* explain + - utter_explain_cuisine + - restaurant_form + - slot{"cuisine": "greek"} + ( ... all other slots the form set ... ) + - active_loop{"name": null} + +## explain num_people slot +* request_restaurant + - restaurant_form + - active_loop{"name": "restaurant_form"} + - slot{"requested_slot": "num_people"} +* explain + - utter_explain_num_people + - restaurant_form + - slot{"cuisine": "greek"} + ( ... all other slots the form set ... ) + - active_loop{"name": null} +``` + +Again, it is **strongly** recommended that you use +[interactive learning](./writing-stories.mdx#using-interactive-learning) to build these stories. + +### Using a Custom Action to Ask For the Next Slot + +As soon as the form determines which slot has to be filled next by the user, it will +execute the action `utter_ask_{form_name}_{slot_name}` or `utter_ask_{slot_name}` +to ask the user to provide the necessary information. If a regular utterance is not +enough, you can also use a custom action `action_ask_{form_name}__{slot_name}` or +`action_ask_{slot_name}` to ask for the next slot. + +```python +from typing import Dict, Text, List + +from rasa_sdk import Tracker +from rasa_sdk.events import EventType +from rasa_sdk.executor import CollectingDispatcher +from rasa_sdk import Action + + +class AskForSlotAction(Action): + def name(self) -> Text: + return "action_ask_age" + + def run( + self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict + ) -> List[EventType]: + dispatcher.utter_message(text="How old are you?") + return [] +``` + +## Debugging + +The first thing to try is running your bot with the `--debug` flag; see +[Command Line Interface](./command-line-interface.mdx) for details. +If you are just getting started, you probably only have a few hand-written stories. +This is a great starting point, but +you should give your bot to people to test **as soon as possible**.
One of the guiding principles +behind Rasa Core is: + +> Learning from real conversations is more important than designing hypothetical ones + +So don't try to cover every possibility in your hand-written stories before giving it to testers. +Real user behavior will always surprise you! diff --git a/docs/docs/generating-nlu-data.mdx b/docs/docs/generating-nlu-data.mdx new file mode 100644 index 000000000000..7baf2985d47e --- /dev/null +++ b/docs/docs/generating-nlu-data.mdx @@ -0,0 +1,193 @@ +--- +id: generating-nlu-data +sidebar_label: Generating NLU Data +title: Generating NLU Data +--- + +NLU (Natural Language Understanding) is the part of Rasa Open Source that performs +intent classification, entity extraction, and response retrieval. + +NLU will take in a sentence such as "I am looking for a French restaurant in the center +of town" and return structured data like: + +```json +{ + "intent": "search_restaurant", + "entities": { + "cuisine": "French", + "location": "center" + } +} +``` + +Building NLU models is hard, and building ones that are production-ready is even harder. +Here are some tips for designing your NLU training data and pipeline to get the most +out of your bot. + +## Conversation-Driven Development for NLU + +Conversation-Driven Development (CDD) means letting real user conversations guide your +development. For building a great NLU model, this means two key things: + +### Gather Real Data +When it comes to building out NLU training data, developers often rely on text +generation tools to quickly increase the number of training examples. While this +approach can be time-saving, it makes NLU models prone to overfit data that is not +representative of things a real user would say. + +To avoid such a problem, it is always a good idea to collect as much real user data +as possible to use as training data. Even though your bot will make mistakes initially, +the process of training & evaluating on user data will enable your model to generalize +much more effectively in real-world scenarios. + +### Share with Test Users Early + +In order to gather real data, you’re going to need real user messages. A bot developer +can only come up with a limited range of examples, and users will always surprise you +with what they say. This means you should share your bot with test users outside the +development team as early as possible. +See the full [CDD guidelines](./conversation-driven-development.mdx) for more details. + +## Avoiding Intent Confusion + +Intents are classified using character and word-level features extracted from your +training examples, depending on what [featurizers](./components/featurizers.mdx) +you've added to your NLU pipeline. When different intents contain the same +words ordered in a similar fashion, this can create confusion for the intent classifier. + +### Splitting on Entities vs Intents + +Intent confusion often occurs when you want your assistant's response to be conditioned on +information provided by the user. For example, +"How do I migrate to Rasa from IBM Watson?" versus "I want to migrate from Dialogflow." + +Since each of these messages will lead to a different response, your initial approach might be to create +separate intents for each migration type, e.g. `watson_migration` and `dialogflow_migration`. +However, these intents are trying to achieve the same goal (migrating to Rasa) and will +likely be phrased similarly, which may cause the model to confuse these intents. 
+ +To avoid intent confusion, group these training examples into single `migration` intent and make +the response depend on the value of a categorical `product` slot that comes from an entity. +This also makes it easy to handle the case when no entity is provided, +e.g. "How do I migrate to Rasa?" For example: + +```yaml +stories: +- story: migrate from IBM Watson + steps: + - intent: migration + entities: + - product + - slot_was_set: + - product: Watson + - action: utter_watson_migration + +- story: migrate from Dialogflow + steps: + - intent: migration + entities: + - product + - slot_was_set: + - product: Dialogflow + - action: utter_dialogflow_migration + +- story: migrate from unspecified + steps: + - intent: migration + - action: utter_ask_migration_product +``` + +## Improving Entity Recognition + +With Rasa Open Source, you can define custom entities and annotate them in your training data +to teach your model to recognize them. Rasa Open Source also provides components +to extract pre-trained entities, as well as other forms of training data to help +your model recognize and process entities. + +### Pre-trained Entity Extractors + +Common entities such as names, addresses, and cities require a large amount of training +data for an NLU model to generalize effectively. + +Rasa Open Source provides two great options for +pre-trained extraction: [SpacyEntityExtractor](./components/entity-extractors/#SpacyEntityExtractor) +and [DucklingEntityExtractor](./components/entity-extractors/#DucklingHTTPExtractor). +Because these extractors have been pre-trained on a large corpus of data, you can use them +to extract the entities they support without annotating them in your training data. + +### Regexes + +Regexes are useful for performing entity extraction on structured patterns such as 5-digit +U.S. zip codes. Regex patterns can be used to generate features for the NLU model to learn, +or as a method of direct entity matching. +See [Regular Expression Features](./training-data-format/#regular-expression-features) +for more information. + +### Lookup Tables + +Lookup tables are processed as a regex pattern that checks if any of the lookup table +entries exist in the training example. Similar to regexes, lookup tables can be used +to provide features to the model to improve entity recognition, or used to perform +match-based entity recognition. Examples of useful applications of lookup tables are +flavors of ice cream, brands of bottled water, and even sock length styles +(see [Lookup Tables](./training-data-format/#lookup-tables)). + +### Synonyms + +Adding synonyms to your training data is useful for mapping certain entity values to a +single normalized entity. Synonyms, however, are not meant for improving your model's +entity recognition and have no effect on NLU performance. + +A good use case for synonyms is when normalizing entities belonging to distinct groups. +For example, in an assistant that asks users what insurance policies they're interested +in, they might respond with "my truck," "a car," or "I drive a batmobile." +It would be a good idea to map `truck`, `car`, and `batmobile` to the normalized value +`auto` so that the processing logic will only need to account for a narrow set of +possibilities (see [Entity Synonyms](./training-data-format/#entity-synonyms)). + +## Handling Edge Cases + +### Misspellings + +Coming across misspellings is inevitable, so your bot needs an effective way to +handle this. 
Keep in mind that the goal is not to correct misspellings, but to
+correctly identify intents and entities. For this reason, while a spellchecker may
+seem like an obvious solution, adjusting your featurizers and training data is often
+sufficient to account for misspellings.
+
+Adding a character-level featurizer provides
+an effective defense against spelling errors by accounting for parts of words, instead
+of only whole words. You can add character-level featurization to your pipeline by
+using the `char_wb` analyzer for the `CountVectorsFeaturizer`, for example:
+
+```yaml
+- name: CountVectorsFeaturizer
+  analyzer: char_wb
+  min_ngram: 1
+  max_ngram: 4
+```
+
+In addition to character-level featurization, you can add common misspellings to
+your training data.
+
+### Defining an Out-of-scope Intent
+
+It is always a good idea to define an `out_of_scope` intent in your bot to capture
+any user messages outside of your bot's domain. When an `out_of_scope` intent is
+identified, you can respond with messages such as "I'm not sure how to handle that,
+here are some things you can ask me..." to gracefully guide the user towards a
+supported skill.
+
+## Shipping Updates
+
+Treat your data like code. In the same way that you would never ship code updates
+without reviews, updates to your training data should be carefully reviewed because
+of the significant influence they can have on your model's performance.
+
+Use a version control system such as GitHub or Bitbucket to track changes to your
+data and roll back updates when necessary.
+
+Be sure to build tests for your NLU models to [evaluate performance](./testing-your-assistant.mdx) as training data
+and hyper-parameters change. Automate these tests in a [CI pipeline](./setting-up-ci-cd.mdx) such as Jenkins
+or Git Workflow to streamline your development process and ensure that only
+high-quality updates are shipped.
diff --git a/docs/docs/glossary.mdx b/docs/docs/glossary.mdx
new file mode 100644
index 000000000000..01815716a035
--- /dev/null
+++ b/docs/docs/glossary.mdx
@@ -0,0 +1,124 @@
+---
+id: glossary
+sidebar_label: Rasa Glossary
+title: Rasa Glossary
+description: Glossary for all Rasa-related terms
+---
+
+## [Action](./actions.mdx)
+
+  A single step that a bot takes in a conversation (e.g. calling an API or sending a response back to the user).
+
+## Annotation
+
+  Adding labels to messages and conversations so that they can be used to train a model.
+
+## CMS
+
+  A Content Management System (CMS) can be used to store bot responses externally instead of directly including them as part of the domain. This provides more flexibility in changing them as they are not tightly coupled with the training data.
+
+## [Custom Action](./actions.mdx#custom-actions)
+
+  An action written by a Rasa developer that can run arbitrary code, mainly to interact with the outside world.
+
+## [Default Action](./actions.mdx#default-actions)
+
+  A built-in action that comes with predefined functionality.
+
+## [Domain](./domain.mdx)
+
+  Defines the inputs and outputs of an assistant.
+
+  It includes a list of all the intents, entities, slots, actions, and forms that the assistant knows about.
+
+## [Entity](./domain.mdx#entities)
+
+  Structured information that can be extracted from a user message.
+
+  For example, a telephone number, a person's name, a location, or the name of a product.
+
+## [Event](./events.mdx)
+
+  All conversations in Rasa are represented as a sequence of events.
For instance, a `UserUttered` represents a user entering a message, and an `ActionExecuted` represents the assistant executing an action. You can learn more about them [here](./events.mdx). + +## [Form](./forms.mdx) + + A type of custom action that asks the user for multiple pieces of information. + + For example, if you need a city, a cuisine, and a price range to recommend a restaurant, you can create a restaurant form to do that. You can describe any business logic inside a form. For example, if you want to ask for a particular neighbourhood if a user mentions a large city like Los Angeles, you can write that logic inside the form. + +## Happy / Unhappy Paths + + If your assistant asks a user for some information and the user provides it, we call that a happy path. Unhappy paths are all the possible edge cases of a bot. For example, the user refusing to give some input, changing the topic of conversation, or correcting something they said earlier. + +## Intent + + Something that a user is trying to convey or accomplish (e,g., greeting, specifying a location). + +## [Interactive Learning](./writing-stories.mdx#using-interactive-learning) + + A mode of training the bot where the user provides feedback to the bot while talking to it. + + This is a powerful way to write complicated stories by enabling users to explore what a bot can do and easily fix any mistakes it makes. + +## Minimum viable assistant + + A basic assistant that can handle the most important happy path stories. + +## NLG + + Natural Language Generation (NLG) is the process of generating natural language messages to send to a user. + + Rasa uses a simple template-based approach for NLG. Data-driven approaches (such as neural NLG) can be implemented by creating a custom NLG component. + +## Rasa NLU + + Natural Language Understanding (NLU) deals with parsing and understanding human language into a structured format. + + Rasa NLU is the part of Rasa that performs intent classification and entity extraction. + +## [Pipeline](./tuning-your-model.mdx) + + A Rasa bot's NLU system is defined by a pipeline, which is a list of NLU components (see “Rasa NLU Component”) in a particular order. A user input is processed by each component one by one before finally giving out the structured output. + +## [Policy](./policies.mdx) + + Policies make decisions on how conversation flow should proceed. At every turn, the policy which predicts the next action with the highest confidence will be used. A Core model can have multiple policies included, and the policy whose prediction has the highest confidence decides the next action to be taken. + +## Rasa Core + + The dialogue engine that decides on what to do next in a conversation based on the context. + +## Rasa NLU Component + + An element in the Rasa NLU pipeline (see “Pipeline”). + + Incoming messages are processed by a sequence of components called a pipeline. A component can perform tasks ranging from entity extraction to intent classification to pre-processing. + +## [Rules](./rules.mdx) + + Special training data to specify rule-like behavior of the assistant, such as + answering FAQs, filling [Forms](./forms.mdx), or handling + [Fallbacks](./fallback-handoff.mdx#fallbackactions). + +## [Slot](./domain.mdx#slots) + + A key-value store that Rasa uses to track information over the course of a conversation. 
+ +## [Story](./stories.mdx) + + A conversation between a user and a bot annotated with the intent / entities of the users' messages as well as the sequence of actions to be performed by the bot + +## [Template / Response / Utterance](./responses.mdx) + + A message template that is used to respond to a user. This can include text, buttons, images, and other attachments. + +## User Goal + + A goal that a user wants to achieve. + + For example, a user may have the goal of booking a table at a restaurant. Another user may just want to make small talk. Sometimes, the user expresses their goal with a single message, e.g. “I want to book a table at a restaurant”. Other times the assistant may have to ask a few questions to understand how to help the user. Note: Many other places refer to the user goal as the “intent”, but in Rasa terminology, an intent is associated with every user message. + +## Word embedding / Word vector + + A vector of floating point numbers which represent the meaning of a word. Words which have similar meanings should have vectors which point in almost the same direction. Word embeddings are often used as an input to machine learning algorithms. diff --git a/docs/docs/how-to-deploy.mdx b/docs/docs/how-to-deploy.mdx new file mode 100644 index 000000000000..6a4831d5027e --- /dev/null +++ b/docs/docs/how-to-deploy.mdx @@ -0,0 +1,179 @@ +--- +id: how-to-deploy +sidebar_label: Deploying Your Assistant +title: Deploying Your Rasa Assistant +description: How to deploy your Rasa Assistant with Docker Compose or Kubernetes/Openshift +--- +<!-- this file is version specific, do not use `@site/...` syntax --> +import variables from './variables.json'; + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="deploying-your-rasa-assistant"></a> + +This page explains when and how to deploy an assistant built with Rasa. +It will allow you to make your assistant available to users and set you up with a production-ready environment. + +## When to Deploy Your Assistant + +The best time to deploy your assistant and make it available to test users is once it can handle the most +important happy paths or is what we call a [minimum viable assistant](./glossary.mdx). + +The recommended deployment methods described below make it easy to share your assistant +with test users via the [share your assistant feature in +Rasa X](https://rasa.com/docs/rasa-x/user-guide/share-assistant/#share-your-bot). +Then, when you're ready to make your assistant available via one or more [Messaging and Voice Channels](./messaging-and-voice-channels.mdx), +you can easily add them to your existing deployment set up. + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="recommended-deployment-methods"></a> + +## Recommended Deployment Methods + +The recommended way to deploy an assistant is using either the Server Quick-Install or Helm Chart +options we support. Both deploy Rasa X and your assistant. They are the easiest ways to deploy your assistant, +allow you to use Rasa X to view conversations and turn them into training data, and are production-ready. +For more details on deployment methods see the [Rasa X Installation Guide](https://rasa.com/docs/rasa-x/installation-and-setup/installation-guide/). + +### Server Quick-Install + +The Server Quick-Install script is the easiest way to deploy Rasa X and your assistant. It installs a Kubernetes +cluster on your machine with sensible defaults, getting you up and running in one command. 
+ +* Default: Make sure you meet the [OS Requirements](https://rasa.com/docs/rasa-x/installation-and-setup/install/quick-install-script/#hardware-os-requirements), + then run: + + ```bash + curl -s get-rasa-x.rasa.com | sudo bash +* Custom: See [Customizing the Script](https://rasa.com/docs/rasa-x/installation-and-setup/customize/#server-quick-install) + and the [Server Quick-Install docs](https://rasa.com/docs/rasa-x/installation-and-setup/install/quick-install-script) docs. + +### Helm Chart + +For assistants that will receive a lot of user traffic, setting up a Kubernetes or Openshift deployment via +our Helm charts is the best option. This provides a scalable architecture that is also straightforward to deploy. +However, you can also customize the Helm charts if you have specific requirements. + +* Default: Read the [Helm Chart Installation](https://rasa.com/docs/rasa-x/installation-and-setup/install/helm-chart/) docs. + +* Custom: Read the above, as well as the [Advanced Configuration](https://rasa.com/docs/rasa-x/installation-and-setup/customize/#helm-chart) + documentation, and customize the [open source Helm charts](https://github.com/RasaHQ/rasa-x-helm) to your needs. + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="rasa-only-deployment"></a> + +## Alternative Deployment Methods + +### Docker Compose + +You can also run Rasa X in a Docker Compose setup, without the cluster environment. We have an install script +for doing so, as well as manual instructions for any custom setups. + +* Default: Read the [Docker Compose Install Script](https://rasa.com/docs/rasa-x/installation-and-setup/install/docker-compose/#docker-compose-install-script) docs or watch the [Masterclass Video](https://www.youtube.com/watch?v=IUYdwy8HPVc) on deploying Rasa X. + +* Custom: Read the [Docker Compose Manual Install](https://rasa.com/docs/rasa-x/installation-and-setup/install/docker-compose/#docker-compose-manual-install) documentation for full customization options. + +### Rasa Open Source Only Deployment + +It is also possible to deploy a Rasa assistant without Rasa X using Docker Compose. To do so, you can build your +Rasa Assistant locally or in Docker. Then you can deploy your model in Docker Compose. + +* [Building a Rasa Assistant Locally](./prototype-an-assistant) + +* [Building a Rasa Assistant in Docker](./docker/building-in-docker) + +* [Deploying a Rasa Open Source Assistant in Docker Compose](./docker/deploying-in-docker-compose) + + +## Deploying Your Action Server + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="building-an-action-server-image"></a> + +### Building an Action Server Image + +If you build an image that includes your action code and store it in a container registry, you can run it +as part of your deployment, without having to move code between servers. +In addition, you can add any additional dependencies of systems or Python libraries +that are part of your action code but not included in the base `rasa/rasa-sdk` image. + +To create your image: + +1. Move your actions code to a folder `actions` in your project directory. + Make sure to also add an empty `actions/__init__.py` file: + + ```bash + mkdir actions + mv actions.py actions/actions.py + touch actions/__init__.py # the init file indicates actions.py is a python module + ``` + + The `rasa/rasa-sdk` image will automatically look for the actions in `actions/actions.py`. + +2. 
If your actions have any extra dependencies, create a list of them in a file, + `actions/requirements-actions.txt`. + +3. Create a file named `Dockerfile` in your project directory, + in which you'll extend the official SDK image, copy over your code, and add any custom dependencies (if necessary). + For example: + + <pre><code parentName="pre" className="language-python"> + {`# Extend the official Rasa SDK image + FROM rasa/rasa-sdk:${variables.rasa_sdk_version} + + # Use subdirectory as working directory + WORKDIR /app + + # Copy any additional custom requirements, if necessary (uncomment next line) + # COPY actions/requirements-actions.txt ./ + + # Change back to root user to install dependencies + USER root + + # Install extra requirements for actions code, if necessary (uncomment next line) + # RUN pip install -r requirements-actions.txt + + # Copy actions folder to working directory + COPY ./actions /app/actions + + # By best practices, don't run the code with root user + USER 1001`}</code></pre> + +You can then build the image via the following command: + +```bash +docker build . -t <account_username>/<repository_name>:<custom_image_tag> +``` + +The `<custom_image_tag>` should reference how this image will be different from others. For +example, you could version or date your tags, as well as create different tags that have different code for production +and development servers. You should create a new tag any time you update your code and want to re-deploy it. + +### Using your Custom Action Server Image + +If you're building this image to make it available from another server, +for example a Rasa X or Rasa Enterprise deployment, you should push the image to a cloud repository. + +This documentation assumes you are pushing your images to [DockerHub](https://hub.docker.com/). +DockerHub will let you host multiple public repositories and +one private repository for free. Be sure to first [create an account](https://hub.docker.com/signup/) +and [create a repository](https://hub.docker.com/signup/) to store your images. You could also push images to +a different Docker registry, such as [Google Container Registry](https://cloud.google.com/container-registry), +[Amazon Elastic Container Registry](https://aws.amazon.com/ecr/), or +[Azure Container Registry](https://azure.microsoft.com/en-us/services/container-registry/). + +You can push the image to DockerHub via: + +```bash +docker login --username <account_username> --password <account_password> +docker push <account_username>/<repository_name>:<custom_image_tag> +``` + +To authenticate and push images to a different container registry, please refer to the documentation of +your chosen container registry. + +How you reference the custom action image will depend on your deployment. 
Pick the relevant documentation for
+your deployment:
+
+* [Server Quick-Install](https://rasa.com/docs/rasa-x/installation-and-setup/customize/#quick-install-script-customizing)
+
+* [Helm Chart](https://rasa.com/docs/rasa-x/installation-and-setup/customize/#adding-a-custom-action-server)
+
+* [Docker Compose](https://rasa.com/docs/rasa-x/installation-and-setup/customize/#connecting-a-custom-action-server)
+
+* [Rasa Open Source Only](./docker/deploying-in-docker-compose#running-multiple-services)
diff --git a/docs/docs/http-api-spec.mdx b/docs/docs/http-api-spec.mdx
new file mode 100644
index 000000000000..0a78aa3ef888
--- /dev/null
+++ b/docs/docs/http-api-spec.mdx
@@ -0,0 +1,11 @@
+---
+id: http-api-spec
+sidebar_label: API Spec
+title: Rasa Open Source API Spec
+hide_table_of_contents: true
+---
+
+import useBaseUrl from '@docusaurus/useBaseUrl';
+import Redoc from '@site/src/components/redoc';
+
+<Redoc specUrl={useBaseUrl("/spec/rasa.yml")} />
diff --git a/docs/docs/http-api.mdx b/docs/docs/http-api.mdx
new file mode 100644
index 000000000000..51b706977a67
--- /dev/null
+++ b/docs/docs/http-api.mdx
@@ -0,0 +1,111 @@
+---
+id: http-api
+sidebar_label: Using the HTTP API
+title: Using the HTTP API
+description: Read about Rasa's HTTP API that has endpoints for conversations, training models, and configuring your bot.
+---
+
+## Enabling the HTTP API
+
+By default, running a Rasa server does not enable the API endpoints. Interactions
+with the bot can happen over the exposed `webhooks/<channel>/webhook` endpoints.
+
+To enable the API for direct interaction with conversation trackers and other
+bot endpoints, add the `--enable-api` parameter to your run command:
+
+```bash
+rasa run --enable-api
+```
+
+Note that if you start the server with an NLU-only model, not all the available endpoints
+can be called. Some endpoints will return a 409 status code, as a trained
+dialogue model is needed to process the request.
+
+
+:::caution
+Make sure to secure your server, either by restricting access to the server (e.g. using firewalls), or
+by enabling an authentication method. See [Security Considerations](./model-storage.mdx#server-security).
+
+:::
+
+By default, the HTTP server runs as a single process. You can change the number
+of worker processes using the `SANIC_WORKERS` environment variable. It is
+recommended that you set the number of workers to the number of available CPU cores
+(check out the
+[Sanic docs](https://sanic.readthedocs.io/en/latest/sanic/deploying.html#workers)
+for more details). This will only work in combination with the
+`RedisLockStore` (see [Lock Stores](./lock-stores)).
+
+
+<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="server-security"></a>
+
+## Security Considerations
+
+We recommend not exposing the Rasa server to the outside world, but
+rather connecting to it from your backend over a private connection (e.g.
+between docker containers).
+
+Nevertheless, there are two authentication methods built in:
+
+### Token Based Auth
+
+Pass in the token using `--auth-token thisismysecret` when starting
+the server:
+
+```bash
+rasa run \
+  -m models \
+  --enable-api \
+  --log-file out.log \
+  --auth-token thisismysecret
+```
+
+Your requests should pass the token, in our case `thisismysecret`,
+as a parameter:
+
+```bash
+curl -XGET localhost:5005/conversations/default/tracker?token=thisismysecret
+```
+
+### JWT Based Auth
+
+Enable JWT based authentication using `--jwt-secret thisismysecret`.
+Requests to the server need to contain a valid JWT token in +the `Authorization` header that is signed using this secret +and the `HS256` algorithm. + +The token's payload must contain an object under the `user` key, +which in turn must contain the `username` and `role` attributes. +If the `role` is `admin`, all endpoints are accessible. +If the `role` is `user`, endpoints with a `sender_id` parameter are only accessible +if the `sender_id` matches the payload's `username` property. + +```bash +rasa run \ + -m models \ + --enable-api \ + --log-file out.log \ + --jwt-secret thisismysecret +``` + +Your requests should have set a proper JWT header: + +```text +"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ" + "zdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIi" + "wiaWF0IjoxNTE2MjM5MDIyfQ.qdrr2_a7Sd80gmCWjnDomO" + "Gl8eZFVfKXA6jhncgRn-I" +``` + +The following is an example payload for a JWT token: + +```json +{ + "user": { + "username": "<sender_id>", + "role": "user" + } +} +``` + +To create and encode the token, you can use tools such as the [JWT Debugger](https://jwt.io/), or a Python module such as [PyJWT](https://pyjwt.readthedocs.io/en/latest/). diff --git a/docs/docs/index.mdx b/docs/docs/index.mdx new file mode 100644 index 000000000000..692c132c7d04 --- /dev/null +++ b/docs/docs/index.mdx @@ -0,0 +1,23 @@ +--- +id: index +sidebar_label: Docs Home +title: Contextual assistants with Rasa Open Source +description: Learn more about open-source natural language processing library Rasa for conversation handling, intent classification and entity extraction in on premise chatbots. +--- +<!-- this file is version specific, do not use `@site/...` syntax --> + +Rasa is an open source machine learning framework for automated text and voice-based +conversations. Understand messages, hold conversations, and connect to messaging +channels and APIs. + +Let's get started and [prototype an assistant](prototype-an-assistant.mdx)! + +:::note Migrate from 1.x +Coming from Rasa Open Source 1.x? Check out our [1.x to 2.x migration guide](migration-guide.mdx). +::: + +:::note Legacy Docs +These docs are for Rasa Open Source 2.0 and later. You can find documentation for Rasa +Open Source 1.x at [https://legacy-docs-v1.rasa.com](https://legacy-docs-v1.rasa.com). +::: + diff --git a/docs/docs/installation.mdx b/docs/docs/installation.mdx new file mode 100644 index 000000000000..4eccb470f579 --- /dev/null +++ b/docs/docs/installation.mdx @@ -0,0 +1,253 @@ +--- +id: installation +sidebar_label: Installation +title: Installation +description: Install Rasa Open Source on premises to enable local and customizable Natural Lanaguage Understanding and Dialogue Management. +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## Quick Installation + +You can install Rasa Open Source using pip (requires Python 3.6, 3.7 or 3.8). + +<!-- TODO: REMOVE VERSION NUMBER FOR RASA 2.0 RELEASE --> +```bash +pip3 install rasa==2.0.0a2 +``` + +You are now ready to go! So what's next? +You can create a new project by running + +```bash +rasa init +``` + +:::note Want to explore first? +You can explore Rasa Open Source online using our prototyper without any installation. +At the end of the tutorial you can download the resulting assistant, install Rasa on +your machine and continue development locally. 
+<a className="button button--outline button button" href="prototype-an-assistant">Prototype an Assistant</a> +::: + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="installation-guide"></a> + +## Step-by-step Installation Guide + +Prefer following video instructions? Watch our installation series on +[Youtube](https://www.youtube.com/playlist?list=PL75e0qA87dlEWUA5ToqLLR026wIkk2evk), +it explains the installation in walkthroughs for all major platforms. + +### 1. Python Environment Setup + +Check if your Python environment is already configured: + +```bash +python3 --version +pip3 --version +``` + +If these packages are already installed, these commands should display version +numbers for each step, and you can skip to the next step. + +Otherwise, proceed with the instructions below to install them. + +<Tabs values={[{"label": "Ubuntu", "value": "ubuntu"}, {"label": "macOS", "value": "macos"}, {"label": "Windows", "value": "windows"}]} groupId="operating-systems" defaultValue="ubuntu"> + <TabItem value="ubuntu"> + + Fetch the relevant packages using `apt`, and install virtualenv using `pip`. + + ```bash + sudo apt update + sudo apt install python3-dev python3-pip + ``` + + </TabItem> + <TabItem value="macos"> + + Install the [Homebrew](https://brew.sh) package manager if you haven't already. + + Once you're done, you can install Python3. + + ```bash + brew update + brew install python + ``` + + </TabItem> + <TabItem value="windows"> + + Make sure the Microsoft VC++ Compiler is installed, so python can compile + any dependencies. You can get the compiler from <a className="reference external" + href="https://visualstudio.microsoft.com/visual-cpp-build-tools/" + target="_blank">Visual Studio</a>. Download the installer and select + VC++ Build tools in the list.Install [Python 3](https://www.python.org/downloads/windows/) (64-bit version) for Windows. + + ```bat + C:\> pip3 install -U pip + ``` + + </TabItem> +</Tabs> + +### 2. Virtual Environment Setup + +This step is optional, but we strongly recommend isolating python projects +using virtual environments. Tools like +[virtualenv](https://virtualenv.pypa.io/en/latest/) and +[virtualenvwrapper](https://virtualenvwrapper.readthedocs.io/en/latest/) provide +isolated Python environments, which are cleaner than installing packages system-wide +(as they prevent dependency conflicts). They also let you install packages +without root privileges. + +<Tabs values={[{"label": "Ubuntu", "value": "ubuntu"}, {"label": "macOS", "value": "macos"}, {"label": "Windows", "value": "windows"}]} groupId="operating-systems" defaultValue="ubuntu"> + <TabItem value="ubuntu"> + + Create a new virtual environment by choosing a Python interpreter and making a `./venv` directory to hold it: + + ```bash + python3 -m venv ./venv + ``` + + Activate the virtual environment: + + ```bash + source ./venv/bin/activate + ``` + + </TabItem> + <TabItem value="macos"> + + Create a new virtual environment by choosing a Python interpreter and making a `./venv` directory to hold it: + + ```bash + python3 -m venv ./venv + ``` + + Activate the virtual environment: + + ```bash + source ./venv/bin/activate + ``` + + </TabItem> + <TabItem value="windows"> + + Create a new virtual environment by choosing a Python interpreter and making a `.\\venv` directory to hold it: + + ```bat + C:\> python3 -m venv ./venv + ``` + + Activate the virtual environment: + + ```bat + C:\> .\venv\Scripts\activate + ``` + + </TabItem> +</Tabs> + +### 3. 
Install Rasa Open Source + +<Tabs values={[{"label": "Ubuntu / macOS / Windows", "value": "ubuntu/macos/windows"}]} defaultValue="ubuntu/macos/windows"> + <TabItem value="ubuntu/macos/windows"> + + First make sure your `pip` version is up to date: + + ```bash + pip3 install -U pip + ``` + + To install Rasa Open Source: + + ```bash + pip3 install rasa + ``` + + </TabItem> +</Tabs> + +**Congratulations! You have successfully installed Rasa Open Source!** + +Next step: Start prototyping your first assistant online and download it afterwards + + +<a className="button button--outline button--secondary button--lg" href="prototype-an-assistant">Prototype an Assistant</a> + +--- + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="build-from-source"></a> + +## Building from Source + +If you want to use the development version of Rasa Open Source, you can get it from GitHub: + +```bash +curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python +git clone https://github.com/RasaHQ/rasa.git +cd rasa +poetry install +``` + + +--- + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="pipeline-dependencies"></a> + +## Additional Dependencies + +For some machine learning algorithms you need to install additional python packages. +They aren't installed by default to keep the footprint small. + +The page on [Tuning Your Model](./tuning-your-model.mdx) will help you pick the right +configuration for your assistant and alert you to additional dependencies. + +:::tip Just give me everything! +If you don't mind the additional dependencies lying around, you can use + +```bash +pip3 install rasa[full] +``` + +to install all needed dependencies for every configuration. + +::: + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="install-spacy"></a> + +### Dependencies for spaCy + +For more information on spaCy, check out the [spaCy docs](https://spacy.io/usage/models). + +You can install it with the following commands: + +```bash +pip3 install rasa[spacy] +python3 -m spacy download en_core_web_md +python3 -m spacy link en_core_web_md en +``` + +This will install Rasa Open Source as well as spaCy and its language model +for the English language. We recommend using at least the +“medium” sized models (`_md`) instead of the spaCy's +default small `en_core_web_sm` model. Small models require less +memory to run, but will somewhat reduce intent classification performance. + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="install-mitie"></a> + +### Dependencies for MITIE + +First, run + +```bash +pip3 install git+https://github.com/mit-nlp/MITIE.git +pip3 install rasa[mitie] +``` + +and then download the +[MITIE models](https://github.com/mit-nlp/MITIE/releases/download/v0.4/MITIE-models-v0.2.tar.bz2). +The file you need is `total_word_feature_extractor.dat`. Save this +anywhere. If you want to use MITIE, you need to +tell it where to find this file (in this example it was saved in the +`data` folder of the project directory). diff --git a/docs/docs/jupyter-notebooks.mdx b/docs/docs/jupyter-notebooks.mdx new file mode 100644 index 000000000000..5b0b86e68861 --- /dev/null +++ b/docs/docs/jupyter-notebooks.mdx @@ -0,0 +1,112 @@ +--- +id: jupyter-notebooks +sidebar_label: Jupyter Notebooks +title: Jupyter Notebooks +description: Learn how to integrate open source chatbot platform Rasa into Jupyter notebooks, alongside all your machine learning code. 
+---
+
+This page contains the most important methods for using Rasa in a Jupyter notebook.
+
+Running asynchronous Rasa code in Jupyter Notebooks requires an extra dependency,
+since Jupyter Notebooks already run on event loops. Install this dependency on
+the command line before launching Jupyter:
+
+```bash
+pip install nest_asyncio
+```
+
+Then in the first cell of your notebook, include:
+
+```python
+import nest_asyncio
+
+nest_asyncio.apply()
+print("Event loop ready.")
+```
+First, you need to create a project if you don't already have one.
+To do this, run this cell, which will create the `test-project` directory and make it
+your working directory:
+
+```python
+from rasa.cli.scaffold import create_initial_project
+import os
+
+project = "test-project"
+create_initial_project(project)
+
+# move into project directory and show files
+os.chdir(project)
+print(os.listdir("."))
+```
+To train a model, you will have to tell the `train` function
+where to find the relevant files.
+To define variables that contain these paths, run:
+
+```python
+config = "config.yml"
+training_files = "data/"
+domain = "domain.yml"
+output = "models/"
+print(config, training_files, domain, output)
+```
+## Train a Model
+
+Now we can train a model by passing in the paths to the `rasa.train` function.
+Note that the training files are passed as a list.
+When training has finished, `rasa.train` returns the path where the trained model has been saved.
+
+```python
+import rasa
+
+model_path = rasa.train(domain, config, [training_files], output)
+print(model_path)
+```
+## Chat with your assistant
+
+To start chatting to an assistant, call the `chat` function, passing
+in the path to your saved model. If you do not have custom actions you can set `endpoints = None` or omit it:
+
+```python
+from rasa.jupyter import chat
+
+endpoints = "endpoints.yml"
+chat(model_path, endpoints)
+```
+## Evaluate your model against test data
+
+Rasa has a convenience function for getting your training data:
+`get_core_nlu_directories` recursively finds all the stories and NLU data files
+in a directory and copies them into two temporary directories.
+The return values are the paths to these newly created directories.
+
+```python
+import rasa.data as data
+stories_directory, nlu_data_directory = data.get_core_nlu_directories(training_files)
+print(stories_directory, nlu_data_directory)
+```
+To test your model, call the `test` function, passing in the path
+to your saved model and the directories containing the stories and NLU data
+to evaluate on.
+
+```python
+rasa.test(model_path, stories_directory, nlu_data_directory)
+print("Done testing.")
+```
+The results of the core evaluation will be written to a file called `results`.
+NLU errors will be reported to `errors.json`.
+Together, they contain information about the accuracy of your model's
+predictions and other metrics.
+ +```bash +if os.path.isfile("errors.json"): + print("NLU Errors:") + print(open("errors.json").read()) +else: + print("No NLU errors.") + +if os.path.isdir("results"): + print("\n") + print("Core Errors:") + print(open("results/failed_stories.md").read()) +``` diff --git a/docs/docs/knowledge-bases.mdx b/docs/docs/knowledge-bases.mdx new file mode 100644 index 000000000000..88db769d75aa --- /dev/null +++ b/docs/docs/knowledge-bases.mdx @@ -0,0 +1,559 @@ +--- +id: knowledge-bases +sidebar_label: Knowledge Base Actions +title: Knowledge Base Actions +description: Leverage information from knowledge bases inside conversations using ActionQueryKnowledgeBase in open source bot framework Rasa. +--- +import useBaseUrl from '@docusaurus/useBaseUrl'; + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="knowledge-base-actions"></a> + +:::caution +This feature is experimental. +We introduce experimental features to get feedback from our community, so we encourage you to try it out! +However, the functionality might be changed or removed in the future. +If you have feedback (positive or negative) please share it with us on the [forum](https://forum.rasa.com). + +::: + +Knowledge base actions enable you to handle the following kind of conversations: + + + +<img alt="image" src={useBaseUrl("/img/knowledge-base-example.png")} /> + +A common problem in conversational AI is that users do not only refer to certain objects by their names, +but also use reference terms such as “the first one” or “it”. +We need to keep track of the information that was presented to resolve these mentions to +the correct object. + +In addition, users may want to obtain detailed information about objects during a conversation – +for example, whether a restaurant has outside seating, or how expensive it is. +In order to respond to those user requests, knowledge about the restaurant domain is needed. +Since the information is subject to change, hard-coding the information isn't the solution. + +To handle the above challenges, Rasa can be integrated with knowledge bases. To use this integration, you can create a +custom action that inherits from `ActionQueryKnowledgeBase`, a pre-written custom action that contains +the logic to query a knowledge base for objects and their attributes. + +You can find a complete example in `examples/knowledgebasebot` +([knowledge base bot](https://github.com/RasaHQ/rasa/tree/master/examples/knowledgebasebot/)), as well as instructions +for implementing this custom action below. + +## Using `ActionQueryKnowledgeBase` + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="create-knowledge-base"></a> + +### Create a Knowledge Base + +The data used to answer the user's requests will be stored in a knowledge base. +A knowledge base can be used to store complex data structures. +We suggest you get started by using the `InMemoryKnowledgeBase`. +Once you want to start working with a large amount of data, you can switch to a custom knowledge base +(see [Creating Your Own Knowledge Base](./knowledge-bases.mdx#custom-knowledge-base)). + +To initialize an `InMemoryKnowledgeBase`, you need to provide the data in a json file. +The following example contains data about restaurants and hotels. +The json structure should contain a key for every object type, i.e. `"restaurant"` and `"hotel"`. +Every object type maps to a list of objects – here we have a list of 3 restaurants and a list of 3 hotels. 
+
+```json
+{
+  "restaurant": [
+    {
+      "id": 0,
+      "name": "Donath",
+      "cuisine": "Italian",
+      "outside-seating": true,
+      "price-range": "mid-range"
+    },
+    {
+      "id": 1,
+      "name": "Berlin Burrito Company",
+      "cuisine": "Mexican",
+      "outside-seating": false,
+      "price-range": "cheap"
+    },
+    {
+      "id": 2,
+      "name": "I due forni",
+      "cuisine": "Italian",
+      "outside-seating": true,
+      "price-range": "mid-range"
+    }
+  ],
+  "hotel": [
+    {
+      "id": 0,
+      "name": "Hilton",
+      "price-range": "expensive",
+      "breakfast-included": true,
+      "city": "Berlin",
+      "free-wifi": true,
+      "star-rating": 5,
+      "swimming-pool": true
+    },
+    {
+      "id": 1,
+      "name": "Hilton",
+      "price-range": "expensive",
+      "breakfast-included": true,
+      "city": "Frankfurt am Main",
+      "free-wifi": true,
+      "star-rating": 4,
+      "swimming-pool": false
+    },
+    {
+      "id": 2,
+      "name": "B&B",
+      "price-range": "mid-range",
+      "breakfast-included": false,
+      "city": "Berlin",
+      "free-wifi": false,
+      "star-rating": 1,
+      "swimming-pool": false
+    }
+  ]
+}
+```
+
+Once the data is defined in a json file called, for example, `data.json`, you will be able to use this data file to create your
+`InMemoryKnowledgeBase`, which will be passed to the action that queries the knowledge base.
+
+Every object in your knowledge base should have at least the `"name"` and `"id"` fields to use the default implementation.
+If it doesn't, you'll have to [customize your InMemoryKnowledgeBase](./knowledge-bases.mdx#customize-in-memory-knowledge-base).
+
+### Define the NLU Data
+
+In this section:
+
+* we will introduce a new intent, `query_knowledge_base`
+
+* we will annotate `mention` entities so that our model detects indirect mentions of objects like “the
+  first one”
+
+* we will use [synonyms](./training-data-format.mdx#entity-synonyms) extensively
+
+For the bot to understand that the user wants to retrieve information from the knowledge base, you need to define
+a new intent. We will call it `query_knowledge_base`.
+
+We can split requests that `ActionQueryKnowledgeBase` can handle into two categories:
+(1) the user wants to obtain a list of objects of a specific type, or (2) the user wants to know about a certain
+attribute of an object. The intent should contain lots of variations of both of these requests:
+
+```md
+## intent:query_knowledge_base
+- what [restaurants](object_type:restaurant) can you recommend?
+- list some [restaurants](object_type:restaurant)
+- can you name some [restaurants](object_type:restaurant) please?
+- can you show me some [restaurant](object_type:restaurant) options
+- list [German](cuisine) [restaurants](object_type:restaurant)
+- do you have any [mexican](cuisine) [restaurants](object_type:restaurant)?
+- do you know the [price range](attribute:price-range) of [that one](mention)?
+- what [cuisine](attribute) is [it](mention)?
+- do you know what [cuisine](attribute) the [last one](mention:LAST) has?
+- does the [first one](mention:1) have [outside seating](attribute:outside-seating)?
+- what is the [price range](attribute:price-range) of [Berlin Burrito Company](restaurant)?
+- what about [I due forni](restaurant)?
+- can you tell me the [price range](attribute) of [that restaurant](mention)?
+- what [cuisine](attribute) do [they](mention) have?
+ ...
+```
+
+The above example just shows examples related to the restaurant domain.
+You should add examples for every object type that exists in your knowledge base to the same `query_knowledge_base` intent.
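+
+For instance, for the `hotel` object type from the example knowledge base above, additional
+training examples for the same intent might look like this (the exact phrasings are only
+illustrative):
+
+```md
+- what [hotels](object_type:hotel) can you recommend?
+- list some [hotels](object_type:hotel) in [Berlin](city)
+- does the [second one](mention:2) have a [swimming pool](attribute:swimming-pool)?
+- what is the [price range](attribute:price-range) of the [last one](mention:LAST)?
+```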
+ +In addition to adding a variety of training examples for each query type, +you need to specify and annotate the following entities in your training examples: + +* `object_type`: Whenever a training example references a specific object type from your knowledge base, the object type should + be marked as an entity. Use [synonyms](./training-data-format.mdx#entity-synonyms) to map e.g. `restaurants` to `restaurant`, the correct + object type listed as a key in the knowledge base. + +* `mention`: If the user refers to an object via “the first one”, “that one”, or “it”, you should mark those terms + as `mention`. We also use synonyms to map some of the mentions to symbols. You can learn about that + in [resolving mentions](./knowledge-bases.mdx#resolve-mentions). + +* `attribute`: All attribute names defined in your knowledge base should be identified as `attribute` in the + NLU data. Again, use synonyms to map variations of an attribute name to the one used in the + knowledge base. + +Remember to add those entities to your domain file (as entities and slots): + +```yaml +entities: + - object_type + - mention + - attribute + +slots: + object_type: + type: unfeaturized + mention: + type: unfeaturized + attribute: + type: unfeaturized +``` + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="create-action-query-knowledge-base"></a> + +### Create an Action to Query your Knowledge Base + +To create your own knowledge base action, you need to inherit `ActionQueryKnowledgeBase` and pass the knowledge +base to the constructor of `ActionQueryKnowledgeBase`. + +```python +from rasa_sdk.knowledge_base.storage import InMemoryKnowledgeBase +from rasa_sdk.knowledge_base.actions import ActionQueryKnowledgeBase + +class MyKnowledgeBaseAction(ActionQueryKnowledgeBase): + def __init__(self): + knowledge_base = InMemoryKnowledgeBase("data.json") + super().__init__(knowledge_base) +``` + +Whenever you create an `ActionQueryKnowledgeBase`, you need to pass a `KnowledgeBase` to the constructor. +It can be either an `InMemoryKnowledgeBase` or your own implementation of a `KnowledgeBase` +(see [Creating Your Own Knowledge Base](./knowledge-bases.mdx#custom-knowledge-base)). +You can only pull information from one knowledge base, as the usage of multiple knowledge bases at the same time is not supported. + +This is the entirety of the code for this action! The name of the action is `action_query_knowledge_base`. +Don't forget to add it to your domain file: + +```yaml +actions: +- action_query_knowledge_base +``` + +:::note +If you overwrite the default action name `action_query_knowledge_base`, you need to add the following three +unfeaturized slots to your domain file: `knowledge_base_objects`, `knowledge_base_last_object`, and +`knowledge_base_last_object_type`. +The slots are used internally by `ActionQueryKnowledgeBase`. +If you keep the default action name, those slots will be automatically added for you. + +::: + +You also need to make sure to add a story to your stories file that includes the intent `query_knowledge_base` and +the action `action_query_knowledge_base`. For example: + +```md +## Happy Path +* greet + - utter_greet +* query_knowledge_base + - action_query_knowledge_base +* goodbye + - utter_goodbye +``` + +The last thing you need to do is to define the response `utter_ask_rephrase` in your domain file. +If the action doesn't know how to handle the user's request, it will use this response to ask the user to rephrase. 
+For example, add the following responses to your domain file: + +```md +utter_ask_rephrase: +- text: "Sorry, I'm not sure I understand. Could you rephrase it?" +- text: "Could you please rephrase your message? I didn't quite get that." +``` + +After adding all the relevant pieces, the action is now able to query the knowledge base. + +## How It Works + +`ActionQueryKnowledgeBase` looks at both the entities that were picked up in the request as well as the +previously set slots to decide what to query for. + +### Query the Knowledge Base for Objects + +In order to query the knowledge base for any kind of object, the user's request needs to include the object type. +Let's look at an example: + +Can you please name some restaurants? + +This question includes the object type of interest: “restaurant.” +The bot needs to pick up on this entity in order to formulate a query – otherwise the action would not know what objects the user is interested in. + +When the user says something like: + +What Italian restaurant options in Berlin do I have? + +The user wants to obtain a list of restaurants that (1) have Italian cuisine and (2) are located in +Berlin. If the NER detects those attributes in the request of the user, the action will use those to filter the +restaurants found in the knowledge base. + +In order for the bot to detect these attributes, you need to mark “Italian” and “Berlin” as entities in the NLU data: + +```md +What [Italian](cuisine) [restaurant](object_type) options in [Berlin](city) do I have?. +``` + +The names of the attributes, “cuisine” and “city,” should be equal to the ones used in the knowledge base. +You also need to add those as entities and slots to the domain file. + +### Query the Knowledge Base for an Attribute of an Object + +If the user wants to obtain specific information about an object, the request should include both the object and +attribute of interest. +For example, if the user asks something like: + +What is the cuisine of Berlin Burrito Company? + +The user wants to obtain the “cuisine” (attribute of interest) for the restaurant “Berlin Burrito Company” (object of +interest). + +The attribute and object of interest should be marked as entities in the NLU training data: + +```md +What is the [cuisine](attribute) of [Berlin Burrito Company](restaurant)? +``` + +Make sure to add the object type, “restaurant,” to the domain file as entity and slot. + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="resolve-mentions"></a> + +### Resolve Mentions + +Following along from the above example, users may not always refer to restaurants by their names. +Users can either refer to the object of interest by its name, e.g. “Berlin Burrito Company” (representation string +of the object), or they may refer to a previously listed object via a mention, for example: + +What is the cuisine of the second restaurant you mentioned? + +Our action is able to resolve these mentions to the actual object in the knowledge base. +More specifically, it can resolve two mention types: (1) ordinal mentions, such as “the first one”, and (2) +mentions such as “it” or “that one”. + +**Ordinal Mentions** + +When a user refers to an object by its position in a list, it is called an ordinal mention. Here's an example: + +* User: What restaurants in Berlin do you know? + +* Bot: Found the following objects of type 'restaurant': 1: I due forni 2: PastaBar 3: Berlin Burrito Company + +* User: Does the first one have outside seating? 
+ +The user referred to “I due forni” by the term “the first one”. +Other ordinal mentions might include “the second one,” “the last one,” “any,” or “3”. + +Ordinal mentions are typically used when a list of objects was presented to the user. +To resolve those mentions to the actual object, we use an ordinal mention mapping which is set in the +`KnowledgeBase` class. +The default mapping looks like: + +```python +{ + "1": lambda l: l[0], + "2": lambda l: l[1], + "3": lambda l: l[2], + "4": lambda l: l[3], + "5": lambda l: l[4], + "6": lambda l: l[5], + "7": lambda l: l[6], + "8": lambda l: l[7], + "9": lambda l: l[8], + "10": lambda l: l[9], + "ANY": lambda l: random.choice(l), + "LAST": lambda l: l[-1], +} +``` + +The ordinal mention mapping maps a string, such as “1”, to the object in a list, e.g. `lambda l: l[0]`, meaning the +object at index `0`. + +As the ordinal mention mapping does not, for example, include an entry for “the first one”, +it is important that you use [Entity Synonyms](./training-data-format.mdx#entity-synonyms) to map “the first one” in your NLU data to “1”: + +```md +Does the [first one](mention:1) have [outside seating](attribute:outside-seating)? +``` + +The NER detects “first one” as a `mention` entity, but puts “1” into the `mention` slot. +Thus, our action can take the `mention` slot together with the ordinal mention mapping to resolve “first one” to +the actual object “I due forni”. + +You can overwrite the ordinal mention mapping by calling the function `set_ordinal_mention_mapping()` on your +`KnowledgeBase` implementation (see [Customizing the InMemoryKnowledgeBase](./knowledge-bases.mdx#customize-in-memory-knowledge-base)). + +**Other Mentions** + +Take a look at the following conversation: + +* User: What is the cuisine of PastaBar? + +* Bot: PastaBar has an Italian cuisine. + +* User: Does it have wifi? + +* Bot: Yes. + +* User: Can you give me an address? + +In the question “Does it have wifi?”, the user refers to “PastaBar” by the word “it”. +If the NER detected “it” as the entity `mention`, the knowledge base action would resolve it to the last mentioned +object in the conversation, “PastaBar”. + +In the next input, the user refers indirectly to the object “PastaBar” instead of mentioning it explicitly. +The knowledge base action would detect that the user wants to obtain the value of a specific attribute, in this case, the address. +If no mention or object was detected by the NER, the action assumes the user is referring to the most recently +mentioned object, “PastaBar”. + +You can disable this behavior by setting `use_last_object_mention` to `False` when initializing the action. + +## Customization + +### Customizing `ActionQueryKnowledgeBase` + +You can overwrite the following two functions of `ActionQueryKnowledgeBase` if you'd like to customize what the bot +says to the user: + +* `utter_objects()` + +* `utter_attribute_value()` + +`utter_objects()` is used when the user has requested a list of objects. +Once the bot has retrieved the objects from the knowledge base, it will respond to the user by default with a message, formatted like: + +Found the following objects of type 'restaurant': +1: I due forni +2: PastaBar +3: Berlin Burrito Company + +Or, if no objects are found, + +I could not find any objects of type 'restaurant'. + +If you want to change the utterance format, you can overwrite the method `utter_objects()` in your action. 
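+
+For example, a minimal sketch of such an override could look like the following. Note that the
+parameter list of `utter_objects()` shown here is an assumption; check the
+`ActionQueryKnowledgeBase` implementation in the rasa-sdk package for the exact (and possibly
+async) signature in your version:
+
+```python
+from typing import Any, Dict, List, Text
+
+from rasa_sdk.executor import CollectingDispatcher
+from rasa_sdk.knowledge_base.actions import ActionQueryKnowledgeBase
+from rasa_sdk.knowledge_base.storage import InMemoryKnowledgeBase
+
+
+class MyKnowledgeBaseAction(ActionQueryKnowledgeBase):
+    def __init__(self):
+        knowledge_base = InMemoryKnowledgeBase("data.json")
+        super().__init__(knowledge_base)
+
+    def utter_objects(
+        self,
+        dispatcher: CollectingDispatcher,
+        object_type: Text,
+        objects: List[Dict[Text, Any]],
+    ) -> None:
+        # Assumed parameters: the dispatcher, the queried object type, and the
+        # list of matching objects from the knowledge base.
+        if objects:
+            names = ", ".join(obj["name"] for obj in objects)
+            dispatcher.utter_message(text=f"Here are the {object_type}s I found: {names}.")
+        else:
+            dispatcher.utter_message(text=f"Sorry, I couldn't find any {object_type}s.")
+```
+
+Because everything else is still inherited from `ActionQueryKnowledgeBase`, only the wording of
+the bot's reply changes; querying and mention resolution keep working as before.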
+ +The function `utter_attribute_value()` determines what the bot utters when the user is asking for specific information about +an object. + +If the attribute of interest was found in the knowledge base, the bot will respond with the following utterance: + +'Berlin Burrito Company' has the value 'Mexican' for attribute 'cuisine'. + +If no value for the requested attribute was found, the bot will respond with + +Did not find a valid value for attribute 'cuisine' for object 'Berlin Burrito Company'. + +If you want to change the bot utterance, you can overwrite the method `utter_attribute_value()`. + +:::note +There is a [tutorial](https://blog.rasa.com/integrating-rasa-with-knowledge-bases/) on our blog about +how to use knowledge bases in custom actions. The tutorial explains the implementation behind +`ActionQueryKnowledgeBase` in detail. + +::: + +### Creating Your Own Knowledge Base Actions + +`ActionQueryKnowledgeBase` should allow you to easily get started with integrating knowledge bases into your actions. +However, the action can only handle two kind of user requests: + +* the user wants to get a list of objects from the knowledge base + +* the user wants to get the value of an attribute for a specific object + +The action is not able to compare objects or consider relations between objects in your knowledge base. +Furthermore, resolving any mention to the last mentioned object in the conversation might not always be optimal. + +If you want to tackle more complex use cases, you can write your own custom action. +We added some helper functions to `rasa_sdk.knowledge_base.utils` +([link to code](https://github.com/RasaHQ/rasa-sdk/tree/master/rasa_sdk/knowledge_base/) ) +to help you when implement your own solution. +We recommend using `KnowledgeBase` interface so that you can still use the `ActionQueryKnowledgeBase` +alongside your new custom action. + +If you write a knowledge base action that tackles one of the above use cases or a new one, be sure to tell us about +it on the [forum](https://forum.rasa.com)! + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="customize-in-memory-knowledge-base"></a> + +### Customizing the `InMemoryKnowledgeBase` + +The class `InMemoryKnowledgeBase` inherits `KnowledgeBase`. +You can customize your `InMemoryKnowledgeBase` by overwriting the following functions: + +* `get_key_attribute_of_object()`: To keep track of what object the user was talking about last, we store the value + of the key attribute in a specific slot. Every object should have a key attribute that is unique, + similar to the primary key in a relational database. By default, the name of the key attribute for every object type + is set to `id`. You can overwrite the name of the key attribute for a specific object type by calling + `set_key_attribute_of_object()`. + +* `get_representation_function_of_object()`: Let's focus on the following restaurant: + + ```json + { + "id": 0, + "name": "Donath", + "cuisine": "Italian", + "outside-seating": true, + "price-range": "mid-range" + } + ``` + + When the user asks the bot to list any Italian restaurant, it doesn't need all of the details of the restaurant. + Instead, you want to provide a meaningful name that identifies the restaurant – in most cases, the name of the object will do. + The function `get_representation_function_of_object()` returns a lambda function that maps the + above restaurant object to its name. 
+ + ```python + lambda obj: obj["name"] + ``` + + This function is used whenever the bot is talking about a specific object, so that the user is presented a meaningful + name for the object. + + By default, the lambda function returns the value of the `"name"` attribute of the object. + If your object does not have a `"name"` attribute , or the `"name"` of an object is + ambiguous, you should set a new lambda function for that object type by calling + `set_representation_function_of_object()`. + +* `set_ordinal_mention_mapping()`: The ordinal mention mapping is needed to resolve an ordinal mention, such as + “second one,” to an object in a list. By default, the ordinal mention mapping looks like this: + + ```python + { + "1": lambda l: l[0], + "2": lambda l: l[1], + "3": lambda l: l[2], + "4": lambda l: l[3], + "5": lambda l: l[4], + "6": lambda l: l[5], + "7": lambda l: l[6], + "8": lambda l: l[7], + "9": lambda l: l[8], + "10": lambda l: l[9], + "ANY": lambda l: random.choice(l), + "LAST": lambda l: l[-1], + } + ``` + + You can overwrite it by calling the function `set_ordinal_mention_mapping()`. + If you want to learn more about how this mapping is used, check out [Resolve Mentions](./knowledge-bases.mdx#resolve-mentions). + +See the [example bot](https://github.com/RasaHQ/rasa/blob/master/examples/knowledgebasebot/actions.py) for an +example implementation of an `InMemoryKnowledgeBase` that uses the method `set_representation_function_of_object()` +to overwrite the default representation of the object type “hotel.” +The implementation of the `InMemoryKnowledgeBase` itself can be found in the +[rasa-sdk](https://github.com/RasaHQ/rasa-sdk/tree/master/rasa_sdk/knowledge_base/) package. + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="custom-knowledge-base"></a> + +### Creating Your Own Knowledge Base + +If you have more data or if you want to use a more complex data structure that, for example, involves relations between +different objects, you can create your own knowledge base implementation. +Just inherit `KnowledgeBase` and implement the methods `get_objects()`, `get_object()`, and +`get_attributes_of_object()`. The [knowledge base code](https://github.com/RasaHQ/rasa-sdk/tree/master/rasa_sdk/knowledge_base/) +provides more information on what those methods should do. + +You can also customize your knowledge base further, by adapting the methods mentioned in the section +[Customizing the InMemoryKnowledgeBase](./knowledge-bases.mdx#customize-in-memory-knowledge-base). + +:::note +We wrote a [blog post](https://blog.rasa.com/set-up-a-knowledge-base-to-encode-domain-knowledge-for-rasa/) +that explains how you can set up your own knowledge base. + +::: diff --git a/docs/docs/lock-stores.mdx b/docs/docs/lock-stores.mdx new file mode 100644 index 000000000000..13227ce2801c --- /dev/null +++ b/docs/docs/lock-stores.mdx @@ -0,0 +1,85 @@ +--- +id: lock-stores +sidebar_label: Lock Stores +title: Lock Stores +description: Messages that are being processed lock Rasa for a given conversation ID to ensure that multiple incoming messages for that conversation do not interfere with each other. Rasa provides multiple implementations to maintain conversation locks. +--- + +Rasa uses a ticket lock mechanism to ensure that incoming messages for a given +conversation ID are processed in the right order, and locks conversations while +messages are actively processed. 
This means multiple Rasa servers can
+be run in parallel as replicated services, and clients do not necessarily need to
+address the same node when sending messages for a given conversation ID.
+
+## InMemoryLockStore (default)
+
+
+* **Description**
+
+  `InMemoryLockStore` is the default lock store. It maintains conversation locks
+  within a single process.
+
+  :::note
+  This lock store should not be used when multiple Rasa servers are run in
+  parallel.
+
+  :::
+
+
+
+* **Configuration**
+
+  To use the `InMemoryLockStore` no configuration is needed.
+
+
+## RedisLockStore
+
+
+* **Description**
+
+  `RedisLockStore` maintains conversation locks using Redis as a persistence layer.
+  This is the recommended lock store for running a replicated set of Rasa servers.
+
+
+
+* **Configuration**
+
+  To set up Rasa with Redis the following steps are required:
+
+  1. Start your Redis instance
+
+  2. Add the required configuration to your `endpoints.yml`:
+
+     ```yaml
+     lock_store:
+         type: "redis"
+         url: <url of the redis instance, e.g. localhost>
+         port: <port of your redis instance, usually 6379>
+         password: <password used for authentication>
+         db: <number of your database within redis, e.g. 0>
+     ```
+
+  3. To start the Rasa server using your Redis backend, add the `--endpoints`
+     flag, e.g.:
+
+     ```bash
+     rasa run -m models --endpoints endpoints.yml
+     ```
+
+
+
+* **Parameters**
+
+  * `url` (default: `localhost`): The URL of your Redis instance
+
+  * `port` (default: `6379`): The port on which Redis is running
+
+  * `db` (default: `1`): The number of your Redis database
+
+  * `password` (default: `None`): Password used for authentication
+    (`None` equals no authentication)
+
+  * `use_ssl` (default: `False`): Whether or not the communication is encrypted
+
+  * `socket_timeout` (default: `10`): Time in seconds after which an
+    error is raised if Redis doesn't answer
\ No newline at end of file
diff --git a/docs/docs/messaging-and-voice-channels.mdx b/docs/docs/messaging-and-voice-channels.mdx
new file mode 100644
index 000000000000..eafd24e21d47
--- /dev/null
+++ b/docs/docs/messaging-and-voice-channels.mdx
@@ -0,0 +1,72 @@
+---
+id: messaging-and-voice-channels
+sidebar_label: Connecting to a Channel
+title: Connecting to a Channel
+description: Check out how to make your Rasa assistant available on platforms like Facebook Messenger, Slack, Telegram or even your very own website.
+---
+
+<!-- TODO: needs full general information about setting them up, what url to hit it at, etc. -->
+
+To make your assistant available on a messaging platform you need to provide credentials
+in a `credentials.yml` file.
+An example file is created when you run `rasa init`, so it's easiest to edit that file
+and add your credentials there.
Here is an example with Facebook credentials: + +```yaml title="credentials.yml" +facebook: + verify: "rasa-bot" + secret: "3e34709d01ea89032asdebfe5a74518" + page-access-token: "EAAbHPa7H9rEBAAuFk4Q3gPKbDedQnx4djJJ1JmQ7CAqO4iJKrQcNT0wtD" +``` + +Learn how to make your assistant available on: + +* [Your Own Website](./connectors/your-own-website.mdx) + +* [Facebook Messenger](./connectors/facebook-messenger.mdx) + +* [Slack](./connectors/slack.mdx) + +* [Telegram](./connectors/telegram.mdx) + +* [Twilio](./connectors/twilio.mdx) + +* [Microsoft Bot Framework](./connectors/microsoft-bot-framework.mdx) + +* [Cisco Webex Teams](./connectors/cisco-webex-teams.mdx) + +* [RocketChat](./connectors/rocketchat.mdx) + +* [Mattermost](./connectors/mattermost.mdx) + +* [Google Hangouts Chat](./connectors/hangouts.mdx) + +* [Custom Connectors](./connectors/custom-connectors.mdx) + + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="using-ngrok"></a> + +## Testing Channels on Your Local Machine with Ngrok + +You can use [ngrok](https://ngrok.com/) to create a connection to your local +computer that is publicly available on the internet. +You don't need this when running Rasa on a server because, you can set up a domain +name to point to that server's IP address, or use the IP address itself. + +After installing ngrok, run: + +```bash +ngrok http 5005; rasa run +``` + +Your webhook address will look like the following: + +* `https://yyyyyy.ngrok.io/webhooks/<CHANNEL>/webhook`, e.g. + +* `https://yyyyyy.ngrok.io/webhooks/facebook/webhook` + +:::caution +With the free-tier of ngrok, you can run into limits on how many connections you can make per minute. +As of writing this, it is set to 40 connections / minute. + +::: diff --git a/docs/docs/migrate-from.mdx b/docs/docs/migrate-from.mdx new file mode 100644 index 000000000000..fcb973c56ced --- /dev/null +++ b/docs/docs/migrate-from.mdx @@ -0,0 +1,27 @@ +--- +id: migrate-from +sidebar_label: Migrate From (beta) +title: Migrate From Other Tools (beta) +--- + +Here are a few reasons why we see developers switching from other tools to Rasa Open Source: + +* **Faster**: Runs locally - no HTTP requests or server round trips required + +* **Customizable**: Tune models and get higher accuracy with your data set + +* **Open source**: No risk of vendor lock-in - Rasa Open Source is under the Apache 2.0 license and you can +use it in commercial projects + +In addition, our open source tools allow developers to build contextual AI assistants and manage dialogues +with machine learning instead of rules - check it out in <a className="reference external" href="http://blog.rasa.com/a-new-approach-to-conversational-software/" target="_blank">this blog post</a>. 
+
+Learn how to migrate from:
+
+* [Google Dialogflow](./migrate-from/google-dialogflow-to-rasa.mdx)
+
+* [Wit.ai](./migrate-from/facebook-wit-ai-to-rasa.mdx)
+
+* [Microsoft LUIS](./migrate-from/microsoft-luis-to-rasa.mdx)
+
+* [IBM Watson](./migrate-from/ibm-watson-to-rasa.mdx)
\ No newline at end of file
diff --git a/docs/docs/migrate-from/facebook-wit-ai-to-rasa.mdx b/docs/docs/migrate-from/facebook-wit-ai-to-rasa.mdx
new file mode 100644
index 000000000000..66e5a76f5972
--- /dev/null
+++ b/docs/docs/migrate-from/facebook-wit-ai-to-rasa.mdx
@@ -0,0 +1,75 @@
+---
+id: facebook-wit-ai-to-rasa
+sidebar_label: "Rasa as open source alternative to Facebook\u2019s Wit.ai - Migration\
+  \ Guide"
+title: "Rasa as open source alternative to Facebook\u2019s Wit.ai - Migration Guide"
+description: Open source alternative to Facebook's Wit.ai for conversational bots and NLP
+---
+
+To get started with migrating your application from Wit.ai to Rasa:
+
+## Step 1: Export your Training Data from Wit.ai
+
+Navigate to your app's settings page by clicking the **Settings** icon in the upper right corner. Scroll down to **Export your data** and hit the button **Download .zip with your data**.
+
+This will download a file with a `.zip` extension. Unzip this file to create a folder. The file you want from your download is called `expressions.json`.
+
+## Step 2: Create a Rasa Project
+
+To create a Rasa project, run:
+
+```bash
+rasa init
+```
+
+This will create a directory called `data`.
+Remove the files in this directory, and
+move the `expressions.json` file into this directory.
+
+```bash
+rm -r data/*
+mv /path/to/expressions.json data/
+```
+
+## Step 3: Train your NLU model
+
+To train a model using your Wit data, run:
+
+```bash
+rasa train nlu
+```
+
+## Step 4: Test your NLU model
+
+Let's see how your NLU model will interpret some test messages.
+To start a testing session, run:
+
+```bash
+rasa shell nlu
+```
+
+This will prompt you for input.
+Type a test message and press 'Enter'.
+The output of your NLU model will be printed to the screen.
+You can keep entering messages and test as many as you like.
+Press 'control + C' to quit.
+
+## Step 5: Start a Server with your NLU Model
+
+To start a server with your NLU model, run:
+
+```bash
+rasa run nlu
+```
+
+This will start a server listening on port 5005.
+
+To send a request to the server, run:
+
+```bash
+curl 'localhost:5005/model/parse?emulation_mode=wit' -d '{"text": "hello"}'
+```
+
+The `emulation_mode` parameter tells Rasa that you want your JSON
+response to have the same format as you would get from Wit.ai.
+You can also leave it out to get the result in the usual Rasa format.
+
+Join the [Rasa Community Forum](https://forum.rasa.com/) and let us know how your migration went!
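+
+If you'd rather script this check than use curl, here is a minimal sketch using Python's
+`requests` library. It assumes the server from the previous step is running locally on
+port 5005; the message text is just a placeholder.
+
+```python
+# Minimal sketch: query the locally running Rasa NLU server and ask for a
+# Wit.ai-style response via the emulation_mode query parameter.
+import requests
+
+response = requests.post(
+    "http://localhost:5005/model/parse",
+    params={"emulation_mode": "wit"},   # drop this to get the native Rasa format
+    json={"text": "hello"},             # placeholder test message
+)
+print(response.json())
+```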
diff --git a/docs/docs/migrate-from/google-dialogflow-to-rasa.mdx b/docs/docs/migrate-from/google-dialogflow-to-rasa.mdx
new file mode 100644
index 000000000000..081482584451
--- /dev/null
+++ b/docs/docs/migrate-from/google-dialogflow-to-rasa.mdx
@@ -0,0 +1,91 @@
+---
+id: google-dialogflow-to-rasa
+sidebar_label: Rasa as open source alternative to Google Dialogflow - Migration Guide
+title: Rasa as open source alternative to Google Dialogflow - Migration Guide
+description: Open source alternative to Google Dialogflow for conversational bots and NLP
+---
+import useBaseUrl from '@docusaurus/useBaseUrl';
+
+Let's get started with migrating your application from Dialogflow to Rasa (you can find a more detailed
+tutorial <a className="reference external" href="http://blog.rasa.com/how-to-migrate-your-existing-google-dialogflow-assistant-to-rasa/" target="_blank">here</a>):
+
+## Step 1: Export your data from Dialogflow
+
+Navigate to your agent's settings by clicking the gear icon.
+
+
+
+<img alt="Dialogflow Export" src={useBaseUrl("/img/dialogflow_export.png")} width="240" />
+
+Click on the 'Export and Import' tab and click on the 'Export as ZIP' button.
+
+
+
+<img alt="Dialogflow Export 2" src={useBaseUrl("/img/dialogflow_export_2.png")} width="675" />
+
+This will download a file with a `.zip` extension. Unzip this file to create a folder.
+
+## Step 2: Create a Rasa Project
+
+To create a Rasa project, run:
+
+```bash
+rasa init
+```
+
+This will create a directory called `data`.
+Remove the files in this directory, and
+move your unzipped folder into this directory.
+
+```bash
+rm -r data/*
+mv testagent data/
+```
+
+## Step 3: Train your NLU model
+
+To train a model using your Dialogflow data, run:
+
+```bash
+rasa train nlu
+```
+
+## Step 4: Test your NLU model
+
+Let's see how your NLU model will interpret some test messages.
+To start a testing session, run:
+
+```bash
+rasa shell nlu
+```
+
+This will prompt you for input.
+Type a test message and press 'Enter'.
+The output of your NLU model will be printed to the screen.
+You can keep entering messages and test as many as you like.
+Press 'control + C' to quit.
+
+## Step 5: Start a Server with your NLU Model
+
+To start a server with your NLU model, run:
+
+```bash
+rasa run
+```
+
+This will start a server listening on port 5005.
+
+To send a request to the server, run:
+
+```bash
+curl 'localhost:5005/model/parse?emulation_mode=dialogflow' -d '{"text": "hello"}'
+```
+
+The `emulation_mode` parameter tells Rasa that you want your JSON
+response to have the same format as you would get from Dialogflow.
+You can also leave it out to get the result in the usual Rasa format.
+
+## Terminology:
+
+The words `intent`, `entity`, and `utterance` have the same meaning in Rasa as they do in Dialogflow.
+In Dialogflow, there is a concept called `Fulfillment`. In Rasa we call this a [Custom Action](./custom-actions.mdx).
+
+Join the [Rasa Community Forum](https://forum.rasa.com/) and let us know how your migration went!
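+
+If your Dialogflow agent relied on a fulfillment webhook, the closest Rasa equivalent is a
+custom action served by the Rasa SDK action server. The sketch below is only an illustration:
+the class name, action name and response text are placeholders, not part of the migrated data.
+
+```python
+# Minimal custom action sketch (hypothetical names), run by the Rasa SDK action server.
+from typing import Any, Dict, List, Text
+
+from rasa_sdk import Action, Tracker
+from rasa_sdk.executor import CollectingDispatcher
+
+
+class ActionGreetUser(Action):
+    def name(self) -> Text:
+        # This is the name you list under `actions` in your domain file.
+        return "action_greet_user"
+
+    def run(
+        self,
+        dispatcher: CollectingDispatcher,
+        tracker: Tracker,
+        domain: Dict[Text, Any],
+    ) -> List[Dict[Text, Any]]:
+        # Send a message back to the user, similar to a fulfillment webhook response.
+        dispatcher.utter_message(text="Hello from your Rasa assistant!")
+        return []
+```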
diff --git a/docs/docs/migrate-from/ibm-watson-to-rasa.mdx b/docs/docs/migrate-from/ibm-watson-to-rasa.mdx
new file mode 100644
index 000000000000..a9fccd12478a
--- /dev/null
+++ b/docs/docs/migrate-from/ibm-watson-to-rasa.mdx
@@ -0,0 +1,10 @@
+---
+id: ibm-watson-to-rasa
+sidebar_label: Rasa as open source alternative to IBM Watson - Migration Tips
+title: Rasa as open source alternative to IBM Watson - Migration Tips
+description: Open source alternative to IBM Watson for conversational bots and NLP
+---
+
+There is no support for IBM Watson yet. However, a group of community members is working on a way
+to use <a className="reference external" href="https://developer.ibm.com/tutorials/learn-how-to-export-import-a-watson-assistant-workspace/" target="_blank">exported IBM Watson workspaces</a>
+in Rasa. If you're interested in that, check out our <a className="reference external" href="https://forum.rasa.com/" target="_blank">Community Forum</a>.
diff --git a/docs/docs/migrate-from/microsoft-luis-to-rasa.mdx b/docs/docs/migrate-from/microsoft-luis-to-rasa.mdx
new file mode 100644
index 000000000000..a1415eb9c276
--- /dev/null
+++ b/docs/docs/migrate-from/microsoft-luis-to-rasa.mdx
@@ -0,0 +1,87 @@
+---
+id: microsoft-luis-to-rasa
+sidebar_label: Rasa as open source alternative to Microsoft LUIS - Migration Guide
+title: Rasa as open source alternative to Microsoft LUIS - Migration Guide
+description: Open source alternative to Microsoft LUIS for conversational bots and NLP
+---
+import useBaseUrl from '@docusaurus/useBaseUrl';
+
+Let's get started with migrating your application from LUIS to Rasa:
+
+## Step 1: Export your Training Data from LUIS
+
+Go to your list of [LUIS applications](https://www.luis.ai/applications) and click
+on the three dots menu next to the app you want to export.
+
+
+
+<img alt="LUIS Export" src={useBaseUrl("/img/luis_export.png")} width="240" />
+
+Select 'Export App'. This will download a file with a `.json` extension that can be imported directly into Rasa.
+
+## Step 2: Create a Rasa Project
+
+To create a Rasa project, run:
+
+```bash
+rasa init
+```
+
+This will create a directory called `data`.
+Remove the files in this directory, and
+move your JSON file into this directory.
+
+```bash
+rm -r data/*
+mv /path/to/file.json data/
+```
+
+## Step 3: Train your NLU model
+
+To train a model using your LUIS data, run:
+
+```bash
+rasa train nlu
+```
+
+## Step 4: Test your NLU model
+
+Let's see how your NLU model will interpret some test messages.
+To start a testing session, run:
+
+```bash
+rasa shell nlu
+```
+
+This will prompt you for input.
+Type a test message and press 'Enter'.
+The output of your NLU model will be printed to the screen.
+You can keep entering messages and test as many as you like.
+Press 'control + C' to quit.
+
+## Step 5: Start a Server with your NLU Model
+
+To start a server with your NLU model, run:
+
+```bash
+rasa run
+```
+
+This will start a server listening on port 5005.
+
+To send a request to the server, run:
+
+```bash
+curl 'localhost:5005/model/parse?emulation_mode=luis' -d '{"text": "hello"}'
+```
+
+The `emulation_mode` parameter tells Rasa that you want your JSON
+response to have the same format as you would get from LUIS.
+You can also leave it out to get the result in the usual Rasa format.
+
+## Terminology:
+
+The words `intent`, `entity`, and `utterance` have the same meaning in Rasa as they do
+in LUIS.
+LUIS's `patterns` feature is very similar to Rasa NLU's [regex features](./training-data-format/.mdx#regular-expression-features) +LUIS's `phrase lists` feature does not currently have an equivalent in Rasa NLU. + +Join the [Rasa Community Forum](https://forum.rasa.com/) and let us know how your migration went! diff --git a/docs/docs/migration-guide.mdx b/docs/docs/migration-guide.mdx new file mode 100644 index 000000000000..38b0d3c9b6b7 --- /dev/null +++ b/docs/docs/migration-guide.mdx @@ -0,0 +1,614 @@ +--- +id: migration-guide +sidebar_label: Version Migration Guide +title: Version Migration Guide +description: Information about changes between major versions of chatbot framework Rasa Core and how you can migrate from one version to another. +--- + +This page contains information about changes between major versions and +how you can migrate from one version to another. + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="migration-to-rasa-2-0"></a> + +## Rasa 1.10 to Rasa 2.0 + +### General + +* The deprecated brokers `FileProducer`, `KafkaProducer`, `PikaProducer` + and the `SQLProducer` have been removed. If you used these brokers in your + `endpoints.yml` make sure to use the renamed variants instead: + - `FileProducer` became `FileEventBroker` + - `KafkaProducer` became `KafkaEventBroker` + - `PikaProducer` became `PikaEventBroker` + - `SQLProducer` became `SQLEventBroker` + +* The deprecated `EmbeddingIntentClassifier` has been removed. If you used this + component in your `pipeline` configuration (`config.yml`) you can replace it + with `DIETClassifier`. It accepts the same configuration parameters. + +* The deprecated `KerasPolicy` has been removed. If you used this + component in your `policies` configuration (`config.yml`) you can replace it + with `TEDPolicy`. It accepts the same configuration parameters. + +### Training data files + +You can convert existing NLU and Stories training data files in the `Markdown` format +to the new `YAML` format using following commands: + +```bash +rasa data convert nlu -f yaml --data={SOURCE_DIR} --out={TARGET_DIR} +rasa data convert core -f yaml --data={SOURCE_DIR} --out={TARGET_DIR} +``` + +Converted files will have the same names as the original ones but with a +`_converted.yml` suffix. + +Please note, that it's not currently possible to convert stories that contain forms +automatically, these stories will be skipped and should be converted manually, see +`Rule Policy` below. + +### Rule Policy + +With the introduction of [Rules](./rules.mdx), the following policies are +deprecated: + +- [Mapping Policy](./policies.mdx#mapping-policy) +- [Fallback Policy](./policies.mdx#fallback-policy) +- [Two-Stage-Fallback Policy](./policies.mdx#two-stage-fallback-policy) +- [Form Policy](./policies.mdx#form-policy) + +#### Migrating from the Mapping Policy + +If you previously used the [Mapping Policy](./policies.mdx#mapping-policy), you +can follow the documentation on [FAQs](./rules.mdx#faqs--mapping-intents-to-actions) to convert your mapped +intents to rules. 
Suppose you previously mapped an intent `ask_is_bot` as follows: + +```yaml +intents: + - ask_is_bot: + triggers: action_is_bot +``` + +This becomes the following rule: + +```yaml +rules: +- rule: Rule to map `ask_is_bot` intent + steps: + - intent: ask_is_bot + - action: action_is_bot +``` + +#### Migrating from the Fallback Policy + +If you previously used the [Fallback Policy](./policies.mdx#fallback-policy), the following model +configuration would translate as follows given a previous configuration like this: + +```yaml +policies: + - name: "FallbackPolicy" + nlu_threshold: 0.4 + core_threshold: 0.3 + fallback_action_name: "action_default_fallback" + ambiguity_threshold: 0.1 +``` + +The new configuration would then look like: + +```yaml +policies: +# Other policies +- name: RulePolicy + core_fallback_threshold: 0.3 + core_fallback_action_name: "action_default_fallback" + +pipeline: + # Other components + - name: FallbackClassifier + threshold: 0.4 + ambiguity_threshold: 0.1 +``` + +In addition, you need to add a [Rule](./rules.mdx) to specify which action to run +in case of low NLU confidence: + +```yaml +rules: +- rule: Ask the user to rephrase whenever they send a message with low NLU confidence + steps: + - intent: nlu_fallback + - action: utter_please_rephrase +``` + +Please see the [Fallback Actions](./fallback-handoff.mdx#fallbackactions) documentation for more +information. + +#### Migrating from the Two-Stage-Fallback Policy + +If you previously used the +[Two-Stage-Fallback Policy](./policies.mdx#two-stage-fallback-policy), the following model +configuration would translate as follows given a previous configuration like this: + +```yaml +policies: + - name: TwoStageFallbackPolicy + nlu_threshold: 0.4 + ambiguity_threshold: 0.1 + core_threshold: 0.3 + fallback_core_action_name: "action_default_fallback" + fallback_nlu_action_name: "action_default_fallback" + deny_suggestion_intent_name: "out_of_scope" +``` + +The new configuration would then look like: + +```yaml +policies: +# Other policies +- name: RulePolicy + core_fallback_threshold: 0.3 + core_fallback_action_name: "action_default_fallback" + +pipeline: + # Other components + - name: FallbackClassifier + threshold: 0.4 + ambiguity_threshold: 0.1 +``` + +In addition you need to add a [Rule](./rules.mdx) to activate the Two-Stage Fallback for +messages with low NLU confidence. + +```yaml +rules: +- rule: Implementation of the TwoStageFallbackPolicy + steps: + # This intent is automatically triggered by the `FallbackClassifier` in the NLU + # pipeline in case the intent confidence was below the specified threshold. + - intent: nlu_fallback + # The Fallback is now implemented as a form. + - action: two_stage_fallback + - active_loop: two_stage_fallback +``` + +Note that the previous parameters `fallback_nlu_action_name` and +`deny_suggestion_intent_name` are no longer configurable and have the fixed values +`action_default_fallback` and `out_of_scope`. + +Please see the [Fallback Actions](./fallback-handoff.mdx#fallbackactions) documentation for more +information. + +#### Form Policy + +As of Rasa Open Source 2.0 the logic for [Forms](./forms.mdx) has been moved from the +Rasa SDK to Rasa Open Source to simplify implementations of custom action libraries. +This means that you no longer need to use the `FormAction` when implementing custom +actions with the Python SDK. Instead you can use a regular `Action` to validate and +request slots. 
+ +It is now recommended to move the slot mappings from your custom action to the +[Domain](./domain.mdx) of your bot. Consider a custom form action like this: + +```python + +class RestaurantForm(FormAction): + def name(self) -> Text: + return "restaurant_form" + + @staticmethod + def required_slots(tracker: Tracker) -> List[Text]: + return ["cuisine"] + + def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]: + return { + "cuisine": self.from_entity(entity="cuisine", not_intent="chitchat"), + } + + @staticmethod + def cuisine_db() -> List[Text]: + """Database of supported cuisines""" + + return ["caribbean", "chinese", "french"] + + def validate_cuisine( + self, + value: Text, + dispatcher: CollectingDispatcher, + tracker: Tracker, + domain: Dict[Text, Any], + ) -> Dict[Text, Any]: + """Validate cuisine value.""" + + if value.lower() in self.cuisine_db(): + # validation succeeded, set the value of the "cuisine" slot to value + return {"cuisine": value} + else: + dispatcher.utter_message(template="utter_wrong_cuisine") + # validation failed, set this slot to None, meaning the + # user will be asked for the slot again + return {"cuisine": None} + + def submit( + self, + dispatcher: CollectingDispatcher, + tracker: Tracker, + domain: Dict[Text, Any], + ) -> List[Dict]: + """Define what the form has to do + after all required slots are filled""" + + # utter submit template + dispatcher.utter_message(template="utter_submit") + return [] +``` + +Start the migration by adding the [RulePolicy](./policies.mdx#rule-policy) to your +model configuration: + +```yaml +policies: +# Other policies +# ... +- name: RulePolicy +``` + +Then you need to define the form and the required slots in the domain as described in +[Forms](./forms.mdx#defining-a-form): + +```yaml +forms: + restaurant_form: + cuisine: + - type: cuisine + entity: cuisine + not_intent: chitchat +``` + +You don't have to add a rule for activating the form as this is already covered by your +existing stories. However, you have to add a story for handle the submission of the +form. + +```yaml +rules: +- rule: Submit form + condition: + # Condition that form is active. + - active_loop: restaurant_form + steps: + - action: restaurant_form + - active_loop: null + - slot_was_set: + - requested_slot: null + # The action we want to run when the form is submitted. + - action: utter_submit +``` + +The last step is to implement a custom action to validate the form slots. Start by +adding the custom action to your domain: + +```yaml +actions: +# Other actions +# ... 
+- validate_restaurant_form +``` + +Then add a custom action which validates the `cuisine` slot: + +```python +class RestaurantFormValidator(Action): + def name(self) -> Text: + return "validate_restaurant_form" + + def run( + self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict + ) -> List[EventType]: + extracted_slots: Dict[Text, Any] = tracker.get_extracted_slots() + + cuisine_slot_value = extracted_slots.get("cuisine") + validated_slot_event = self.validate_cuisine( + cuisine_slot_value, dispatcher, tracker, domain + ) + return [validated_slot_event] + + @staticmethod + def cuisine_db() -> List[Text]: + """Database of supported cuisines""" + + return ["caribbean", "chinese", "french"] + + def validate_cuisine( + self, + value: Text, + dispatcher: CollectingDispatcher, + tracker: Tracker, + domain: Dict[Text, Any], + ) -> EventType: + """Validate cuisine value.""" + + if value.lower() in self.cuisine_db(): + # validation succeeded, set the value of the "cuisine" slot to value + return SlotSet("cuisine", value) + else: + dispatcher.utter_message(template="utter_wrong_cuisine") + # validation failed, set this slot to None, meaning the + # user will be asked for the slot again + return SlotSet("cuisine", None) +``` + +Please see [Forms](./forms.mdx) if you have further customizations in your +``FormAction``. + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="migration-to-rasa-1-8"></a> + +## Rasa 1.7 to Rasa 1.8 + +:::caution +This is a release **breaking backwards compatibility**. +It is not possible to load previously trained models. Please make sure to retrain a +model before trying to use it with this improved version. + +::: + +### General + +* The [TED Policy](./policies.mdx#ted-policy) replaced the `keras_policy` as recommended machine + learning policy. New projects generated with `rasa init` will automatically use + this policy. In case you want to change your existing model configuration to use the + [TED Policy](./policies.mdx#ted-policy) add this to the `policies` section in your `config.yml` + and remove potentially existing `KerasPolicy` entries: + + ```yaml + policies: + # - ... other policies + - name: TEDPolicy + max_history: 5 + epochs: 100 + ``` + + The given snippet specifies default values for the parameters `max_history` and + `epochs`. `max_history` is particularly important and strongly depends on your stories. + Please see the docs of the [TED Policy](./policies.mdx#ted-policy) if you want to customize them. + +* All pre-defined pipeline templates are deprecated. **Any templates you use will be + mapped to the new configuration, but the underlying architecture is the same**. + Take a look at [Tuning Your Model](./tuning-your-model.mdx) to decide on what components you should use + in your configuration file. + +* The [Embedding Policy](./policies.mdx#embedding-policy) was renamed to [TED Policy](./policies.mdx#ted-policy). The functionality of the policy stayed the same. + Please update your configuration files to use `TEDPolicy` instead of `EmbeddingPolicy`. + +* Most of the model options for `EmbeddingPolicy`, `EmbeddingIntentClassifier`, and `ResponseSelector` got + renamed. 
Please update your configuration files using the following mapping: + + | Old model option | New model option | + |-----------------------------|-----------------------------------------------------| + |hidden_layers_sizes_a |dictionary “hidden_layers_sizes” with key “text” | + |hidden_layers_sizes_b |dictionary “hidden_layers_sizes” with key “label” | + |hidden_layers_sizes_pre_dial |dictionary “hidden_layers_sizes” with key “dialogue” | + |hidden_layers_sizes_bot |dictionary “hidden_layers_sizes” with key “label” | + |num_transformer_layers |number_of_transformer_layers | + |num_heads |number_of_attention_heads | + |max_seq_length |maximum_sequence_length | + |dense_dim |dense_dimension | + |embed_dim |embedding_dimension | + |num_neg |number_of_negative_examples | + |mu_pos |maximum_positive_similarity | + |mu_neg |maximum_negative_similarity | + |use_max_sim_neg |use_maximum_negative_similarity | + |C2 |regularization_constant | + |C_emb |negative_margin_scale | + |droprate_a |droprate_dialogue | + |droprate_b |droprate_label | + |evaluate_every_num_epochs |evaluate_every_number_of_epochs | + |evaluate_on_num_examples |evaluate_on_number_of_examples | + + Old configuration options will be mapped to the new names, and a warning will be thrown. + However, these will be deprecated in a future release. + +* The Embedding Intent Classifier is now deprecated and will be replaced by [DIETClassifier](./components/intent-classifiers.mdx#dietclassifier) + in the future. + `DIETClassfier` performs intent classification as well as entity recognition. + If you want to get the same model behavior as the current `EmbeddingIntentClassifier`, you can use + the following configuration of `DIETClassifier`: + + ```yaml + pipeline: + # - ... other components + - name: DIETClassifier + hidden_layers_sizes: + text: [256, 128] + number_of_transformer_layers: 0 + weight_sparsity: 0 + intent_classification: True + entity_recognition: False + use_masked_language_model: False + BILOU_flag: False + # ... any other parameters + ``` + + See [DIETClassifier](./components/intent-classifiers.mdx#dietclassifier) for more information about the new component. + Specifying `EmbeddingIntentClassifier` in the configuration maps to the above component definition, the + behavior is unchanged from previous versions. + +* `CRFEntityExtractor` is now deprecated and will be replaced by `DIETClassifier` in the future. If you want to + get the same model behavior as the current `CRFEntityExtractor`, you can use the following configuration: + + ```yaml + pipeline: + # - ... other components + - name: LexicalSyntacticFeaturizer + features: [ + ["low", "title", "upper"], + [ + "BOS", + "EOS", + "low", + "prefix5", + "prefix2", + "suffix5", + "suffix3", + "suffix2", + "upper", + "title", + "digit", + ], + ["low", "title", "upper"], + ] + - name: DIETClassifier + intent_classification: False + entity_recognition: True + use_masked_language_model: False + number_of_transformer_layers: 0 + # ... any other parameters + ``` + + `CRFEntityExtractor` featurizes user messages on its own, it does not depend on any featurizer. + We extracted the featurization from the component into the new featurizer [LexicalSyntacticFeaturizer](./components/featurizers.mdx#lexicalsyntacticfeaturizer). Thus, + in order to obtain the same results as before, you need to add this featurizer to your pipeline before the + [DIETClassifier](./components/intent-classifiers.mdx#dietclassifier). 
+ Specifying `CRFEntityExtractor` in the configuration maps to the above component definition, the behavior + is unchanged from previous versions. + +* If your pipeline contains `CRFEntityExtractor` and `EmbeddingIntentClassifier` you can substitute both + components with [DIETClassifier](./components/intent-classifiers.mdx#dietclassifier). You can use the following pipeline for that: + + ```yaml + pipeline: + # - ... other components + - name: LexicalSyntacticFeaturizer + features: [ + ["low", "title", "upper"], + [ + "BOS", + "EOS", + "low", + "prefix5", + "prefix2", + "suffix5", + "suffix3", + "suffix2", + "upper", + "title", + "digit", + ], + ["low", "title", "upper"], + ] + - name: DIETClassifier + number_of_transformer_layers: 0 + # ... any other parameters + ``` + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="migration-to-rasa-1-7"></a> + +## Rasa 1.6 to Rasa 1.7 + +### General + +* By default, the `EmbeddingIntentClassifier`, `EmbeddingPolicy`, and `ResponseSelector` will + now normalize the top 10 confidence results if the `loss_type` is `"softmax"` (which has been + default since 1.3, see [Rasa 1.2 to Rasa 1.3](./migration-guide.mdx#migration-to-rasa-1-3)). This is configurable via the `ranking_length` + configuration parameter; to turn off normalization to match the previous behavior, set `ranking_length: 0`. + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="migration-to-rasa-1-3"></a> + +## Rasa 1.2 to Rasa 1.3 + +:::caution +This is a release **breaking backwards compatibility**. +It is not possible to load previously trained models. Please make sure to retrain a +model before trying to use it with this improved version. + +::: + +### General + +* Default parameters of `EmbeddingIntentClassifier` are changed. See + [Components](./components/intent-classifiers.mdx#embedding-intent-classifier) for details. + Architecture implementation is changed as well, so **old trained models cannot be loaded**. + Default parameters and architecture for `EmbeddingPolicy` are changed. See [Policies](./policies.mdx) for details. + It uses transformer instead of lstm. **Old trained models cannot be loaded**. + They use `inner` similarity and `softmax` loss by default instead of + `cosine` similarity and `margin` loss (can be set in config file). + They use `balanced` batching strategy by default to counteract class imbalance problem. + The meaning of `evaluate_on_num_examples` is changed. If it is non zero, random examples will be + picked by stratified split and used as **hold out** validation set, so they will be excluded from training data. + We suggest to set it to zero (default) if data set contains a lot of unique examples of dialogue turns. + Removed `label_tokenization_flag` and `label_split_symbol` from component. Instead moved intent splitting to `Tokenizer` components via `intent_tokenization_flag` and `intent_split_symbol` flag. + +* Default `max_history` for `EmbeddingPolicy` is `None` which means it'll use + the `FullDialogueTrackerFeaturizer`. We recommend to set `max_history` to + some finite value in order to use `MaxHistoryTrackerFeaturizer` + for **faster training**. See [Featurization of Conversations](./policies.mdx#featurization-conversations) for details. + We recommend to increase `batch_size` for `MaxHistoryTrackerFeaturizer` + (e.g. `"batch_size": [32, 64]`) + +* **Compare** mode of `rasa train core` allows the whole core config comparison. + Therefore, we changed the naming of trained models. 
They are named by config file + name instead of policy name. Old naming style will not be read correctly when + creating **compare** plots (`rasa test core`). Please remove old trained models + in comparison folder and retrain. Normal core training is unaffected. + +* We updated the **evaluation metric** for our **NER**. We report the weighted precision and f1-score. + So far we included `no-entity` in this report. However, as most of the tokens actually don't have + an entity set, this will influence the weighted precision and f1-score quite a bit. From now on we + exclude `no-entity` from the evaluation. The overall metrics now only include proper entities. You + might see a drop in the performance scores when running the evaluation again. + +* `/` is reserved as a delimiter token to distinguish between retrieval intent and the corresponding response text + identifier. Make sure you don't include `/` symbol in the name of your intents. + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="migration-to-rasa-1-0"></a> + +## Rasa NLU 0.14.x and Rasa Core 0.13.x to Rasa 1.0 + +:::caution +This is a release **breaking backwards compatibility**. +It is not possible to load previously trained models. Please make sure to retrain a +model before trying to use it with this improved version. + +::: + +### General + +* The scripts in `rasa.core` and `rasa.nlu` can no longer be executed. To train, test, run, … an NLU or Core + model, you should now use the command line interface `rasa`. The functionality is, for the most part, the same as before. + Some changes in commands reflect the combined training and running of NLU and Core models, but NLU and Core can still + be trained and used individually. If you attempt to run one of the old scripts in `rasa.core` or `rasa.nlu`, + an error is thrown that points you to the command you + should use instead. See all the new commands at [Command Line Interface](./command-line-interface.mdx). + +* If you have written a custom output channel, all `send_` methods subclassed + from the `OutputChannel` class need to take an additional `\*\*kwargs` + argument. You can use these keyword args from your custom action code or the + templates in your domain file to send any extra parameters used in your + channel's send methods. + +* If you were previously importing the `Button` or `Element` classes from + `rasa_core.dispatcher`, these are now to be imported from `rasa_sdk.utils`. + +* Rasa NLU and Core previously used [separate configuration files](https://legacy-docs.rasa.com/docs/nlu/0.15.1/migrations/?&_ga=2.218966814.608734414.1560704810-314462423.1543594887#id1). + These two files should be merged into a single file either named `config.yml`, or passed via the `--config` parameter. + +### Script parameters + +* All script parameter names have been unified to follow the same schema. + Any underscores (`_`) in arguments have been replaced with dashes (`-`). + For example: `--max_history` has been changed to `--max-history`. You can + see all of the script parameters in the `--help` output of the commands + in the [Command Line Interface](./command-line-interface.mdx). + +* The `--num_threads` parameter was removed from the `run` command. The + server will always run single-threaded, but will now run asynchronously. If you want to + make use of multiple processes, feel free to check out the [Sanic server + documentation](https://sanic.readthedocs.io/en/latest/sanic/deploying.html#running-via-gunicorn). 
+ +* To avoid conflicts in script parameter names, connectors in the `run` command now need to be specified with + `--connector`, as `-c` is no longer supported. The maximum history in the `rasa visualize` command needs to be + defined with `--max-history`. Output paths and log files cannot be specified with `-o` anymore; `--out` and + `--log-file` should be used. NLU data has been standarized to be `--nlu` and the name of + any kind of data files or directory to be `--data`. + +### HTTP API + +* There are numerous HTTP API endpoint changes which can be found [here](./http-api.mdx). diff --git a/docs/docs/model-configuration.mdx b/docs/docs/model-configuration.mdx new file mode 100644 index 000000000000..7986a01bb7c1 --- /dev/null +++ b/docs/docs/model-configuration.mdx @@ -0,0 +1,41 @@ +--- +id: model-configuration +sidebar_label: Overview +title: Model Configuration +description: Learn about model configuration for Rasa Open Source. +--- +import useBaseUrl from '@docusaurus/useBaseUrl'; + +The configuration file defines the NLU and Core components that your model will use. The +`language` and `pipeline` keys specify how the NLU model should be built. You can +learn about choosing components for your NLU pipeline [here](./tuning-your-model.mdx). The +`policies` key defines the [policies](./policies.mdx) that the Core model will +use. + +## Suggested Config + +You always need to specify the `language` key in your `config.yml` file with the +2-letter ISO language code. + +The two other keys (`pipeline` and `policies`) can be left out or left empty. When +running `rasa train`, the Suggested Config feature will select a default configuration +for the missing key(s) which will then be used to train the model. + +Example `config.yml` file: + +```yaml (docs/sources/data/configs_for_docs/example_for_suggested_config.yml) +``` + +The selected configuration will also be written as comments into the `config.yml` file, +so you can see which configuration was used. For the example above, the resulting file +might look e.g. like this: + +```yaml (docs/sources/data/configs_for_docs/example_for_suggested_config_after_train.yml) +``` + +If you like, you can then un-comment the suggested configuration for one or both of the +keys and make modifications. Note that this will disable automatic suggestions for this +key when training again. +As long as you leave the configuration commented out and don't specify any configuration +for a key yourself, a default configuration will be suggested whenever you train a new +model. diff --git a/docs/docs/model-storage.mdx b/docs/docs/model-storage.mdx new file mode 100644 index 000000000000..601984a4e98e --- /dev/null +++ b/docs/docs/model-storage.mdx @@ -0,0 +1,158 @@ +--- +id: model-storage +sidebar_label: Model Storage +title: Model Storage +--- + +Rasa can load your model in three different ways: + +1. Fetch the model from a server (see [Fetching Models from a Server](./model-storage.mdx#server-fetch-from-server)), or + +2. Fetch the model from a remote storage (see [Remote Storage](./model-storage.mdx#server-fetch-from-remote-storage)). + +3. Load the model specified via `-m` from your local storage system. + +Rasa tries to load a model in the above mentioned order, i.e. it only tries to load your model from your local +storage system if no model server and no remote storage were configured. 
+ +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="server-fetch-from-server"></a> + +## Fetching Models from a Server + +You can configure the HTTP server to fetch models from another URL +by adding it to your `endpoints.yml`. The server will then +reguarly query this URL for zipped models. + +```yaml +models: + url: http://my-server.com/models/default@latest + wait_time_between_pulls: 10 # In seconds, optional, default: 100 +``` + +:::note +If you want to pull the model just once from the server, set +`wait_time_between_pulls` to `None`. + +::: + +:::note +Your model server must provide zipped Rasa models, and have +`{"ETag": <model_hash_string>}` as one of its headers. Rasa will +only download a new model if this model hash has changed. + +::: + +Rasa sends requests to your model server with an `If-None-Match` +header that contains the current model hash. If your model server can +provide a model with a different hash from the one you sent, it should send it +in as a zip file with an `ETag` header containing the new hash. If not, Rasa +expects an empty response with a `204` or `304` status code. + +An example request Rasa might make to your model server looks like this: + +```bash +curl --header "If-None-Match: d41d8cd98f00b204e9800998ecf8427e" http://my-server.com/models/default@latest +``` + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="server-fetch-from-remote-storage"></a> + +## Fetching Models from Remote Storage + +You can also configure the Rasa server to fetch your model from a remote storage: + +```bash +rasa run -m 20190506-100418.tar.gz --remote-storage aws +``` + +The model will be downloaded and stored in a temporary directory on your local storage system. + +Rasa supports using [S3](https://aws.amazon.com/s3/) , +[GCS](https://cloud.google.com/storage/) and [Azure Storage](https://azure.microsoft.com/services/storage/) to save your models. + +* Amazon S3 Storage + + S3 is supported using the `boto3` module which you can + install with `pip install boto3`. + + Start the Rasa server with `remote-storage` option set to + `aws`. Get your S3 credentials and set the following + environment variables: + + * `AWS_SECRET_ACCESS_KEY` + + * `AWS_ACCESS_KEY_ID` + + * `AWS_DEFAULT_REGION` + + * `BUCKET_NAME` + + * `AWS_ENDPOINT_URL` + + If there is no bucket with the name `BUCKET_NAME`, Rasa will create it. + +* Google Cloud Storage + + GCS is supported using the `google-cloud-storage` package, + which you can install with `pip install google-cloud-storage`. + + Start the Rasa server with `remote-storage` option set to `gcs`. + + When running on google app engine and compute engine, the auth + credentials are already set up. For running locally or elsewhere, + checkout their + [client repo](https://github.com/GoogleCloudPlatform/python-docs-samples/tree/master/storage/cloud-client#authentication) + for details on setting up authentication. It involves creating + a service account key file from google cloud console, + and setting the `GOOGLE_APPLICATION_CREDENTIALS` environment + variable to the path of that key file. + +* Azure Storage + + Azure is supported using the legacy `azure-storage-blob` package (v 2.1.0), + which you can install with `pip install -I azure-storage-blob==2.1.0`. + + Start the Rasa server with `remote-storage` option set to `azure`. 
+ + The following environment variables must be set: + + * `AZURE_CONTAINER` + + * `AZURE_ACCOUNT_NAME` + + * `AZURE_ACCOUNT_KEY` + + If there is no container with the name `AZURE_CONTAINER`, Rasa will create it. + +* Other Remote Storage + + Support for other remote storages can be achieved by using an external library or implementing + a custom module for it by extending the `rasa.nlu.persistor.Persistor` class. + + Start the Rasa server with `remote-storage` option set to the module path of model persistor. + + An example command is: + + ```bash + rasa run --remote-storage <your module>.<class name> + ``` + +Models are gzipped before they are saved in the cloud. The gzipped file naming convention +is `{MODEL_NAME}.tar.gz` and it is stored in the root folder of the storage service. +Currently, you are not able to manually specify the path on the cloud storage. + +If storing trained models, Rasa will gzip the new model and upload it to the container. If retrieving/loading models +from the cloud storage, Rasa will download the gzipped model locally and extract the contents to a temporary directory. + + +## Fetching Models from Local Storage + +If you do not configure any other storage, Rasa will try to load models from local storage. +You can specify the path to your model with the `-m` parameter: + +```bash +rasa run -m models/20190506-100418.tar.gz +``` + +You can also specify a path to a directory instead of a specific model; Rasa will +then load the model with the latest timestamp in that directory. If you do not +specify the `-m` parameter, it will look for models in the default `models` path. diff --git a/docs/docs/nlg.mdx b/docs/docs/nlg.mdx new file mode 100644 index 000000000000..b322c8e66cea --- /dev/null +++ b/docs/docs/nlg.mdx @@ -0,0 +1,115 @@ +--- +id: nlg +sidebar_label: NLG +title: NLG Servers +--- + + +Retraining the bot just to change the text copy can be suboptimal for +some workflows. That's why Core also allows you to outsource the +response generation and separate it from the dialogue learning. + +The assistant will still learn to predict actions and to react to user input +based on past dialogues, but the responses it sends back to the user +are generated outside of Rasa Core. + +If the assistant wants to send a message to the user, it will call an +external HTTP server with a `POST` request. To configure this endpoint, +you need to create an `endpoints.yml` and pass it either to the `run` +or `server` script. 
The content of the `endpoints.yml` should be + +```yaml (docs/sources/data/test_endpoints/example_endpoints.yml) +``` + +Then pass the `enable-api` flag to the `rasa run` command when starting +the server: + +```shell +rasa run \ + --enable-api \ + -m examples/babi/models \ + --log-file out.log \ + --endpoints endpoints.yml +``` + +The body of the `POST` request sent to the endpoint will look +like this: + +```json +{ + "tracker": { + "latest_message": { + "text": "/greet", + "intent_ranking": [ + { + "confidence": 1.0, + "name": "greet" + } + ], + "intent": { + "confidence": 1.0, + "name": "greet" + }, + "entities": [] + }, + "sender_id": "22ae96a6-85cd-11e8-b1c3-f40f241f6547", + "paused": false, + "latest_event_time": 1531397673.293572, + "slots": { + "name": null + }, + "events": [ + { + "timestamp": 1531397673.291998, + "event": "action", + "name": "action_listen" + }, + { + "timestamp": 1531397673.293572, + "parse_data": { + "text": "/greet", + "intent_ranking": [ + { + "confidence": 1.0, + "name": "greet" + } + ], + "intent": { + "confidence": 1.0, + "name": "greet" + }, + "entities": [] + }, + "event": "user", + "text": "/greet" + } + ] + }, + "arguments": {}, + "template": "utter_greet", + "channel": { + "name": "collector" + } +} +``` + +The endpoint then needs to respond with the generated response: + +```json +{ + "text": "hey there", + "buttons": [], + "image": null, + "elements": [], + "attachments": [] +} +``` + +Rasa will then use this response and sent it back to the user. + +:::note +If you use an external NLG service, you don't need to specify the +responses in the domain, but you still need to add the utterance names +to the actions list of the domain. + +::: diff --git a/docs/docs/nlu-training-data.mdx b/docs/docs/nlu-training-data.mdx new file mode 100644 index 000000000000..4562a3b47e8c --- /dev/null +++ b/docs/docs/nlu-training-data.mdx @@ -0,0 +1,170 @@ +--- +id: nlu-training-data +sidebar_label: NLU Training Data +title: NLU Training Data +description: Read more about how to format training data with Rasa NLU for open source natural language processing. +--- + +NLU training data is structured into different parts: + +* training examples + +* synonyms + +* regex features and + +* lookup tables + +While training examples is the only part that is mandatory, including the others will help the NLU model +learn the domain with fewer examples and also help it be more confident of its predictions. + +Synonyms will map extracted entities to the same name, for example mapping “my savings account” to simply “savings”. +However, this only happens *after* the entities have been extracted, so you need to provide examples with the synonyms +present so that Rasa can learn to pick them up. + +Lookup tables are lists of words used to generate +case-insensitive regex patterns that are added to the regex features. + +:::note +The common theme here is that training examples, regex features and lookup tables merely act as cues to the final NLU +model by providing additional features to the machine learning algorithm during training. Therefore, it must not be +assumed that having a single example would be enough for the model to robustly identify intents and/or entities +across all variants of that example. + +::: + +:::note +`/` symbol is reserved as a delimiter to separate retrieval intents from response text identifiers. Make sure not +to use it in the name of your intents. 
+
+:::
+
+## Improving Intent Classification and Entity Recognition
+
+### Regular Expression Features (regex)
+
+For example, the names of German streets often end in `strasse`. By adding this as a regex,
+we are telling the model to pay attention to words ending this way, and it will quickly learn to
+associate them with a location entity.
+
+
+Regular expressions can be used in two different ways:
+
+1. They can be used to support the intent classification and entity extraction when using the [RegexFeaturizer](./components/featurizers.mdx#regexfeaturizer) component in the pipeline.
+   Each of the regexes then provides the intent classifier or entity extractor with an extra binary feature, which says if the regex was found or not.
+
+2. They can be used to directly extract entities from user messages when using the [RegexEntityExtractor](./components/entity-extractors.mdx#regexentityextractor) component in the pipeline.
+
+For example, if your entity has a deterministic structure (like a zipcode or an email address), you can use a regular
+expression to ease detection of that entity (using the [RegexFeaturizer](./components/featurizers.mdx#regexfeaturizer)) or to directly extract the entities from
+the user message (using the [RegexEntityExtractor](./components/entity-extractors.mdx#regexentityextractor)).
+
+
+In case you are using the regular expressions for the [RegexFeaturizer](./components/featurizers.mdx#regexfeaturizer), the name of the regular expression does
+not matter. It does not define the entity or the intent; it is just a human-readable description for you to remember
+what this regex is used for, and it is the title of the corresponding pattern feature.
+If you want to use the [RegexFeaturizer](./components/featurizers.mdx#regexfeaturizer), you can also use the regex features to improve the intent
+classification performance, for example, by defining a greet clause:
+
+
+If you are using the regular expressions to directly extract entities using the [RegexEntityExtractor](./components/entity-extractors.mdx#regexentityextractor),
+the name of the regular expression should match the name of the entity you want to extract.
+
+Try to create your regular expressions in a way that they match as few words as possible. E.g. use `hey[^\s]*`
+instead of `hey.*`, as the latter one might match the whole message whereas the first one only matches a single word.
+
+When using the [RegexFeaturizer](./components/featurizers.mdx#regexfeaturizer), the regex features for entity extraction are currently only supported by the
+`CRFEntityExtractor` and the `DIETClassifier` components! Hence, other entity extractors, like
+`MitieEntityExtractor` or `SpacyEntityExtractor`, won't use the generated features and their
+presence will not improve entity recognition for these extractors. Currently, all intent classifiers make use of
+available regex features.
+
+:::note
+Regex features only define entities when used in combination with the [RegexEntityExtractor](./components/entity-extractors.mdx#regexentityextractor).
+Otherwise they don't define entities or intents! They simply provide patterns to help the classifier
+recognize entities and related intents. Hence, you still need to provide intent & entity examples as part of your
+training data!
+
+:::
+
+
+### Entities Roles and Groups
+
+:::caution
+This feature is experimental.
+We introduce experimental features to get feedback from our community, so we encourage you to try it out!
+However, the functionality might be changed or removed in the future.
+If you have feedback (positive or negative) please share it with us on the [forum](https://forum.rasa.com). + +::: + +Assigning custom entity labels to words, allow you to define certain concepts in the data. +For example, we can define what a city is: + +``` +I want to fly from [Berlin]{"entity": "city"} to [San Francisco]{"entity": "city"} . +``` + +However, sometimes you want to specify entities even further. +Let's assume we want to build an assistant that should book a flight for us. +The assistant needs to know which of the two cities in the example above is the departure city and which is the +destination city. +`Berlin` and `San Francisco` are still cities, but they play a different role in our example. +To distinguish between the different roles, you can assign a role label in addition to the entity label. + +``` +- I want to fly from [Berlin]{"entity": "city", "role": "departure"} to [San Francisco]{"entity": "city", "role": "destination"}. +``` + +You can also group different entities by specifying a group label next to the entity label. +The group label can, for example, be used to define different orders. +In the following example we use the group label to reference what toppings goes with which pizza and +what size which pizza has. + +``` +Give me a [small]{"entity": "size", "group": "1"} pizza with [mushrooms]{"entity": "topping", "group": "1"} and +a [large]{"entity": "size", "group": "2"} [pepperoni]{"entity": "topping", "group": "2"} +``` + +See [Training Data Format](training-data-format.mdx#entities) for details on how to define entities with roles and groups in your training data. + +The entity object returned by the extractor will include the detected role/group label. + +```json +{ + "text": "Book a flight from Berlin to SF", + "intent": "book_flight", + "entities": [ + { + "start": 19, + "end": 25, + "value": "Berlin", + "entity": "city", + "role": "departure", + "extractor": "DIETClassifier", + }, + { + "start": 29, + "end": 31, + "value": "San Francisco", + "entity": "city", + "role": "destination", + "extractor": "DIETClassifier", + } + ] +} +``` + +:::note +Composite entities are currently only supported by the [DIETClassifier](./components/intent-classifiers.mdx#dietclassifier) and [CRFEntityExtractor](./components/entity-extractors.mdx#crfentityextractor). + +::: + +In order to properly train your model with entities that have roles/groups, make sure to include enough training data +examples for every combination of entity and role/group label. +Also make sure to have some variations in your training data, so that the model is able to generalize. +For example, you should not only have example like `fly FROM x TO y`, but also include examples like +`fly TO y FROM x`. + +To fill slots from entities with a specific role/group, you need to either define a custom slot mappings using +[Forms](forms.mdx) or use [Custom Actions](custom-actions.mdx) to extract the corresponding entity directly from the tracker. diff --git a/docs/docs/policies.mdx b/docs/docs/policies.mdx new file mode 100644 index 000000000000..52aa295caf01 --- /dev/null +++ b/docs/docs/policies.mdx @@ -0,0 +1,746 @@ +--- +id: policies +sidebar_label: Policies +title: Policies +description: Define and train customized policy configurations to optimize your contextual assistant for longer contexts or unseen utterances which require generalization. 
+--- + +<!-- TODO: replace with 2 files for ML policies and RulePolicy --> + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="policy-file"></a> + +## Configuring Policies + +The `rasa.core.policies.Policy` class decides which action to take +at every step in the conversation. + +There are different policies to choose from, and you can include +multiple policies in a single configuration. + +:::note +Per default a maximum of 10 next actions can be predicted +by the agent after every user message. To update this value +you can set the environment variable `MAX_NUMBER_OF_PREDICTIONS` +to the desired number of maximum predictions. + +::: + +Your project's `config.yml` file takes a `policies` key +which you can use to customize the policies your assistant uses. +You can also leave this key out or empty. Then the +[Suggested Config](.//model-configuration.mdx#suggested-config) +feature will choose some default policies for you. + +You can also reference custom policies in your configuration. In the example below, the +last two lines show how to use a custom policy class and pass arguments to it. + +```yaml +policies: + - name: "TEDPolicy" + featurizer: + - name: MaxHistoryTrackerFeaturizer + max_history: 5 + state_featurizer: + - name: BinarySingleStateFeaturizer + - name: "RulePolicy" + - name: "path.to.your.policy.class" + arg1: "..." +``` + +### Max History + +One important hyperparameter for Rasa Core policies is the `max_history`. +This controls how much dialogue history the model looks at to decide which +action to take next. + +You can set the `max_history` by passing it to your policy's `Featurizer` +in the policy configuration yaml file. + +:::note +Only the `MaxHistoryTrackerFeaturizer` uses a max history, +whereas the `FullDialogueTrackerFeaturizer` always looks at +the full conversation history. See [Featurization of Conversations](./policies.mdx#featurization-conversations) for details. + +::: + +As an example, let's say you have an `out_of_scope` intent which +describes off-topic user messages. If your bot sees this intent multiple +times in a row, you might want to tell the user what you can help them +with. So your story might look like this: + +```story +* out_of_scope + - utter_default +* out_of_scope + - utter_default +* out_of_scope + - utter_help_message +``` + +For Rasa Core to learn this pattern, the `max_history` +has to be at least 4. + +If you increase your `max_history`, your model will become bigger and +training will take longer. If you have some information that should +affect the dialogue very far into the future, you should store it as a +slot. Slot information is always available for every featurizer. + +### Data Augmentation + +When you train a model, by default Rasa Core will create +longer stories by randomly gluing together +the ones in your stories files. +This is because if you have stories like: + +```story +# thanks +* thankyou + - utter_youarewelcome + +# bye +* goodbye + - utter_goodbye +``` + +You actually want to teach your policy to **ignore** the dialogue history +when it isn't relevant and just respond with the same action no matter +what happened before. + +You can alter this behavior with the `--augmentation` flag. +Which allows you to set the `augmentation_factor`. +The `augmentation_factor` determines how many augmented stories are +subsampled during training. The augmented stories are subsampled before training +since their number can quickly become very large, and we want to limit it. 
+The number of sampled stories is `augmentation_factor` x10. +By default augmentation is set to 20, resulting in a maximum of 200 augmented stories. + +`--augmentation 0` disables all augmentation behavior. +The memoization based policies are not affected by augmentation +(independent of the `augmentation_factor`) and will automatically +ignore all augmented stories. + +## Action Selection + +At every turn, each policy defined in your configuration will +predict a next action with a certain confidence level. For more information +about how each policy makes its decision, read into the policy's description below. +The bot's next action is then decided by the policy that predicts with the highest confidence. + +In the case that two policies predict with equal confidence (for example, the Memoization +and Mapping Policies always predict with confidence of either 0 or 1), the priority of the +policies is considered. Rasa policies have default priorities that are set to ensure the +expected outcome in the case of a tie. They look like this, where higher numbers have higher priority: + +1. `TEDPolicy` and `SklearnPolicy` + +2. `MappingPolicy` + +3. `MemoizationPolicy` and `AugmentedMemoizationPolicy` + +4. `FallbackPolicy` and `TwoStageFallbackPolicy` + +5. `RulePolicy` and `FormPolicy` + +This priority hierarchy ensures that, for example, if there is an intent with a mapped action, but the NLU confidence is not +above the `nlu_threshold`, the bot will still fall back. In general, it is not recommended to have more +than one policy per priority level, and some policies on the same priority level, such as the two +fallback policies, strictly cannot be used in tandem. + +If you create your own policy, use these priorities as a guide for figuring out the priority of your policy. +If your policy is a machine learning policy, it should most likely have priority 1, the same as the Rasa machine +learning policies. + +:::caution +All policy priorities are configurable via the `priority:` parameter in the configuration, +but we **do not recommend** changing them outside of specific cases such as custom policies. +Doing so can lead to unexpected and undesired bot behavior. + +::: + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="embedding-policy"></a> + +## Embedding Policy + +:::caution +`EmbeddingPolicy` was renamed to `TEDPolicy`. Please use [TED Policy](./policies.mdx#ted-policy) instead of `EmbeddingPolicy` +in your policy configuration. The functionality of the policy stayed the same. + +::: + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="ted-policy"></a> + +## TED Policy + +The Transformer Embedding Dialogue (TED) Policy is described in +[our paper](https://arxiv.org/abs/1910.00486). + +This policy has a pre-defined architecture, which comprises the +following steps: + +* concatenate user input (user intent and entities), previous system actions, slots and active forms for each time + step into an input vector to pre-transformer embedding layer; + +* feed it to transformer; + +* apply a dense layer to the output of the transformer to get embeddings of a dialogue for each time step; + +* apply a dense layer to create embeddings for system actions for each time step; + +* calculate the similarity between the dialogue embedding and embedded system actions. + This step is based on the [StarSpace](https://arxiv.org/abs/1709.03856) idea. 
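+
+To make the last step more concrete, here is a small, purely illustrative sketch of ranking candidate system
+actions by dot-product similarity to a dialogue embedding. This is **not** the actual `TEDPolicy` implementation
+(which is built in TensorFlow and supports several similarity and loss types); all names and dimensions below are
+made up for illustration only:
+
+```python
+import numpy as np
+
+# Toy sizes: 20-dimensional embeddings and 4 candidate system actions.
+rng = np.random.default_rng(42)
+dialogue_embedding = rng.normal(size=20)      # embedding of the dialogue at one time step
+action_embeddings = rng.normal(size=(4, 20))  # one embedding per candidate system action
+
+# Score every candidate action by its similarity to the dialogue embedding
+# and pick the highest-scoring one.
+scores = action_embeddings @ dialogue_embedding
+predicted_action_index = int(np.argmax(scores))
+print(f"predicted action index: {predicted_action_index}, scores: {scores.round(2)}")
+```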
+ +It is recommended to use `state_featurizer=LabelTokenizerSingleStateFeaturizer(...)` +(see [Featurization of Conversations](./policies.mdx#featurization-conversations) for details). + +**Configuration:** + +Configuration parameters can be passed as parameters to the `TEDPolicy` within the configuration file. +If you want to adapt your model, start by modifying the following parameters: + +* `epochs`: + This parameter sets the number of times the algorithm will see the training data (default: `1`). + One `epoch` is equals to one forward pass and one backward pass of all the training examples. + Sometimes the model needs more epochs to properly learn. + Sometimes more epochs don't influence the performance. + The lower the number of epochs the faster the model is trained. + +* `hidden_layers_sizes`: + This parameter allows you to define the number of feed forward layers and their output + dimensions for dialogues and intents (default: `dialogue: [], label: []`). + Every entry in the list corresponds to a feed forward layer. + For example, if you set `dialogue: [256, 128]`, we will add two feed forward layers in front of + the transformer. The vectors of the input tokens (coming from the dialogue) will be passed on to those + layers. The first layer will have an output dimension of 256 and the second layer will have an output + dimension of 128. If an empty list is used (default behavior), no feed forward layer will be + added. + Make sure to use only positive integer values. Usually, numbers of power of two are used. + Also, it is usual practice to have decreasing values in the list: next value is smaller or equal to the + value before. + +* `number_of_transformer_layers`: + This parameter sets the number of transformer layers to use (default: `1`). + The number of transformer layers corresponds to the transformer blocks to use for the model. + +* `transformer_size`: + This parameter sets the number of units in the transformer (default: `128`). + The vectors coming out of the transformers will have the given `transformer_size`. + +* `weight_sparsity`: + This parameter defines the fraction of kernel weights that are set to 0 for all feed forward layers + in the model (default: `0.8`). The value should be between 0 and 1. If you set `weight_sparsity` + to 0, no kernel weights will be set to 0, the layer acts as a standard feed forward layer. You should not + set `weight_sparsity` to 1 as this would result in all kernel weights being 0, i.e. the model is not able + to learn. + +:::caution +Pass an appropriate number, for example 50, of `epochs` to the `TEDPolicy`, otherwise the policy will +be trained only for `1` epoch. + +::: + +:::caution +Default `max_history` for this policy is `None` which means it'll use the +`FullDialogueTrackerFeaturizer`. We recommend to set `max_history` to some finite value in order to +use `MaxHistoryTrackerFeaturizer` for **faster training**. See [Featurization of Conversations](./policies.mdx#featurization-conversations) for +details. We recommend to increase `batch_size` for `MaxHistoryTrackerFeaturizer` +(e.g. `"batch_size": [32, 64]`) + +::: + +<details><summary>The above configuration parameters are the ones you should configure to fit your model to your data. 
+However, additional parameters exist that can be adapted.</summary> + +``` ++---------------------------------+------------------+--------------------------------------------------------------+ +| Parameter | Default Value | Description | ++=================================+==================+==============================================================+ +| hidden_layers_sizes | dialogue: [] | Hidden layer sizes for layers before the embedding layers | +| | label: [] | for dialogue and labels. The number of hidden layers is | +| | | equal to the length of the corresponding. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| transformer_size | 128 | Number of units in transformer. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| number_of_transformer_layers | 1 | Number of transformer layers. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| number_of_attention_heads | 4 | Number of attention heads in transformer. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| use_key_relative_attention | False | If 'True' use key relative embeddings in attention. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| use_value_relative_attention | False | If 'True' use value relative embeddings in attention. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| max_relative_position | None | Maximum position for relative embeddings. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| batch_size | [8, 32] | Initial and final value for batch sizes. | +| | | Batch size will be linearly increased for each epoch. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| batch_strategy | "balanced" | Strategy used when creating batches. | +| | | Can be either 'sequence' or 'balanced'. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| epochs | 1 | Number of epochs to train. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| random_seed | None | Set random seed to any 'int' to get reproducible results. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| embedding_dimension | 20 | Dimension size of embedding vectors. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| number_of_negative_examples | 20 | The number of incorrect labels. The algorithm will minimize | +| | | their similarity to the user input during training. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| similarity_type | "auto" | Type of similarity measure to use, either 'auto' or 'cosine' | +| | | or 'inner'. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| loss_type | "softmax" | The type of the loss function, either 'softmax' or 'margin'. 
| ++---------------------------------+------------------+--------------------------------------------------------------+ +| ranking_length | 10 | Number of top actions to normalize scores for loss type | +| | | 'softmax'. Set to 0 to turn off normalization. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| maximum_positive_similarity | 0.8 | Indicates how similar the algorithm should try to make | +| | | embedding vectors for correct labels. | +| | | Should be 0.0 < ... < 1.0 for 'cosine' similarity type. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| maximum_negative_similarity | -0.2 | Maximum negative similarity for incorrect labels. | +| | | Should be -1.0 < ... < 1.0 for 'cosine' similarity type. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| use_maximum_negative_similarity | True | If 'True' the algorithm only minimizes maximum similarity | +| | | over incorrect intent labels, used only if 'loss_type' is | +| | | set to 'margin'. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| scale_loss | True | Scale loss inverse proportionally to confidence of correct | +| | | prediction. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| regularization_constant | 0.001 | The scale of regularization. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| negative_margin_scale | 0.8 | The scale of how important it is to minimize the maximum | +| | | similarity between embeddings of different labels. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| drop_rate_dialogue | 0.1 | Dropout rate for embedding layers of dialogue features. | +| | | Value should be between 0 and 1. | +| | | The higher the value the higher the regularization effect. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| drop_rate_label | 0.0 | Dropout rate for embedding layers of label features. | +| | | Value should be between 0 and 1. | +| | | The higher the value the higher the regularization effect. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| drop_rate_attention | 0.0 | Dropout rate for attention. Value should be between 0 and 1. | +| | | The higher the value the higher the regularization effect. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| weight_sparsity | 0.8 | Sparsity of the weights in dense layers. | +| | | Value should be between 0 and 1. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| evaluate_every_number_of_epochs | 20 | How often to calculate validation accuracy. | +| | | Set to '-1' to evaluate just once at the end of training. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| evaluate_on_number_of_examples | 0 | How many examples to use for hold out validation set. | +| | | Large values may hurt performance, e.g. model accuracy. 
| ++---------------------------------+------------------+--------------------------------------------------------------+ +| tensorboard_log_directory | None | If you want to use tensorboard to visualize training | +| | | metrics, set this option to a valid output directory. You | +| | | can view the training metrics after training in tensorboard | +| | | via 'tensorboard --logdir <path-to-given-directory>'. | ++---------------------------------+------------------+--------------------------------------------------------------+ +| tensorboard_log_level | "epoch" | Define when training metrics for tensorboard should be | +| | | logged. Either after every epoch ('epoch') or for every | +| | | training step ('minibatch'). | ++---------------------------------+------------------+--------------------------------------------------------------+ +``` + +</details> + +:::caution +If `evaluate_on_number_of_examples` is non zero, random examples will be picked by stratified split and +used as **hold out** validation set, so they will be excluded from training data. +We suggest to set it to zero if data set contains a lot of unique examples of dialogue turns. + +::: + +:::note +For `cosine` similarity `maximum_positive_similarity` and `maximum_negative_similarity` should +be between `-1` and `1`. + +::: + +:::note +There is an option to use linearly increasing batch size. The idea comes from +[https://arxiv.org/abs/1711.00489](https://arxiv.org/abs/1711.00489). In order to do it pass a list to `batch_size`, e.g. +`"batch_size": [8, 32]` (default behavior). If constant `batch_size` is required, pass an `int`, +e.g. `"batch_size": 8`. + +::: + +:::note +The parameter `maximum_negative_similarity` is set to a negative value to mimic the original +starspace algorithm in the case `maximum_negative_similarity = maximum_positive_similarity` and +`use_maximum_negative_similarity = False`. See [starspace paper](https://arxiv.org/abs/1709.03856) +for details. + +::: + +## Rule Policy + +The `Rule Policy` is a policy that handles conversation parts that follow +a fixed behavior. Please see [Rules](./rules.mdx) for further information. + +## Memoization Policy + +The `MemoizationPolicy` just memorizes the conversations in your +training data. It predicts the next action with confidence `1.0` +if this exact conversation exists in the training data, otherwise it +predicts `None` with confidence `0.0`. + +## Augmented Memoization Policy + +The `AugmentedMemoizationPolicy` remembers examples from training +stories for up to `max_history` turns, just like the `MemoizationPolicy`. +Additionally, it has a forgetting mechanism that will forget a certain amount +of steps in the conversation history and try to find a match in your stories +with the reduced history. It predicts the next action with confidence `1.0` +if a match is found, otherwise it predicts `None` with confidence `0.0`. + +:::note +If you have dialogues where some slots that are set during +prediction time might not be set in training stories (e.g. in training +stories starting with a reminder not all previous slots are set), +make sure to add the relevant stories without slots to your training +data as well. + +::: + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="mapping-policy"></a> + +## Mapping Policy + +:::caution +The `MappingPolicy` is deprecated. Please see [Rules](./rules.mdx) how to implement +its behavior using the [Rule Policy](./policies.mdx#rule-policy). 
If you previously used +the `MappingPolicy`, see the +[migration guide](./migration-guide.mdx#migrating-from-the-mapping-policy). +::: + +The `MappingPolicy` can be used to directly map intents to actions. The +mappings are assigned by giving an intent the property `triggers`, e.g.: + +```yaml +intents: + - ask_is_bot: + triggers: action_is_bot +``` + +An intent can only be mapped to at most one action. The bot will run +the mapped action once it receives a message of the triggering intent. Afterwards, +it will listen for the next message. With the next +user message, normal prediction will resume. + +If you do not want your intent-action mapping to affect the dialogue +history, the mapped action must return a `UserUtteranceReverted()` +event. This will delete the user's latest message, along with any events that +happened after it, from the dialogue history. This means you should not +include the intent-action interaction in your stories. + +For example, if a user asks “Are you a bot?” off-topic in the middle of the +flow, you probably want to answer without that interaction affecting the next +action prediction. A triggered custom action can do anything, but here's a +simple example that dispatches a bot utterance and then reverts the interaction: + +```python +class ActionIsBot(Action): + """Revertible mapped action for utter_is_bot""" + + def name(self): + return "action_is_bot" + + def run(self, dispatcher, tracker, domain): + dispatcher.utter_template(template="utter_is_bot") + return [UserUtteranceReverted()] +``` + +:::note +If you use the `MappingPolicy` to predict bot utterance actions directly (e.g. +`triggers: utter_{}`), these interactions must go in your stories, as in this +case there is no `UserUtteranceReverted()` and the +intent and the mapped response action will appear in the dialogue history. + +::: + +:::note +The MappingPolicy is also responsible for executing the default actions `action_back` +and `action_restart` in response to `/back` and `/restart`. If it is not included +in your policy example these intents will not work. + +::: + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="fallback-policy"></a> + +## Fallback Policy + +:::caution +The `FallbackPolicy` is deprecated. Please see [Fallback Actions](./fallback-handoff.mdx#fallbackactions) how to implement +its behavior using the [Rule Policy](./policies.mdx#rule-policy). If you previously used +the `FallbackPolicy`, see the +[migration guide](./migration-guide.mdx#migrating-from-the-fallback-policy). +::: + +The `FallbackPolicy` invokes a predefined action if at least one of the following occurs: + +1. The intent recognition has a confidence below `nlu_threshold`. + +2. The highest ranked intent differs in confidence with the second highest + ranked intent by less than `ambiguity_threshold`. + +3. None of the dialogue policies predict an action with confidence higher than `core_threshold`. 
+
+**Configuration:**
+
+The thresholds and fallback action can be adjusted in the policy configuration
+file as parameters of the `FallbackPolicy`:
+
+```yaml
+policies:
+  - name: "FallbackPolicy"
+    nlu_threshold: 0.3
+    ambiguity_threshold: 0.1
+    core_threshold: 0.3
+    fallback_action_name: 'action_default_fallback'
+```
+
+|                        |                                                                                                                   |
+|------------------------|-------------------------------------------------------------------------------------------------------------------|
+|`nlu_threshold`         |Min confidence needed to accept an NLU prediction                                                                  |
+|`ambiguity_threshold`   |Min amount by which the confidence of the top intent must exceed that of the second highest ranked intent.        |
+|`core_threshold`        |Min confidence needed to accept an action prediction from Rasa Core                                               |
+|`fallback_action_name`  |Name of the fallback action to be called if the confidence of intent or action is below the respective threshold  |
+
+`action_default_fallback` is a default action in Rasa Core which sends the
+`utter_default` response to the user (you will have to define this response
+in the `responses:` section of your domain file). You can also choose your
+`fallback_action_name` to be one of your custom actions.
+
+You can also configure the `FallbackPolicy` in your Python code:
+
+```python
+from rasa.core.policies.fallback import FallbackPolicy
+from rasa.core.policies.ted_policy import TEDPolicy
+from rasa.core.agent import Agent
+
+fallback = FallbackPolicy(fallback_action_name="action_default_fallback",
+                          core_threshold=0.3,
+                          nlu_threshold=0.3,
+                          ambiguity_threshold=0.1)
+
+agent = Agent("domain.yml", policies=[TEDPolicy(), fallback])
+```
+
+:::note
+You can include either the `FallbackPolicy` or the
+`TwoStageFallbackPolicy` in your configuration, but not both.
+
+:::
+
+<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="two-stage-fallback-policy"></a>
+
+## Two-Stage Fallback Policy
+
+:::caution
+The `TwoStageFallbackPolicy` is deprecated. Please see [Fallback Actions](./fallback-handoff.mdx#fallbackactions) for how to implement
+its behavior using the [Rule Policy](./policies.mdx#rule-policy). If you previously used
+the `TwoStageFallbackPolicy`, see the
+[migration guide](./migration-guide.mdx#migrating-from-the-two-stage-fallback-policy).
+:::
+
+The `TwoStageFallbackPolicy` handles low NLU confidence in multiple stages
+by trying to disambiguate the user input.
+
+* If an NLU prediction has a low confidence score or is not significantly higher
+  than the second highest ranked prediction, the user is asked to affirm
+  the classification of the intent.
+
+  * If they affirm, the story continues as if the intent was classified
+    with high confidence from the beginning.
+
+  * If they deny, the user is asked to rephrase their message.
+
+* Rephrasing
+
+  * If the classification of the rephrased intent was confident, the story
+    continues as if the user had this intent from the beginning.
+
+  * If the rephrased intent was not classified with high confidence, the user
+    is asked to affirm the classified intent.
+
+* Second affirmation
+
+  * If the user affirms the intent, the story continues as if the user had
+    this intent from the beginning.
+
+  * If the user denies, the original intent is classified as the specified
+    `deny_suggestion_intent_name`, and an ultimate fallback action
+    is triggered (e.g. a handoff to a human).
+
+### Configuration
+
+Rasa provides the default implementations of
+`action_default_ask_affirmation` and `action_default_ask_rephrase`.
+The default implementation of `action_default_ask_rephrase` action utters +the response `utter_ask_rephrase`, so be sure to specify this +response in your domain file. +The implementation of both actions can be overwritten with [custom actions](./actions#custom-actions). + +You can specify the core fallback action as well as the ultimate NLU +fallback action as parameters to `TwoStageFallbackPolicy` in your +policy configuration file. + +```yaml +policies: + - name: TwoStageFallbackPolicy + nlu_threshold: 0.3 + core_threshold: 0.3 + ambiguity_threshold: 0.1 + fallback_core_action_name: "action_default_fallback" + fallback_nlu_action_name: "action_default_fallback" + deny_suggestion_intent_name: "out_of_scope" +``` + +| | | +|------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|`nlu_threshold` |Min confidence needed to accept an NLU prediction | +|`ambiguity_threshold` |Min amount by which the confidence of the top intent must exceed that of the second highest ranked intent. | +|`core_threshold` |Min confidence needed to accept an action prediction from Rasa Core | +|`fallback_core_action_name` |Name of the fallback action to be called if the confidence of Rasa Core action prediction is below the `core_threshold`. This action is to propose the recognized intents | +|`fallback_nlu_action_name` |Name of the fallback action to be called if the confidence of Rasa NLU intent classification is below the `nlu_threshold`. This action is called when the user denies the second time | +|`deny_suggestion_intent_name` |The name of the intent which is used to detect that the user denies the suggested intents | + +:::note +You can include either the `FallbackPolicy` or the +`TwoStageFallbackPolicy` in your configuration, but not both. + +::: + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="form-policy"></a> + +## Form Policy + +:::caution +The `FormPolicy` is deprecated. Please see [Forms](./forms.mdx) how to implement +its behavior using the [Rule Policy](./policies.mdx#rule-policy). If you previously used +the `FormPolicy`, see the +[migration guide](./migration-guide.mdx#form-policy). +::: + +The `FormPolicy` is an extension of the `MemoizationPolicy` which +handles the filling of forms. Once a `FormAction` is called, the +`FormPolicy` will continually predict the `FormAction` until all required +slots in the form are filled. + + +## Conversation Featurization + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="featurization-conversations"></a> + +In order to apply machine learning algorithms to conversational AI, we need +to build up vector representations of conversations. + +Each story corresponds to a tracker which consists of the states of the +conversation just before each action was taken. + +### State Featurizers + +Every event in a trackers history creates a new state (e.g. running a bot +action, receiving a user message, setting slots). Featurizing a single state +of the tracker has a couple steps: + +1. **Tracker provides a bag of active features**: + + * features indicating intents and entities, if this is the first + state in a turn, e.g. it's the first action we will take after + parsing the user's message. (e.g. + `[intent_restaurant_search, entity_cuisine]` ) + + * features indicating which slots are currently defined, e.g. 
+ `slot_location` if the user previously mentioned the area + they're searching for restaurants. + + * features indicating the results of any API calls stored in + slots, e.g. `slot_matches` + + * features indicating what the last action was (e.g. + `prev_action_listen`) + +2. **Convert all the features into numeric vectors**: + + We use the `X, y` notation that's common for supervised learning, + where `X` is an array of shape + `(num_data_points, time_dimension, num_input_features)`, + and `y` is an array of shape `(num_data_points, num_bot_features)` + or `(num_data_points, time_dimension, num_bot_features)` + containing the target class labels encoded as one-hot vectors. + + The target labels correspond to actions taken by the bot. + To convert the features into vector format, there are different + featurizers available: + + * `BinarySingleStateFeaturizer` creates a binary one-hot encoding: + + The vectors `X, y` indicate a presence of a certain intent, + entity, previous action or slot e.g. `[0 0 1 0 0 1 ...]`. + + * `LabelTokenizerSingleStateFeaturizer` creates a vector + + based on the feature label: + All active feature labels (e.g. `prev_action_listen`) are split + into tokens and represented as a bag-of-words. For example, actions + `utter_explain_details_hotel` and + `utter_explain_details_restaurant` will have 3 features in + common, and differ by a single feature indicating a domain. + + Labels for user inputs (intents, entities) and bot actions + are featurized separately. Each label in the two categories + is tokenized on a special character `split_symbol` + (e.g. `action_search_restaurant = {action, search, restaurant}`), + creating two vocabularies. A bag-of-words representation + is then created for each label using the appropriate vocabulary. + The slots are featurized as binary vectors, indicating + their presence or absence at each step of the dialogue. + +:::note +If the domain defines the possible `actions`, +`[ActionGreet, ActionGoodbye]`, +`4` additional default actions are added: +`[ActionListen(), ActionRestart(), +ActionDefaultFallback(), ActionDeactivateForm()]`. +Therefore, label `0` indicates default action listen, label `1` +default restart, label `2` a greeting and `3` indicates goodbye. + +::: + +### Tracker Featurizers + +It's often useful to include a bit more history than just the current state +when predicting an action. The `TrackerFeaturizer` iterates over tracker +states and calls a `SingleStateFeaturizer` for each state. There are two +different tracker featurizers: + +#### 1. Full Dialogue + +`FullDialogueTrackerFeaturizer` creates numerical representation of +stories to feed to a recurrent neural network where the whole dialogue +is fed to a network and the gradient is backpropagated from all time steps. +Therefore, `X` is an array of shape +`(num_stories, max_dialogue_length, num_input_features)` and +`y` is an array of shape +`(num_stories, max_dialogue_length, num_bot_features)`. +The smaller dialogues are padded with `-1` for all features, indicating +no values for a policy. + +#### 2. Max History + +`MaxHistoryTrackerFeaturizer` creates an array of previous tracker +states for each bot action or utterance, with the parameter +`max_history` defining how many states go into each row in `X`. +Deduplication is performed to filter out duplicated turns (bot actions +or bot utterances) in terms of their previous states. 
Hence `X` +has shape `(num_unique_turns, max_history, num_input_features)` +and `y` is an array of shape `(num_unique_turns, num_bot_features)`. + +For some algorithms a flat feature vector is needed, so `X` +should be reshaped to +`(num_unique_turns, max_history \* num_input_features)`. If numeric +target class labels are needed instead of one-hot vectors, use +`y.argmax(axis=-1)`. diff --git a/docs/docs/prototype-an-assistant.mdx b/docs/docs/prototype-an-assistant.mdx new file mode 100644 index 000000000000..1c00b65c17c6 --- /dev/null +++ b/docs/docs/prototype-an-assistant.mdx @@ -0,0 +1,187 @@ +--- +id: prototype-an-assistant +sidebar_label: 'Prototype an Assistant' +title: 'Prototype an Assistant' +--- +import useBaseUrl from '@docusaurus/useBaseUrl'; +import Prototyper, { Chat, DownloadButton, TrainButton } from '@site/src/components/prototyper'; + +Get started with Rasa Open Source and learn how to create an assistant from scratch! + +This page explains the basics of building an assistant with Rasa Open Source and shows the structure of a Rasa project. +You can test it out right here without installing anything. You can also install +[install Rasa Open Source](./installation.mdx) and follow along in your command line. + +<Prototyper + startPrototyperApi="https://trainer-service.prototyping.rasa.com/startPrototyping" + trainModelApi="https://trainer-service.prototyping.rasa.com/trainings" + chatBlockSelector="#rasa-chat-block" + chatBlockScriptUrl={useBaseUrl("js/rasa-chatblock.min.js")} +> + +## 1. Define a basic user goal + +To prototype an assistant, start with a single [user goal](./glossary.mdx#user-goal) +that your assistant will handle. You can create a prototype with the "subscribing to a newsletter" user goal +we have chosen, or you can come up with your own. If you choose your own user goal, +you can create a prototype that handles your goal by following the instructions to change each +section throughout the tutorial. + + +## 2. Create some NLU data + +[NLU](./glossary.mdx#rasa-nlu) data provides examples of what users might say to your assistant and what they mean by it. +[Intent](./glossary.mdx#intent) refers to what the user means with a specific message. +Your assistant can only learn to recognize intents for which it has seen examples, so we need to provide some data. + +Add examples for your user-goal specific intents in the format shown below. +You can delete the `subscribe` and `inform` intents if you're not using them; +you can also add or change examples for any of the other intents. + +```yaml live noResult name=nlu +nlu: +- intent: greet + examples: | + - Hi + - Hey! + - Hallo + - Good day + - Good morning + +- intent: subscribe + examples: | + - I want to get the newsletter + - Can you send me the newsletter? + - Can you sign me up for the newsletter? + +- intent: inform + examples: | + - My email is example@example.com + - random@example.com + - Please send it to anything@example.com + - Email is something@example.com +``` + +[Read about best practices for NLU](./training-data-format.mdx) + + +## 3. Define a simple form + +For most user goals, the bot will need to collect some information from the user to fulfill their request. +To do so, we define a [form](./glossary.mdx#form). You can change the name of the form to reflect your user goal. +Add to or replace the `email` item in the list below to reflect the information the bot needs +to collect for your user goal. Leave the `type` field the same for any items you add. 
+ +```yaml live noResult name=forms +forms: +- newsletter_form: + email: + - type: from_text +``` + +## 4. Write a rule + +Rules are conversation snippets that should always proceed in exactly the same way. +Update the rule below for running the form you defined above. + +```yaml live noResult name=rules +rules: + - rule: activate subscribe form + steps: + - intent: subscribe + - action: newsletter_form + - active_loop: newsletter_form +``` + +## 5. Write some stories + +Stories are example conversations of how your assistant should handle a user's intent in context. +The first stories you write should follow the [happy path](./glossary.mdx#happy--unhappy-paths) for your user goal. + +A story contains one or more blocks of (user) intent and (bot) [actions](./glossary.mdx#action) or [responses](./glossary.mdx#template--response--utterance). +The form you defined above is one kind of action; responses are just bot messages. +Give intuitive names to your responses starting with `utter_` for now; you'll define what they return later. + +Using the general template of the story we have shown you below, write a story or two that serve +the user goal you have chosen. You can include the steps from your form rule wherever appropriate. + +```yaml live noResult name=stories +stories: + - story: greet and subscribe + steps: + - intent: greet + - action: utter_greet + - intent: subscribe + - action: newsletter_form + - active_loop: newsletter_form +``` + +[Read about best practices for writing stories](./stories.mdx) + + +## 6. Edit responses + +To give your bot messages to respond to the user with, you need to define responses. +You can specify one or more text options for each response. If there are multiple, one of the options +will be chosen at random whenever that response is predicted. + +You can add or change text for any of the responses below. If you're using your own user goal, +replace the last three responses with the response you used in your stories above. + +```yaml live noResult name=responses +responses: + utter_greet: + - text: | + Hello! How can I help you? + - text: | + Hi! + utter_ask_email: + - text: | + What is your email address? + utter_subscribed: + - text: | + I've subscribed {email} to the newsletter! + - text: | + You've been subscribed, the newsletter will be sent to {email}. +``` + +:::note +For this prototype, we have only defined responses, meaning the only thing the assistant does is respond with a +predefined message. Custom actions, however, can be defined to do whatever you'd like. +For example, for the user goal of subscribing to a newsletter, you could create a custom action +that adds the user's email to a database. You can see an example of this +in [Sara's action code](https://github.com/RasaHQ/rasa-demo/blob/master/actions/actions.py). + +::: + +## 7. Train and run + +Rasa has a command line interface that allows you to train and run your bot from a terminal. +To train your bot on the NLU data, stories and responses you've just defined, run `rasa train` using +the button below: + +<TrainButton /> + +Once your model has finished training, you can talk to your assistant: + +<div id="rasa-chat-block"></div> + + +## 8. What's next? + +You can download this project and build on it to create a more advanced assistant. +In your downloaded project, you'll notice several files that were configured for you that you didn't edit on this page. 
+Check out other docs pages to learn more about [domains](./domain.mdx), [actions](./actions.mdx), +and your [model configuration](./model-configuration.mdx). + +<DownloadButton /> + +<br/><br/> + +<!-- TODO: REMOVE THIS NOTE FOR THE 2.0 release --> + +*Your prototype was created using a Rasa Open Source alpha release. +Please make sure you've [installed at least Rasa Open Source 2.0.0a2](installation.mdx) to develop +your project further.* + +</Prototyper> diff --git a/docs/docs/rasa-sdk-changelog.mdx b/docs/docs/rasa-sdk-changelog.mdx new file mode 100644 index 000000000000..816f8122e66d --- /dev/null +++ b/docs/docs/rasa-sdk-changelog.mdx @@ -0,0 +1,5 @@ +--- +id: rasa-sdk-changelog +sidebar_label: Rasa SDK Changelog +title: Rasa SDK Changelog +--- diff --git a/docs/docs/reminders-and-external-events.mdx b/docs/docs/reminders-and-external-events.mdx new file mode 100644 index 000000000000..6d2ef2d07061 --- /dev/null +++ b/docs/docs/reminders-and-external-events.mdx @@ -0,0 +1,156 @@ +--- +id: reminders-and-external-events +sidebar_label: Reminders and External Events +title: Reminders and External Events +description: Learn how to use external events and schedule reminders. +--- + +The `ReminderScheduled` event and the +[trigger_intent endpoint](./http-api.mdx#operation/triggerConversationIntent) let your assistant remind you +about things after a given period of time, or to respond to external events (other applications, sensors, etc.). +[ReminderBot](https://github.com/RasaHQ/rasa/blob/master/examples/reminderbot/README.md) +is a full example assistant that implements these features. + +:::caution +Reminders don't work in request-response channels like the `rest` channel or `rasa shell`. +Custom connectors for assistants implementing reminders or external events should be built +off of the `CallbackInput` channel instead of the `RestInput` channel. + +See the [reminderbot README](https://github.com/RasaHQ/rasa/blob/master/examples/reminderbot/README.md) +for instructions on how to test your reminders locally. + +::: + +## Reminders + +Instead of an external sensor, you might just want to be reminded about something after a certain amount of time. +For this, Rasa provides the special event `ReminderScheduled`, and another event, `ReminderCancelled`, to unschedule a reminder. + + +### Scheduling Reminders + +Let's say you want your assistant to remind you to call a friend in 5 seconds. +(You probably want some longer time span, but for the sake of testing, let it be 5 seconds.) +Thus, we define an intent `ask_remind_call` with some NLU data, + +```md +## intent:ask_remind_call +- remind me to call [Albert](name) +- remind me to call [Susan](name) +- later I have to call [Daksh](name) +- later I have to call [Anna](name) +... +``` + +and connect this intent with a new custom action `action_set_reminder`. +We could make this connection by providing training stories (recommended for more complex assistants), +or using [Rules](./rules.mdx). 
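+
+For instance, a minimal rule for this connection (using the intent and action names from this example) could look
+like the following sketch:
+
+```yaml
+rules:
+- rule: Schedule a reminder when the user asks for one
+  steps:
+  - intent: ask_remind_call
+  - action: action_set_reminder
+```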
+
+The custom action `action_set_reminder` should schedule a reminder that, 5 seconds later,
+triggers an intent `EXTERNAL_reminder` with all the entities that the user provided
+in their last message (similar to an external event):
+
+```python
+async def run(
+    self,
+    dispatcher: CollectingDispatcher,
+    tracker: Tracker,
+    domain: Dict[Text, Any],
+) -> List[Dict[Text, Any]]:
+    """Schedule a reminder, supplied with the last message's entities."""
+
+    dispatcher.utter_message("I will remind you in 5 seconds.")
+
+    date = datetime.datetime.now() + datetime.timedelta(seconds=5)
+    entities = tracker.latest_message.get("entities")
+
+    reminder = ReminderScheduled(
+        "EXTERNAL_reminder",
+        trigger_date_time=date,
+        entities=entities,
+        name="my_reminder",
+        kill_on_user_message=False,
+    )
+
+    return [reminder]
+```
+
+Note that this requires the `datetime` and `rasa_sdk.events` packages.
+
+Finally, we define another custom action `action_react_to_reminder` and link it to the `EXTERNAL_reminder` intent
+by writing a [Rule](./rules.mdx) for it:
+
+```yaml
+rules:
+- rule: Trigger `action_react_to_reminder` for `EXTERNAL_reminder`
+  steps:
+  - intent: EXTERNAL_reminder
+  - action: action_react_to_reminder
+```
+
+This tells the model which action to take when the time is up on the reminder.
+
+Instead of a custom action, we could also have used a simple response template.
+But here we want to make use of the fact that the reminder can carry entities, and we can process the entities in this custom action.
+
+:::caution
+Reminders are cancelled whenever you shut down your Rasa server.
+
+:::
+
+
+### Cancelling Reminders
+
+Sometimes the user may want to cancel a reminder that they scheduled earlier.
+A simple way of adding this functionality to your assistant is to create an intent `ask_forget_reminders`
+and let your assistant respond to it with a custom action that returns the `ReminderCancelled()` event.
+
+`ReminderCancelled()` simply cancels all the reminders that are currently scheduled.
+Alternatively, you may provide some parameters to narrow down the types of reminders that you want to cancel.
+
+* `ReminderCancelled(intent="greet")` cancels all reminders with intent `greet`
+
+* `ReminderCancelled(entities={...})` cancels all reminders with the given entities
+
+* `ReminderCancelled("...")` cancels the one unique reminder with the given name “`...`” that you supplied
+  during its creation
+
+
+## External Events
+
+Let's say you want to send a message from some other device to change the course of an ongoing conversation.
+For example, a moisture sensor attached to a Raspberry Pi should inform your personal assistant that your favorite
+plant needs watering, and your assistant should then relay this message to you.
+
+To do this, your Raspberry Pi needs to send a message to the
+[trigger_intent endpoint](./http-api.mdx#operation/triggerConversationIntent) of your conversation.
+As the name says, this injects a user intent (possibly with entities) into your conversation.
+So for Rasa it is almost as if you had entered a message that got classified with this intent and these entities.
+Rasa then needs to respond to this input with an action such as `action_warn_dry`.
+The easiest and most reliable way to connect this action with the intent is to use
+[Rules](./rules.mdx).
+
+
+### Responding to External Events
+
+The first thing required to respond to an external event is the Session ID of the conversation that your
+sensor should send a notification to.
For example, if everyone in your office uses this Raspberry Pi to +keep track of their plants, the sensor needs to tell Rasa which user should be notified about a dehydrated plant. + +Once you have your Session ID, we need to prepare the assistant so it responds to messages from the sensor. +To this end, we define a new intent `EXTERNAL_dry_plant` without any NLU data. +This intent will later be triggered by the external sensor. +Here, we start the intent name with `EXTERNAL_` to indicate that this is not something the user would say, but you can name the intent however you like. + +In the domain file, we now connect the intent `EXTERNAL_dry_plant` with another custom action `action_warn_dry` +to dispatch the response to the external event to the user. + +Now, when you are in a conversation with id `38cc25d7e23e4dde800353751b7c2d3e`, then running + +```shell +curl -H "Content-Type: application/json" -X POST \ + -d '{"name": "EXTERNAL_dry_plant", "entities": {"plant": "Orchid"}}' \ + "http://localhost:5005/conversations/38cc25d7e23e4dde800353751b7c2d3e/trigger_intent?output_channel=latest" +``` + +in the terminal will cause your assistant to say “Your Orchid needs some water!”. diff --git a/docs/docs/responses.mdx b/docs/docs/responses.mdx new file mode 100644 index 000000000000..1c776f833919 --- /dev/null +++ b/docs/docs/responses.mdx @@ -0,0 +1,257 @@ +--- +id: responses +sidebar_label: Responses +title: Responses +--- + + +Responses are the simplest type of action, which utter a response you define in your +domain file, starting with `utter_`: + +```yaml +responses: + utter_my_message: + - text: "This is what I want my action to say!" +``` + + +:::note +If you want to change the text, or any other part of the bots response, +you need to retrain the assistant before these changes will be picked up. + +::: + + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="in-domain-responses"></a> + + +## Using Responses + +Responses are messages the bot will send back to the user. There are +two ways to use these responses: + +1. So long as the name of the response starts with `utter_`, the response can + directly be used as an action. You would add the response + to the domain: + + ```yaml + responses: + utter_greet: + - text: "Hey! How are you?" + ``` + + Afterwards, you can use the response as an action in the + stories: + + ```story + ## greet the user + * intent_greet + - utter_greet + ``` + + When `utter_greet` is run as an action, it will send the message from + the response back to the user. + +2. You can use the responses to generate response messages from your + custom actions using the dispatcher: + `dispatcher.utter_message(template="utter_greet")`. + This allows you to separate the logic of generating + the messages from the actual copy. In your custom action code, you can + send a message based on the response like this: + + ```python + from rasa_sdk.actions import Action + + class ActionGreet(Action): + def name(self): + return 'action_greet' + + def run(self, dispatcher, tracker, domain): + dispatcher.utter_message(template="utter_greet") + return [] + ``` + +## Rich Responses + +A response can be as simple as some text, or can include variation, visual +and interactive components, and more. + +### Images and Buttons + +Responses defined in a domain's yaml file can contain images and +buttons as well: + +```yaml +responses: + utter_greet: + - text: "Hey! How are you?" 
+ buttons: + - title: "great" + payload: "great" + - title: "super sad" + payload: "super sad" + utter_cheer_up: + - text: "Here is something to cheer you up:" + image: "https://i.imgur.com/nGF1K8f.jpg" +``` + +It's common to use buttons as a shortcut to bypass NLU. +Rasa Open Source will send messages starting with a `/` to the +`RegexInterpreter`, which expects NLU input in the same format +as in story files, e.g. `/intent{entities}`. For example, if you let +users choose a color by clicking a button, the button payloads may look like: + +```yaml +utter_ask_color: +- text: "what color would you like?" + buttons: + - title: "blue" + payload: '/choose{"color": "blue"}' + - title: "red" + payload: '/choose{"color": "red"}' +``` + + +:::note +Please keep in mind that it is up to the implementation of the output +channel on how to display the defined buttons. The command line, for +example, can't display buttons or images, but tries to mimic them by +printing the options. + +::: + +### Custom Output Payloads + +You can also send any arbitrary output to the output channel using the +`custom:` key. Note that since the domain is in yaml format, the json +payload should first be converted to yaml format. + +For example, although date pickers are not a defined parameter in responses +because they are not supported by most channels, a Slack date picker +can be sent like so: + +```yaml +responses: + utter_take_bet: + - custom: + blocks: + - type: section + text: + text: "Make a bet on when the world will end:" + type: mrkdwn + accessory: + type: datepicker + initial_date: '2019-05-21' + placeholder: + type: plain_text + text: Select a date +``` + +### Channel-Specific Responses + +For each response, you can have multiple **response variations** (see [Variations](./domain.mdx#variations)). +If you have certain response variations that you would like sent only to specific +channels, you can specify this with the `channel:` key. The value should match +the name defined in the `name()` method of the channel's `OutputChannel` +class. Channel-specific responses are especially useful if creating custom +output payloads that will only work in certain channels. + +```yaml +responses: + utter_ask_game: + - text: "Which game would you like to play?" + channel: "slack" + custom: + blocks: + - type: actions + elements: + - type: button + text: + type: plain_text + emoji: true + text: "Chess :crown:" + value: '/inform{"game": "chess"}' + - type: button + text: + type: plain_text + emoji: true + text: "Checkers :checkered_flag:" + value: '/inform{"game": "checkers"}' + - type: button + text: + type: plain_text + emoji: true + text: "Fortnite :european_castle:" + value: '/inform{"game": "fortnite"}' + style: danger + - text: "Which game would you like to play?" + buttons: + - title: "Chess" + payload: '/inform{"game": "chess"}' + - title: "Checkers" + payload: '/inform{"game": "checkers"}' + - title: "Fortnite" + payload: '/inform{"game": "fortnite"}' +``` + +Each time your bot looks for responses, it will first check to see if there +are any channel-specific response variations for the connected channel. If there are, it +will choose **only** from these response variations. If no channel-specific response variations are +found, it will choose from any response variations that do not have a defined `channel`. 
+Therefore, it is good practice to always have at least one response variation for each +response that has no `channel` specified so that your bot can respond in all +environments, including in the shell and in interactive learning. + +### Variables + +You can also use **variables** in your responses to insert information +collected during the dialogue. You can either do that in your custom python +code or by using the automatic slot filling mechanism. For example, if you +have a response like this: + +```yaml +responses: + utter_greet: + - text: "Hey, {name}. How are you?" +``` + +Rasa will automatically fill that variable with a value found in a slot called +`name`. + +In custom code, you can retrieve a response by using: + +```python +from rasa_sdk.actions import Action + +class ActionCustom(Action): + def name(self): + return "action_custom" + + def run(self, dispatcher, tracker, domain): + # send utter default response to user + dispatcher.utter_message(template="utter_default") + # ... other code + return [] +``` + +If the response contains variables denoted with `{my_variable}` +you can supply values for the fields by passing them as keyword +arguments to `utter_message`: + +```python +dispatcher.utter_message(template="utter_greet", my_variable="my text") +``` + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="variations"></a> + +### Response Variations + +If you want to randomly vary the response sent to the user, you can list +multiple **response variations** and Rasa will randomly pick one of them, e.g.: + +```yaml +responses: + utter_greeting: + - text: "Hey, {name}. How are you?" + - text: "Hey, {name}. How is your day going?" +``` diff --git a/docs/docs/retrieval-actions.mdx b/docs/docs/retrieval-actions.mdx new file mode 100644 index 000000000000..b002cfc66a34 --- /dev/null +++ b/docs/docs/retrieval-actions.mdx @@ -0,0 +1,218 @@ +--- +id: retrieval-actions +sidebar_label: Retrieval Actions +title: Retrieval Actions +description: Use a retrieval model to select chatbot responses in open source bot framework Rasa. +--- + +Retrieval actions are designed to make it simpler to work with small talk and simple questions. +For example, if your assistant can handle 100 FAQs and 50 different small talk intents, you can use a single retrieval +action to cover all of these. +From a dialogue perspective, these single-turn exchanges can all be treated equally, so this simplifies your stories. + +Instead of having a lot of stories like: + +```yaml +stories: +- story: weather + steps: + - intent: ask_weather + - action: utter_ask_weather + +- story: introduction + steps: + - intent: ask_name + - action: utter_introduce_myself + +# ... +``` + +You can cover all of these with a single story where the above intents are grouped +under a common `chitchat` intent: + +```yaml +stories: +- story: chitchat + steps: + - intent: chitchat + - action: respond_chitchat +``` + +A retrieval action uses the output of a [ResponseSelector](./components/selectors.mdx#responseselector) component from NLU which learns a +retrieval model to predict the correct response from a list of candidate responses given a user message text. + +:::note Retrieval Model Blog Post +There is an in-depth blog post [here](https://blog.rasa.com/response-retrieval-models/) about how to use retrieval +actions for handling single turn interactions. 
+ +::: + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="retrieval-training-data"></a> + +## Training Data + +Like the name suggests, retrieval actions learn to select the correct response from a list of candidates. +As with other message data, you need to include examples of what your users will say in +your training data file: + +```yaml title="data/nlu.yml" +nlu: +- intent: chitchat/ask_name + examples: | + - what's your name + - who are you? + - what are you called? + +- intent: chitchat/ask_weather + examples: | + - how's weather? + - is it sunny where you are? +``` + +First, all of these examples will be combined into a single `chitchat` +retrieval intent that NLU will predict. All retrieval intents have a suffix +added to them which identifies a particular response text for your assistant, in the +above example - `ask_name` and `ask_weather`. The suffix is separated from +the intent name by a `/` delimiter. + +Next, include response texts for all retrieval intents in a training data file: + +```yaml title="data/responses.yml" +responses: + chitchat/ask_name: + - text: "my name is Sara, Rasa's documentation bot!" + + chitchat/ask_weather: + - text: "it's always sunny where I live" +``` + +:::info Responses format +The responses use the same format as the [responses in the domain](responses.mdx). +This means, you can also use buttons, images and any other multimedia elements in +your responses. +::: + +The retrieval model is trained separately as part of the NLU training pipeline +to select the correct response. One important thing to remember is that the +retrieval model uses the text of the response messages to select the correct +one. If you change the text of these responses, you have to retrain your +retrieval model! This is a key difference to the responses defined in +your domain file. + +:::note Special meaning of `/` +As shown in the above examples, the `/` symbol is reserved as a delimiter to separate +retrieval intents from response text identifier. Make sure not to use it in the +name of your intents. + +::: + +## Configuration File + +You need to include the [ResponseSelector](./components/selectors.mdx#responseselector) +component in your configuration. The component needs a tokenizer, a featurizer and an +intent classifier to operate on the user message before it can predict a response +and hence these components should be placed before `ResponseSelector` in the +NLU configuration. An example: + +```yaml title="config.yml" {8} +language: "en" + +pipeline: +- name: "WhitespaceTokenizer" + intent_split_symbol: "_" +- name: "CountVectorsFeaturizer" +- name: "DIETClassifier" +- name: "ResponseSelector" +``` + +## Domain + +Rasa uses a naming convention to match the intent names like `chitchat/ask_name` +to the retrieval action. +The correct action name in this case is `respond_chitchat`. +The prefix `respond_` is mandatory to identify it as a retrieval action. +Another example - correct action name for `faq/ask_policy` would be `respond_faq` +To include this in your domain, add it to the list of actions: + +```yaml title="domain.yml" +actions: + # ... + - respond_chitchat + - respond_faq +``` + +A simple way to ensure that the retrieval action is predicted after the chitchat +intent is to use [Rules](./rules.mdx). +However, you can also include this action in your stories. 
+For example, if you want to repeat a question after handling chitchat: + +```yaml title="data/stories.yml" {6-8} +stories: +- story: interruption + steps: + - intent: search_restaurant + - action: utter_ask_cuisine + - intent: chitchat + - action: respond_chitchat + - action: utter_ask_cuisine +``` + +## Multiple Retrieval Actions + +If your assistant includes both FAQs **and** chitchat, it is possible to +separate these into separate retrieval actions, for example having intents +like `chitchat/ask_weather` and `faq/returns_policy`. +Rasa supports adding multiple `RetrievalActions` like `respond_chitchat` and +`respond_returns_policy` + +This works out of the box. Both actions can share the same retrieval model, +you only need to specify a single `ResponseSelector` component in your configuration: + +```yaml title="config.yml" {8} +language: "en" + +pipeline: +- name: "WhitespaceTokenizer" + intent_split_symbol: "_" +- name: "CountVectorsFeaturizer" +- name: "DIETClassifier" +- name: "ResponseSelector" +``` + +## Under the hood: Parsing Response Selector Output + +The parsed output from NLU will have a property named `response_selector` +containing the output for each response selector. Each response selector is +identified by `retrieval_intent` parameter of that response selector +and stores two properties: + +* `response`: The predicted response text and the prediction confidence. + +* `ranking`: Ranking with confidences of top 10 candidate responses. + +Example result: + +```json +{ + "text": "What is the recommend python version to install?", + "entities": [], + "intent": {"confidence": 0.6485910906220309, "name": "faq"}, + "intent_ranking": [ + {"confidence": 0.6485910906220309, "name": "faq"}, + {"confidence": 0.1416153159565678, "name": "greet"} + ], + "response_selector": { + "faq": { + "response": {"text": "Supports 3.5, 3.6 and 3.7, recommended version is 3.6"}, + "full_retrieval_intent": "faq/supports", + "ranking": [ + {"confidence": 0.7356462617, "full_retrieval_intent": "faq/supports"}, + {"confidence": 0.2134543431, "full_retrieval_intent": "faq/languages"} + ] + } + } +} +``` + +If the `retrieval_intent` parameter of a particular response selector was left to its default value, +the corresponding response selector will be identified as `default` in the returned output. diff --git a/docs/docs/rules.mdx b/docs/docs/rules.mdx new file mode 100644 index 000000000000..9d3287c87540 --- /dev/null +++ b/docs/docs/rules.mdx @@ -0,0 +1,167 @@ +--- +id: rules +sidebar_label: Rules +title: Rules +description: Use Rasa Open Source rules to respond to FAQs, fill forms, or handle fallbacks gracefully. +--- + +Rasa rules are a type of training data used to train Rasa's dialogue management models. +Rules describe parts of conversations that should always follow the same path. +Useful applications for rules are for example: + +* **FAQs**: FAQs are questions that users ask independent of the current context. + Rules are an easy way to specify fixed answers to these questions. + +* [Fallback Actions](./fallback-handoff.mdx#fallbackactions): Users might confront your assistant with unexpected messages. + The messages will typically receive low intent confidence. + You can use the [FallbackClassifier](./components/intent-classifiers.mdx#fallbackclassifier) + in combination with rules to treat messages with low NLU confidence like an FAQ. + +* [Forms](./forms.mdx): It's a common use case for assistants to collect form-like + data from the user. 
Both activation of forms and handling of unexpected + events as part of a form will often follow fixed paths. + +**Don't overuse rules**. Rules are great to handle small specific conversation parts but +unlike [Stories](./stories.mdx) rules don't have the power to generalize to unseen conversation +paths. Combine rules and stories to make your assistant robust and able to handle +real user behavior. + +## Writing a Rule + +Before you start writing rules, you have to make sure that the +[Rule Policy](./policies.mdx#rule-policy) is added to your model configuration: + +```yaml +policies: +- ... # Other policies +- name: RulePolicy +``` + +Rules can then be added to the `rules` section of your training data. + +To indicate that a rule can apply at any point in a conversation, start with the +intent which starts the conversation and then add the actions which your assistant +should perform in response to that. + +```yaml +rules: +- rule: Say `hello` whenever the user sends a message with intent `greet` + steps: + - intent: greet + - action: utter_greet +``` + +This example rule applies at the start of conversation as well as when the user decides +to a send a message with an intent `greet` in the middle of an ongoing conversation. + +### Rules for the Conversation Start + +To write a rule which only applies at the beginning of a conversation, add a +`conversation_start: True` to your rule: + +```yaml +rules: + +- rule: Say `hello` when the user starts a conversation with intent `greet` + conversation_start: True + steps: + - intent: greet + - action: utter_greet +``` + +If a user starts by sending a message with an intent other than `greet`, and sends a +message with intent `greet` later in the conversation, the rule will not match. + +### Rules with Pre-Conditions + +Rules can describe requirements which have to be fulfilled for the rule to be +applicable. To do so, add any information about the prior conversation, under the +`condition`: + +```yaml +rules: + +- rule: Only say `hello` when the user provided a name + condition: + - slot_was_set: + - user_provided_name: true + steps: + - intent: greet + - action: utter_greet +``` + +### Skip Waiting for User Input at the End of a Rule + +By default, rules will wait for the next user message when finished with the last step: + +```yaml +rules: + +- rule: Rule which will wait for user message when it was applied + steps: + - intent: greet + - action: utter_greet + # Every rule implicitly includes a prediction for `action_listen` as last step. + # This means that Rasa Open Source will wait for the next user message. + - action_listen +``` + +If you want to hand over to another story or rule, add `wait_for_user_input: False` +to your rule.intent This indicates that the assistant should execute another action +before waiting for more user input. + +```yaml +rules: + +- rule: Rule which will not wait for user message once it was applied + steps: + - intent: greet + - action: utter_greet + wait_for_user_input: False +``` + +### Rules and Forms + +When a [Form](./forms.mdx) is active, rules become applicable again if + +- the form filled all required slots +- the form rejected its execution (see +[Handling unhappy paths](./forms.mdx#writing-stories--rules-for-unhappy-form-paths) for + more details) + +## Use Cases + +This section explains common use cases of rules. + +### FAQs / Mapping Intents to Actions + +Some messages do not require any context to answer them. 
Common examples are either FAQs +or triggers which are sent by [Reminders and External Events](./reminders-and-external-events.mdx). + +To map an intent to a certain action, you need to define a +[Rule](./rules.mdx). +The following example always responds with an action `utter_greet` in case the user +greets the assistant. + +```yaml +rules: + +- rule: Say `hello` whenever the user sends a message with intent `greet` + steps: + - intent: greet + - action: utter_greet +``` + +### Failing Gracefully + +Handling unknown messages gracefully is key to a successful assistant. As unknown +messages can happen at any time in a conversation, they are a special case of +[FAQs](./rules.mdx#faqs--mapping-intents-to-actions). Please see the docs on +[Fallback Actions](./fallback-handoff.mdx#fallbackactions) for different ways to +handle fallbacks gracefully. + +### Forms + +Use [Forms](./forms.mdx) if you need to collect multiple pieces of information from a user +before being able to process their request. A common example for this is booking a table +at a restaurant which requires information like name, number of people, and time. diff --git a/docs/docs/running-action-server.mdx b/docs/docs/running-action-server.mdx new file mode 100644 index 000000000000..ebde3fbf044b --- /dev/null +++ b/docs/docs/running-action-server.mdx @@ -0,0 +1,41 @@ +--- +id: running-action-server +sidebar_label: Action Server +title: Action Server +--- + +import useBaseUrl from '@docusaurus/useBaseUrl'; +import Redoc from '@site/src/components/redoc'; + +## Running the Action Server + +Because `rasa` is not a dependency of `rasa-sdk`, you can run the action +server without installing `rasa`. There are two ways to run the action server +depending on how you have it installed: + +If you have `rasa` installed, run this command to start your action server: + +```bash +rasa run actions +``` + +Otherwise, if you do not have `rasa` installed, run this command: + +```bash +python -m rasa_sdk --actions actions +``` + +The file that contains your custom actions should be called `actions.py`. +Alternatively, you can use a package directory called `actions` or else +manually specify an actions module or package with the `--actions` flag. + +The full list of options for running the action server with either command is: + +```text [rasa run actions --help] +``` + +## Action Server HTTP API + +<!-- TODO: Document the rest of the API endpoints --> + +<Redoc specUrl={useBaseUrl("/spec/action-server.yml")} /> diff --git a/docs/docs/setting-up-ci-cd.mdx b/docs/docs/setting-up-ci-cd.mdx new file mode 100644 index 000000000000..960fbec395ce --- /dev/null +++ b/docs/docs/setting-up-ci-cd.mdx @@ -0,0 +1,198 @@ +--- +id: setting-up-ci-cd +sidebar_label: Setting up CI/CD +title: Setting up CI/CD +description: Set up a CI/CD pipeline to ensure that iterative improvements to your assistant are tested and deployed with minimum manual effort +--- + +Even though developing a contextual assistant is different from developing traditional +software, you should still follow software development best practices. +Setting up a Continuous Integration (CI) and Continuous Deployment (CD) +pipeline ensures that incremental updates to your bot are improving it, not harming it. + +## Overview + +Continous Integration (CI) is the practice of merging in code changes +frequently and automatically testing changes as they are committed. Continuous +Deployment (CD) means automatically deploying integrated changes to a staging +or production environment. 
Together, they allow you to make more frequent improvements +to your assistant and efficiently test and deploy those changes. + +This guide will cover **what** should go in a CI/CD pipeline, specific to a +Rasa project. **How** you implement that pipeline is up to you. +There are many CI/CD tools out there, such as [GitHub Actions](https://github.com/features/actions), +[GitLab CI/CD](https://docs.gitlab.com/ee/ci/), [Jenkins](https://www.jenkins.io/doc/), and +[CircleCI](https://circleci.com/docs/2.0/). We recommend choosing a tool that integrates with +whatever Git repository you use. + +## Continuous Integration (CI) + +The best way to improve an assistant is with frequent [incremental updates](https://rasa.com/docs/rasa-x/user-guide/fix-problems). +No matter how small a change is, you want to be sure that it doesn't introduce +new problems or negatively impact the performance of your assistant. + +It is usually best to run CI checks on merge / pull requests or on commit. Most tests are +quick enough to run on every change. However, you can choose to run more +resource-intensive tests only when certain files have been changed or when some +other indicator is present. For example, if your code is hosted on Github, +you can make a test run only if the pull request has a certain label (e.g. “NLU testing required”). + +### Validate Data and Stories + +Data validation verifies that there are no mistakes or +major inconsistencies in your domain file, NLU data, or story data. + +```bash +rasa data validate --fail-on-warnings --max-history <max_history> +``` + +By default the validator searches only for errors in the data (e.g. the same +example being listed as an example for two intents), but does not report other +minor issues (such as unused intents, utterances that are not listed as +actions) that won't prevent training a model, but might indicate +messy data. + +If data validation results in errors, training a model will also fail, so it's +always good to run this check before training a model. By including the +`--fail-on-warnings` flag, this step will also fail on those warnings indicating minor issues. + +Data validation also includes story structure validation. +Story validation checks if you have any +stories where different bot actions follow from the same dialogue history. +Conflicts between stories will prevent a model from learning the correct +pattern for a dialogue. Set the `--max-history` parameter to the value of `max_history` for the +memoization policy in your `config.yml`. If you haven't set one, use the default of `5`. + +:::caution +The `rasa data validate stories` command assumes that all your **story names are unique**. +If your stories are in the Markdown format, you may find duplicate names with a command like +`grep -h "##" data/\*.md | uniq -c | grep "^[^1]"`. + +::: + +### Train a Model + +```bash +rasa train +``` + +Training a model verifies that your NLU pipeline and policy configurations are +valid and trainable, and it provides a model to use for test conversations. +If it passes the CI tests, then you can also [upload the trained model](./setting-up-ci-cd.mdx#uploading-a-model) +to your server as part of the continuous deployment process . + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="test-the-assistant"></a> + +### Test the Assistant + +Testing your trained model on [test conversations](./testing-your-assistant.mdx#end-to-end-testing) is the best way to have confidence in how your assistant +will act in certain situations. 
These stories, written in a modified story +format, allow you to provide entire conversations and test that, given this +user input, your model will behave in the expected manner. This is especially +important as you start introducing more complicated stories from user +conversations. + +```bash +rasa test --stories tests/conversation_tests.md --fail-on-prediction-errors +``` + +The `--fail-on-prediction-errors` flag ensures the test will fail if any test +conversation fails. + +End-to-end testing is only as thorough and accurate as the test +cases you include, so you should continue to grow your set of test conversations +as you make improvements to your assistant. A good rule of thumb to follow is that you should aim for your test conversations +to be representative of the true distribution of real conversations. +Rasa X makes it easy to [add test conversations based on real conversations](https://rasa.com/docs/rasa-x/user-guide/test-assistant/#how-to-create-tests). + +Note: End-to-end testing does **not** execute your action code. You will need to +[test your action code](./setting-up-ci-cd.mdx#testing-action-code) in a seperate step. + +### Compare NLU Performance + +If you've made significant changes to your NLU training data (e.g. +splitting an intent into two intents or adding a lot of training examples), you should run a +[full NLU evaluation](./testing-your-assistant.mdx#evaluating-an-nlu-model). You'll want to compare +the performance of the NLU model without your changes to an NLU model with your +changes. + +You can do this by running NLU testing in cross-validation mode: + +```bash +rasa test nlu --cross-validation +``` + +You could also train a model on a training set and testing it on a test set. If you use the train-test +set approach, it is best to [shuffle and split your data](./command-line-interface.mdx#create-a-train-test-split) using `rasa data split` as part of this CI step, as +opposed to using a static NLU test set, which can easily become outdated. + +Because this test doesn't result in a pass/fail exit code, it's best to make +the results visible so that you can interpret them. +For example, [this workflow](https://gist.github.com/amn41/de555c93913a01fbd56df2e2d211862c) +includes commenting on a PR with a results table that shows which intents are confused with others. + +Since NLU comparison can be a fairly resource intensive test, you may choose to run this test +only when certain conditions are met. Conditions might include the presence of a manual label (e.g. “NLU +testing required”), changes to NLU data, or changes to the NLU pipeline. + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="testing-action-code"></a> + +### Test Action Code + +The approach used to test your action code will depend on how it is +implemented. For example, if you connect to external APIs, it is recommended to write unit tests to ensure +that those APIs respond as expected to common inputs. However you test your action code, you should +include these tests in your CI pipeline so that they run each time you make changes. + +## Continuous Deployment (CD) + +To get improvements out to your users frequently, you will want to automate as +much of the deployment process as possible. + +CD steps usually run on push or merge to a certain branch, once CI checks have +succeeded. 
+ +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="uploading-a-model"></a> + +### Deploy your Rasa Model + +If you ran [end-to-end tests](./setting-up-ci-cd.mdx#test-the-assistant) in your CI pipeline, +you'll already have a trained model. You can set up your CD pipeline to upload the trained model to your +Rasa server if the CI results are satisfactory. For example, to upload a model to Rasa X: + +```bash +curl -k -F "model=@models/my_model.tar.gz" "https://example.rasa.com/api/projects/default/models?api_token={your_api_token}" +``` + +If you are using Rasa X, you can also [tag the uploaded model](https://rasa.com/docs/rasa-x/api/rasa-x-http-api/#tag/Models/paths/~1projects~1{project_id}~1models~1{model}~1tags~1{tag}/put) +as `active` (or whichever deployment you want to tag if using multiple [deployment environments](https://rasa.com/docs/rasa-x/enterprise/deployment-environments/#)): + +```bash +curl -X PUT "https://example.rasa.com/api/projects/default/models/my_model/tags/active" +``` + +However, if your update includes changes to both your model and your action +code, and these changes depend on each other in any way, you should **not** +automatically tag the model as `production`. You will first need to build and +deploy your updated action server, so that the new model won't e.g. call +actions that don't exist in the pre-update action server. + +### Deploy your Action Server + +You can automate +[building and uploading a new image for your action server](./how-to-deploy.mdx#building-an-action-server-image), +to an image repository for each +update to your action code. As noted above, you should be careful with +automatically deploying a new image tag to production if the action server +would be incompatible with the current production model. + +## Example CI/CD pipelines + +As examples, see the CI/CD pipelines for +[Sara](https://github.com/RasaHQ/rasa-demo/blob/master/.github/workflows/build_and_deploy.yml), +the Rasa assistant that you can talk to in the Rasa Docs, and +[Carbon Bot](https://github.com/RasaHQ/carbon-bot/blob/master/.github/workflows/model_ci.yml). +Both use [Github Actions](https://github.com/features/actions) as a CI/CD tool. + +These examples are just two of many possibilities. If you have a CI/CD setup you like, please +share it with the Rasa community on the [forum](https://forum.rasa.com). diff --git a/docs/utils/__init__.py b/docs/docs/sources/.keep similarity index 100% rename from docs/utils/__init__.py rename to docs/docs/sources/.keep diff --git a/docs/docs/stories.mdx b/docs/docs/stories.mdx new file mode 100644 index 000000000000..48da6d13f204 --- /dev/null +++ b/docs/docs/stories.mdx @@ -0,0 +1,219 @@ +--- +id: stories +sidebar_label: Stories +title: Stories +description: Stories are used to teach Rasa real conversation designs to learn from providing the basis for a scalable machine learning dialogue management. +--- + +Rasa stories are a type of training data used to train Rasa's dialogue management +models. While [Rules](./rules.mdx) implement a fixed +behavior for small conversation parts, stories can be used to train models which are +able to generalize to unseen conversation paths. + +Rasa stories are a form of training data used to train the Rasa’s dialogue management models. 
+ +A story is a representation of a conversation between a user and an AI assistant, converted into a specific format where user inputs are expressed as corresponding intents (and entities where necessary) while the responses of an assistant are expressed as corresponding action names. + +A training example for the Rasa Core dialogue system is called a **story**. +This is a guide to the story data format. + +:::note +You can also **spread your stories across multiple files** and specify the +folder containing the files for most of the scripts (e.g. training, +visualization). The stories will be treated as if they would have +been part of one large file. + +::: + +## Format + +Here's an example of a dialogue in the Rasa story format: + +```story +## greet + location/price + cuisine + num people <!-- name of the story - just for debugging --> +* greet + - action_ask_howcanhelp +* inform{"location": "rome", "price": "cheap"} <!-- user utterance, in format intent{entities} --> + - action_on_it + - action_ask_cuisine +* inform{"cuisine": "spanish"} + - action_ask_numpeople <!-- action that the bot should execute --> +* inform{"people": "six"} + - action_ack_dosearch +``` + +### What makes up a story? + +* A story starts with a name preceded by two hashes `## story_03248462`. + You can call the story anything you like, but it can be very useful for + debugging to give them descriptive names! + +* The end of a story is denoted by a newline, and then a new story + starts again with `##`. + +* Messages sent by the user are shown as lines starting with `\*` + in the format `intent{"entity1": "value", "entity2": "value"}`. + +* Actions executed by the bot are shown as lines starting with `-` + and contain the name of the action. + +* Events returned by an action are on lines immediately after that action. + For example, if an action returns a `SlotSet` event, this is shown as + `slot{"slot_name": "value"}`. + +### User Messages + +While writing stories, you do not have to deal with the specific contents of +the messages that the users send. Instead, you can take advantage of the output +from the NLU pipeline, which lets you use just the combination of an intent and +entities to refer to all the possible messages the users can send to mean the +same thing. + +It is important to include the entities here as well because the policies learn +to predict the next action based on a *combination* of both the intent and +entities (you can, however, change this behavior using the +[use_entities](./domain.mdx#use-entities) attribute). + +:::caution +`/` symbol is reserved as a delimiter to separate retrieval intents from response text identifiers. +Refer to `Training Data Format` section of [Retrieval Actions](./retrieval-actions.mdx) for more details on this format. +If any of the intent names contain the delimiter, the file containing these stories will be considered as a training +file for [ResponseSelector](./components/selectors.mdx#responseselector) model and will be ignored for training Core models. + +::: + +### Actions + +While writing stories, you will encounter two types of actions: utterance actions +and custom actions. Utterance actions are hardcoded messages that a bot can respond +with. Custom actions, on the other hand, involve custom code being executed. + +All actions (both utterance actions and custom actions) executed by the bot are shown +as lines starting with `-` followed by the name of the action. 
+ +The responses for utterance actions must begin with the prefix `utter_`, and must match the name +of the response defined in the domain. + +For custom actions, the action name is the string you choose to return from +the `name` method of the custom action class. Although there is no restriction +on naming your custom actions (unlike utterance actions), the best practice here is to +prefix the name with `action_`. + +### Events + +Events such as setting a slot or activating/deactivating a form have to be +explicitly written out as part of the stories. Having to include the events +returned by a custom action separately, when that custom action is already +part of a story might seem redundant. However, since Rasa cannot +determine this fact during training, this step is necessary. + +You can read more about events [here](./events.mdx). + +#### Slot Events + +Slot events are written as `- slot{"slot_name": "value"}`. If this slot is set +inside a custom action, it is written on the line immediately following the +custom action event. If your custom action resets a slot value to None, the +corresponding event for that would be `-slot{"slot_name": null}`. + +#### Form Events + +There are three kinds of events that need to be kept in mind while dealing with +forms in stories. + +* A form action event (e.g. `- restaurant_form`) is used in the beginning when first starting a form, and also while resuming the form action when the form is already active. + +* A form activation event (e.g. `- active_loop{"name": "restaurant_form"}`) is used right after the first form action event. + +* A form deactivation event (e.g. `- active_loop{"name": null}`), which is used to deactivate the form. + +:::note +In order to get around the pitfall of forgetting to add events, the recommended +way to write these stories is to use [interactive learning](./writing-stories.mdx#using-interactive-learning). + +::: + +## Checkpoints and OR statements + +Checkpoints and OR statements should both be used with caution, if at all. +There is usually a better way to achieve what you want by using forms and/or +retrieval actions. + +### Checkpoints + +You can use `> checkpoints` to modularize and simplify your training +data. Checkpoints can be useful, but **do not overuse them**. Using +lots of checkpoints can quickly make your example stories hard to +understand. It makes sense to use them if a story block is repeated +very often in different stories, but stories *without* checkpoints +are easier to read and write. Here is an example story file which +contains checkpoints (note that you can attach more than one checkpoint +at a time): + +```story +## first story +* greet + - action_ask_user_question +> check_asked_question + +## user affirms question +> check_asked_question +* affirm + - action_handle_affirmation +> check_handled_affirmation + +## user denies question +> check_asked_question +* deny + - action_handle_denial +> check_handled_denial + +## user leaves +> check_handled_denial +> check_handled_affirmation +* goodbye + - utter_goodbye +``` + +:::note +Unlike regular stories, checkpoints are not restricted to starting with an +input from the user. As long as the checkpoint is inserted at the right points +in the main stories, the first event can be a custom action or a response action +as well. + +::: + +### OR Statements + +Another way to write shorter stories, or to handle multiple intents +the same way, is to use an `OR` statement. 
For example, if you ask +the user to confirm something, and you want to treat the `affirm` +and `thankyou` intents in the same way. The story below will be +converted into two stories at training time: + +```story +## story +... + - utter_ask_confirm +* affirm OR thankyou + - action_handle_affirmation +``` + +Just like checkpoints, `OR` statements can be useful, but if you are using a +lot of them, it is probably better to restructure your domain and/or intents. + +:::caution +Overusing these features (both checkpoints and OR statements) +will slow down training. + +::: + +## End-to-End Story Evaluation Format + +The end-to-end story format is a format that combines both NLU and Core training data +into a single file for evaluation. Read more about [Testing Your Assistant](./testing-your-assistant.mdx) + +:::caution +This format is only used for end-to-end evaluation and cannot be used for training. + +::: diff --git a/docs/docs/testing-your-assistant.mdx b/docs/docs/testing-your-assistant.mdx new file mode 100644 index 000000000000..afb0b1bea4dc --- /dev/null +++ b/docs/docs/testing-your-assistant.mdx @@ -0,0 +1,307 @@ +--- +id: testing-your-assistant +sidebar_label: Testing Your Assistant +title: Testing Your Assistant +description: Test your Rasa Open Source assistant to validate and improve your conversations +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +:::note +If you are looking to tune the hyperparameters of your NLU model, +check out this [tutorial](https://blog.rasa.com/rasa-nlu-in-depth-part-3-hyperparameters/). + +::: + + +## End-to-End Testing + +Rasa Open Source lets you test dialogues end-to-end by running through +test conversations and making sure that both NLU and Core make correct predictions. + +To do this, you need some stories in the end-to-end format, +which includes both the NLU output and the original text. +Here are some examples: + +<Tabs values={[{"label": "Basics", "value": "basics"}, {"label": "Custom Actions", "value": "customactions"}, {"label": "Forms Happy Path", "value": "formshappypath"}, {"label": "Forms Unhappy Path", "value": "formsunhappypath"}]} defaultValue="basics"> + <TabItem value="basics"> + + ```story + ## A basic end-to-end test + * greet: hello + - utter_ask_howcanhelp + * inform: show me [chinese](cuisine) restaurants + - utter_ask_location + * inform: in [Paris](location) + - utter_ask_price + ``` + + </TabItem> + <TabItem value="customactions"> + + ```story + ## End-to-End tests where a custom action appends events + * greet: hi + - my_custom_action + <!-- The following events are emitted by `my_custom_action` --> + - slot{"my_slot": "value added by custom action"} + - utter_ask_age + * thankyou: thanks + - utter_noworries + ``` + + </TabItem> + <TabItem value="formshappypath"> + + ```story + ## Testing a conversation with a form + * greet: hi + - utter_greet + * request_restaurant: im looking for a restaurant + - restaurant_form + - active_loop{"name": "restaurant_form"} + * inform: [afghan](cuisine) food + - active_loop: restaurant_form + - active_loop{"name": null} + - utter_slots_values + * thankyou: thanks + - utter_noworries + ``` + + </TabItem> + <TabItem value="formsunhappypath"> + + ```story + ## Testing a conversation with a form and unexpected user input + * greet: hi + - utter_greet + * request_restaurant: im looking for a restaurant + - restaurant_form + - active_loop{"name": "restaurant_form"} + <!-- The user sends a message which should not be handled by the form. 
--> + * chitchat: can you share your boss with me? + - utter_chitchat + - restaurant_form + - active_loop{"name": null} + - utter_slots_values + * thankyou: thanks + - utter_noworries + ``` + + </TabItem> +</Tabs> + +By default Rasa Open Source saves conversation tests to `tests/conversation_tests.md`. +You can test your assistant against them by running: + +```bash +rasa test +``` + +:::note +[Custom Actions](./actions.mdx#custom-actions) are **not executed as part of end-to-end tests.** If your custom +actions append any events to the tracker, this has to be reflected in your end-to-end +tests (e.g. by adding `slot` events to your end-to-end story). + +To test your custom actions, write unit tests for them and include these +tests in your CI/CD pipeline. + +::: + +If you have any questions or problems, please share them with us in the dedicated +[testing section on our forum](https://forum.rasa.com/tags/testing) ! + +:::note +Make sure your model file in `models` is a combined `core` +and `nlu` model. If it does not contain an NLU model, Core will use +the default `RegexInterpreter`. + +::: + + +## Evaluating an NLU Model + +A standard technique in machine learning is to keep some data separate as a *test set*. +You can [split your NLU training data](./command-line-interface.mdx#create-a-train-test-split) +into train and test sets using: + +```bash +rasa data split nlu +``` + +If you've done this, you can see how well your NLU model predicts the test cases using this command: + +```bash +rasa test nlu -u train_test_split/test_data.md --model models/nlu-20180323-145833.tar.gz +``` + +If you don't want to create a separate test set, you can +still estimate how well your model generalises using cross-validation. +To do this, add the flag `--cross-validation`: + +```bash +rasa test nlu -u data/nlu.md --config config.yml --cross-validation +``` + +The full list of options for the script is: + +```text [rasa test nlu --help] +``` + +### Comparing NLU Pipelines + +By passing multiple pipeline configurations (or a folder containing them) to the CLI, Rasa will run +a comparative examination between the pipelines. + +```bash +rasa test nlu --config pretrained_embeddings_spacy.yml supervised_embeddings.yml + --nlu data/nlu.md --runs 3 --percentages 0 25 50 70 90 +``` + +The command in the example above will create a train/test split from your data, +then train each pipeline multiple times with 0, 25, 50, 70 and 90% of your intent data excluded from the training set. +The models are then evaluated on the test set and the f1-score for each exclusion percentage is recorded. This process +runs three times (i.e. with 3 test sets in total) and then a graph is plotted using the means and standard deviations of +the f1-scores. + +The f1-score graph - along with all train/test sets, the trained models, classification and error reports - will be saved into a folder +called `nlu_comparison_results`. + +### Intent Classification + +The evaluation script will produce a report, confusion matrix, +and confidence histogram for your model. + +The report logs precision, recall and f1 measure for +each intent and entity, as well as providing an overall average. +You can save these reports as JSON files using the `--report` argument. + +The confusion matrix shows you which +intents are mistaken for others; any samples which have been +incorrectly predicted are logged and saved to a file +called `errors.json` for easier debugging. 
+ +The histogram that the script produces allows you to visualise the +confidence distribution for all predictions, +with the volume of correct and incorrect predictions being displayed by +blue and red bars respectively. +Improving the quality of your training data will move the blue +histogram bars to the right and the red histogram bars +to the left of the plot. + +:::caution +If any of your entities are incorrectly annotated, your evaluation may fail. One common problem +is that an entity cannot stop or start inside a token. +For example, if you have an example for a `name` entity +like `[Brian](name)'s house`, this is only valid if your tokenizer splits `Brian's` into +multiple tokens. + +::: + +### Response Selection + +The evaluation script will produce a combined report for all response selector models in your pipeline. + +The report logs precision, recall and f1 measure for +each response, as well as providing an overall average. +You can save these reports as JSON files using the `--report` argument. + +### Entity Extraction + +The `CRFEntityExtractor` is the only entity extractor which you train using your own data, +and so is the only one that will be evaluated. If you use the spaCy or duckling +pre-trained entity extractors, Rasa NLU will not include these in the evaluation. + +Rasa NLU will report recall, precision, and f1 measure for each entity type that +`CRFEntityExtractor` is trained to recognize. + +### Entity Scoring + +To evaluate entity extraction we apply a simple tag-based approach. We don't consider BILOU tags, but only the +entity type tags on a per token basis. For location entity like “near Alexanderplatz” we +expect the labels `LOC LOC` instead of the BILOU-based `B-LOC L-LOC`. Our approach is more lenient +when it comes to evaluation, as it rewards partial extraction and does not punish the splitting of entities. +For example, given the aforementioned entity “near Alexanderplatz” and a system that extracts +“Alexanderplatz”, our approach rewards the extraction of “Alexanderplatz” and punishes the missed out word “near”. +The BILOU-based approach, however, would label this as a complete failure since it expects Alexanderplatz +to be labeled as a last token in an entity (`L-LOC`) instead of a single token entity (`U-LOC`). Note also that +a split extraction of “near” and “Alexanderplatz” would get full scores on our approach and zero on the +BILOU-based one. + +Here's a comparison between the two scoring mechanisms for the phrase “near Alexanderplatz tonight”: + +| extracted |Simple tags (score) | BILOU tags (score) | +|----------------------------------------------------|--------------------|-----------------------| +|`[near Alexanderplatz](loc) [tonight](time)` |loc loc time (3) |B-loc L-loc U-time (3) | +|`[near](loc) [Alexanderplatz](loc) [tonight](time)` |loc loc time (3) |U-loc U-loc U-time (1) | +|`near [Alexanderplatz](loc) [tonight](time)` |O loc time (2) |O U-loc U-time (1) | +|`[near](loc) Alexanderplatz [tonight](time)` |loc O time (2) |U-loc O U-time (1) | +|`[near Alexanderplatz tonight](loc)` |loc loc loc (2) |B-loc I-loc L-loc (1) | + + +## Evaluating a Core Model + +You can evaluate your trained model on a set of test stories +by using the evaluate script: + +```bash +rasa test core --stories test_stories.md --out results +``` + +This will print the failed stories to `results/failed_stories.md`. +We count any story as failed if at least one of the actions +was predicted incorrectly. 
+ +In addition, this will save a confusion matrix to a file called +`results/story_confmat.pdf`. For each action in your domain, the confusion +matrix shows how often the action was correctly predicted and how often an +incorrect action was predicted instead. + +The full list of options for the script is: + +```text [rasa test core --help] +``` + +## Comparing Core Configurations + +To choose a configuration for your core model, or to choose hyperparameters for a +specific policy, you want to measure how well Rasa Core will generalise +to conversations which it hasn't seen before. Especially in the beginning +of a project, you do not have a lot of real conversations to use to train +your bot, so you don't just want to throw some away to use as a test set. + +Rasa Core has some scripts to help you choose and fine-tune your policy configuration. +Once you are happy with it, you can then train your final configuration on your +full data set. To do this, you first have to train models for your different +configurations. Create two (or more) config files including the policies you want to +compare, and then use the `compare` mode of the train script to train your models: + +```bash +rasa train core -c config_1.yml config_2.yml \ + -d domain.yml -s stories_folder --out comparison_models --runs 3 \ + --percentages 0 5 25 50 70 95 +``` + +For each policy configuration provided, Rasa Core will be trained multiple times +with 0, 5, 25, 50, 70 and 95% of your training stories excluded from the training +data. This is done for multiple runs to ensure consistent results. + +Once this script has finished, you can use the evaluate script in `compare` +mode to evaluate the models you just trained: + +```bash +rasa test core -m comparison_models --stories stories_folder + --out comparison_results --evaluate-model-directory +``` + +This will evaluate each of the models on the provided stories +(can be either training or test set) and plot some graphs +to show you which policy performs best. By evaluating on the full set of stories, you +can measure how well Rasa Core is predicting the held-out stories. +To compare single policies create config files containing only one policy each. + +:::note +This training process can take a long time, so we'd suggest letting it run +somewhere in the background where it can't be interrupted. + +::: diff --git a/docs/docs/tracker-dispatcher.mdx b/docs/docs/tracker-dispatcher.mdx new file mode 100644 index 000000000000..6e6815fc3b7b --- /dev/null +++ b/docs/docs/tracker-dispatcher.mdx @@ -0,0 +1,132 @@ +--- +id: tracker-dispatcher +sidebar_label: Tracker & Dispatcher +title: Tracker & Dispatcher +--- + +## Tracker + +The `rasa_sdk.Tracker` lets you access the bot's memory in your custom +actions. You can get information about past events and the current state of the +conversation through `Tracker` attributes and methods. + +The following are available as attributes of a `Tracker` object: + +* `sender_id` - The unique ID of person talking to the bot. + +* `slots` - The list of slots that can be filled as defined in the + “ref”domains. + +* `latest_message` - A dictionary containing the attributes of the latest + message: `intent`, `entities` and `text`. + +* `events` - A list of all previous events. + +* `active_form` - The name of the currently active form. + +* `latest_action_name` - The name of the last action the bot executed. 
+ +### Tracker Methods + +The available methods from the `Tracker` are: + +#### `Tracker.current_state` +Tracker.current_state**: +Return the current tracker state as an object. + + +* **Return type** + + `Dict`[`str`, `Any`] + + + +#### `Tracker.is_paused` +Tracker.is_paused**: +State whether the tracker is currently paused. + + +* **Return type** + + `bool` + + + +#### `Tracker.get_latest_entity_values` +Tracker.get_latest_entity_valuesentity_type, entity_role=None, entity_group=None**: +Get entity values found for the passed entity type and optional role and +group in latest message. + +If you are only interested in the first entity of a given type use +next(tracker.get_latest_entity_values(“my_entity_name”), None). +If no entity is found None is the default result. + + +* **Parameters** + + * **entity_type** – the entity type of interest + + * **entity_role** – optional entity role of interest + + * **entity_group** – optional entity group of interest + + + +* **Returns** + + List of entity values. + + + +* **Return type** + + `Iterator`[`str`] + + + +#### `Tracker.get_latest_input_channel` +Tracker.get_latest_input_channel**: +Get the name of the input_channel of the latest UserUttered event + + +* **Return type** + + `Optional`[`str`] + + + +#### `Tracker.events_after_latest_restart` +Tracker.events_after_latest_restart**: +Return a list of events after the most recent restart. + + +* **Return type** + + `List`[`dict`] + + + +#### `Tracker.get_slot` +Tracker.get_slotkey**: +Retrieves the value of a slot. + + +* **Return type** + + `Optional`[`Any`] + + + +## Dispatcher + +Details of the `dispatcher.utter_message()` method: + + +#### CollectingDispatcher.utter_message +CollectingDispatcher.utter_messagetext=None, image=None, json_message=None, template=None, attachment=None, buttons=None, elements=None, \*\*kwargs**: +“Send a text to the output channel + + +* **Return type** + + `None` diff --git a/docs/docs/tracker-stores.mdx b/docs/docs/tracker-stores.mdx new file mode 100644 index 000000000000..7701c569ead8 --- /dev/null +++ b/docs/docs/tracker-stores.mdx @@ -0,0 +1,426 @@ +--- +id: tracker-stores +sidebar_label: Tracker Stores +title: Tracker Stores +description: All conversations are stored within a tracker store. Read how Rasa Open Source provides implementations for different store types out of the box. +--- +<!-- this file is version specific, do not use `@site/...` syntax --> +import variables from './variables.json'; + +All conversations are stored within a tracker store. +Rasa Open Source provides implementations for different store types out of the box. +If you want to use another store, you can also build a custom tracker store by +extending the `TrackerStore` class. + +## InMemoryTrackerStore (default) + + +* **Description** + + `InMemoryTrackerStore` is the default tracker store. It is used if no other + tracker store is configured. It stores the conversation history in memory. + + :::note + As this store keeps all history in memory, the entire history is lost if you restart the Rasa server. + + ::: + + + +* **Configuration** + + To use the `InMemoryTrackerStore` no configuration is needed. + + +## SQLTrackerStore + + +* **Description** + + `SQLTrackerStore` can be used to store the conversation history in an SQL database. + Storing your trackers this way allows you to query the event database by sender_id, timestamp, action name, + intent name and typename. + + + +* **Configuration** + + To set up Rasa Open Source with SQL the following steps are required: + + 1. 
Add required configuration to your `endpoints.yml`: + + ```yaml + tracker_store: + type: SQL + dialect: "postgresql" # the dialect used to interact with the db + url: "" # (optional) host of the sql db, e.g. "localhost" + db: "rasa" # path to your db + username: # username used for authentication + password: # password used for authentication + query: # optional dictionary to be added as a query string to the connection URL + driver: my-driver + ``` + + 2. To start the Rasa server using your SQL backend, + add the `--endpoints` flag, e.g.: + + ```bash + rasa run -m models --endpoints endpoints.yml + ``` + + 3. If deploying your model in Docker Compose, add the service to your `docker-compose.yml`: + + ```yaml + postgres: + image: postgres:latest + ``` + + To route requests to the new service, make sure that the `url` in your `endpoints.yml` + references the service name: + + ```yaml {4} + tracker_store: + type: SQL + dialect: "postgresql" # the dialect used to interact with the db + url: "postgres" + db: "rasa" # path to your db + username: # username used for authentication + password: # password used for authentication + query: # optional dictionary to be added as a query string to the connection URL + driver: my-driver + ``` + + + +* **Parameters** + + * `domain` (default: `None`): Domain object associated with this tracker store + + * `dialect` (default: `sqlite`): The dialect used to communicate with your SQL backend. Consult the [SQLAlchemy docs](https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls) for available dialects. + + * `url` (default: `None`): URL of your SQL server + + * `port` (default: `None`): Port of your SQL server + + * `db` (default: `rasa.db`): The path to the database to be used + + * `username` (default: `None`): The username which is used for authentication + + * `password` (default: `None`): The password which is used for authentication + + * `event_broker` (default: `None`): Event broker to publish events to + + * `login_db` (default: `None`): Alternative database name to which initially connect, and create the database specified by `db` (PostgreSQL only) + + * `query` (default: `None`): Dictionary of options to be passed to the dialect and/or the DBAPI upon connect + + + +* **Officially Compatible Databases** + + * PostgreSQL + + * Oracle > 11.0 + + * SQLite + + + +* **Oracle Configuration** + + To use the SQLTrackerStore with Oracle, there are a few additional steps. + First, create a database `tracker` in your Oracle database and create a user with access to it. + Create a sequence in the database with the following command, where username is the user you created + (read more about creating sequences [here](https://docs.oracle.com/cd/B28359_01/server.111/b28310/views002.htm#ADMIN11794)): + + ```sql + CREATE SEQUENCE username.events_seq; + ``` + + Next you have to extend the Rasa Open Source image to include the necessary drivers and clients. + First download the Oracle Instant Client from [here](https://www.oracle.com/database/technologies/instant-client/linux-x86-64-downloads.html), + rename it to `oracle.rpm` and store it in the directory from where you'll be building the docker image. 
+ Copy the following into a file called `Dockerfile`: + +<pre><code parentName="pre" className="language-bash"> +{`FROM rasa/rasa:${variables.release}-full + +# Switch to root user to install packages +USER root + +RUN apt-get update -qq && apt-get install -y --no-install-recommends alien libaio1 && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +# Copy in oracle instaclient +# [https://www.oracle.com/database/technologies/instant-client/linux-x86-64-downloads.html](https://www.oracle.com/database/technologies/instant-client/linux-x86-64-downloads.html) +COPY oracle.rpm oracle.rpm + +# Install the Python wrapper library for the Oracle drivers +RUN pip install cx-Oracle + +# Install Oracle client libraries +RUN alien -i oracle.rpm + +USER 1001`}</code></pre> + + Then build the docker image: + +<pre><code parentName="pre" className="language-bash"> +{`docker build . -t rasa-oracle:${variables.release}-oracle-full`}</code></pre> + + Now you can configure the tracker store in the `endpoints.yml` as described above, + and start the container. The `dialect` parameter with this setup will be `oracle+cx_oracle`. + Read more about [Deploying Your Rasa Assistant](./how-to-deploy.mdx#deploying-your-rasa-assistant). + + +## RedisTrackerStore + + +* **Description** + + `RedisTrackerStore` can be used to store the conversation history in [Redis](https://redis.io/). + Redis is a fast in-memory key-value store which can optionally also persist data. + + + +* **Configuration** + + To set up Rasa Open Source with Redis the following steps are required: + + 1. Start your Redis instance + + 2. Add required configuration to your `endpoints.yml`: + + ```yaml + tracker_store: + type: redis + url: <url of the redis instance, e.g. localhost> + port: <port of your redis instance, usually 6379> + db: <number of your database within redis, e.g. 0> + password: <password used for authentication> + use_ssl: <whether or not the communication is encrypted, default `false`> + ``` + + 3. To start the Rasa server using your configured Redis instance, + add the `--endpoints` flag, e.g.: + + ```bash + rasa run -m models --endpoints endpoints.yml + ``` + + 4. If deploying your model in Docker Compose, add the service to your `docker-compose.yml`: + + ```yaml + redis: + image: redis:latest + ``` + + To route requests to the new service, make sure that the `url` in your `endpoints.yml` + references the service name: + + ```yaml {3} + tracker_store: + type: redis + url: <url of the redis instance, e.g. localhost> + port: <port of your redis instance, usually 6379> + db: <number of your database within redis, e.g. 0> + password: <password used for authentication> + use_ssl: <whether or not the communication is encrypted, default `false`> + ``` + + + +* **Parameters** + + * `url` (default: `localhost`): The url of your redis instance + + * `port` (default: `6379`): The port which redis is running on + + * `db` (default: `0`): The number of your redis database + + * `password` (default: `None`): Password used for authentication + (`None` equals no authentication) + + * `record_exp` (default: `None`): Record expiry in seconds + + * `use_ssl` (default: `False`): whether or not to use SSL for transit encryption + + +## MongoTrackerStore + + +* **Description** + + `MongoTrackerStore` can be used to store the conversation history in [Mongo](https://www.mongodb.com/). + MongoDB is a free and open-source cross-platform document-oriented NoSQL database. + + + +* **Configuration** + + 1. Start your MongoDB instance. + + 2. 
Add required configuration to your `endpoints.yml` + + ```yaml + tracker_store: + type: mongod + url: <url to your mongo instance, e.g. mongodb://localhost:27017> + db: <name of the db within your mongo instance, e.g. rasa> + username: <username used for authentication> + password: <password used for authentication> + auth_source: <database name associated with the user's credentials> + ``` + + You can also add more advanced configurations (like enabling ssl) by appending + a parameter to the url field, e.g. mongodb://localhost:27017/?ssl=true + + 3. To start the Rasa server using your configured MongoDB instance, + add the `--endpoints` flag, e.g.: + + ```bash + rasa run -m models --endpoints endpoints.yml + ``` + + 4. If deploying your model in Docker Compose, add the service to your `docker-compose.yml`: + + ```yaml + mongo: + image: mongo + environment: + MONGO_INITDB_ROOT_USERNAME: rasa + MONGO_INITDB_ROOT_PASSWORD: example + mongo-express: # this service is a MongoDB UI, and is optional + image: mongo-express + ports: + - 8081:8081 + environment: + ME_CONFIG_MONGODB_ADMINUSERNAME: rasa + ME_CONFIG_MONGODB_ADMINPASSWORD: example + ``` + + To route requests to this database, make sure to set the `url` in your `endpoints.yml` as the service name, + and specify the user and password: + + ```yaml {3,5,6} + tracker_store: + type: mongod + url: mongodb://mongo:27017 + db: <name of the db within your mongo instance, e.g. rasa> + username: <username used for authentication> + password: <password used for authentication> + auth_source: <database name associated with the user's credentials> + ``` + + + +* **Parameters** + + * `url` (default: `mongodb://localhost:27017`): URL of your MongoDB + + * `db` (default: `rasa`): The database name which should be used + + * `username` (default: `0`): The username which is used for authentication + + * `password` (default: `None`): The password which is used for authentication + + * `auth_source` (default: `admin`): database name associated with the user's credentials. + + * `collection` (default: `conversations`): The collection name which is + used to store the conversations + + +## DynamoTrackerStore + + +* **Description** + + `DynamoTrackerStore` can be used to store the conversation history in + [DynamoDB](https://aws.amazon.com/dynamodb/). DynamoDB is a hosted NoSQL + database offered by Amazon Web Services (AWS). + + + +* **Configuration** + + 1. Start your DynamoDB instance. + + 2. Add required configuration to your `endpoints.yml`: + + ```yaml + tracker_store: + type: dynamo + tablename: <name of the table to create, e.g. rasa> + region: <name of the region associated with the client> + ``` + + 3. To start the Rasa server using your configured `DynamoDB` instance, + add the `--endpoints` flag, e.g.: + + ```bash + rasa run -m models --endpoints endpoints.yml + ``` + + + +* **Parameters** + + * `tablename` (default: `states`): name of the DynamoDB table + + * `region` (default: `us-east-1`): name of the region associated with the client + + +## Custom Tracker Store + + +* **Description** + + If you require a tracker store which is not available out of the box, you can implement your own. + This is done by extending the base class `TrackerStore`. + + - SKIPPED CLASS DOCUMENTATION - + +* **Steps** + + 1. Extend the `TrackerStore` base class. Note that your constructor has to + provide a parameter `url`. + + 2. 
In your `endpoints.yml` put in the module path to your custom tracker store + and the parameters you require: + + ```yaml + tracker_store: + type: path.to.your.module.Class + url: localhost + a_parameter: a value + another_parameter: another value + ``` + + 3. If you are deploying in Docker Compose, you have two options to add this store to Rasa Open Source: + + * extending the Rasa image to include the module + + * mounting the module as volume + + Make sure to add the corresponding service as well. For example, mounting it as a volume would look like so: + + `docker-compose.yml`: + + ```yaml {5,6,7} + rasa: + <existing rasa service configuration> + volumes: + - <existing volume mappings, if there are any> + - ./path/to/your/module.py:/app/path/to/your/module.py + custom-tracker-store: + image: custom-image:tag + ``` + + `endpoints.yml`: + + ```yaml {3} + tracker_store: + type: path.to.your.module.Class + url: custom-tracker-store + a_parameter: a value + another_parameter: another value + ``` diff --git a/docs/docs/training-data-format.mdx b/docs/docs/training-data-format.mdx new file mode 100644 index 000000000000..4fb28912dc31 --- /dev/null +++ b/docs/docs/training-data-format.mdx @@ -0,0 +1,703 @@ +--- +id: training-data-format +sidebar_label: Training Data Format +title: Training Data Format +description: Description of the YAML format for training data +--- + +## Overview + +Rasa Open Source uses [YAML](https://yaml.org/spec/1.2/spec.html) as +a unified and extendable way to manage all training data, +including NLU data, stories and rules. + +With the YAML format, training data can be split over any number of YAML files, +and every file can contain any kind of data. +The training data parser will read the top level keys in each file to decide +what kind of data is in a section at training time. + +The [domain](glossary.mdx#domain) uses +the same YAML format as the training data and can also be split across +multiple files or combined in one file. The domain includes +the definitions for [responses](domain.mdx#responses) and [forms](forms.mdx). +See the [documentation for the domain](domain.mdx) for information on how to format your domain file. + +:::note Legacy Formats +Looking for Rasa Open Source 1.x data formats? They are now deprecated, but +you can still find the documentation for +[markdown NLU data](https://legacy-docs-v1.rasa.com/rasa/nlu/training-data-format/) and +[markdown stories](https://legacy-docs-v1.rasa.com/rasa/core/stories/). +::: + +### High-Level Structure + +Each file can contain one or more **keys** with corresponding training +data. One file can contain multiple keys, as long as there is not more +than one of a certain key in a single file. The available keys are: + +- `version` +- `nlu` +- `stories` +- `rules` +- `e2e_tests` + +All YAML training data files should specify the `version` key to be parsed +correctly. If you don't specify a version key in your training data file, Rasa +will assume you are using the latest training data format specification supported +by the version of Rasa Open Source you have installed. +Training data files with a version greater than is currently available in +the version of Rasa Open Source you have installed will be +skipped. +Currently, the latest training data format specification for Rasa 2.x is 2.0. 
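+
+For instance, a file that holds only NLU data needs just the `version` key and the `nlu` key. A minimal sketch (the intent name and examples are purely illustrative):
+
+```yaml
+version: "2.0"
+
+nlu:
+- intent: greet
+  examples: |
+    - hi
+    - hey there
+```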
+ +### Example + +Here's a short example which keeps all training data in a single file: + +```yaml +version: "2.0" + +stories: +- story: greet and faq + steps: + - intent: greet + - action: utter_greet + - intent: faq + - action: respond_faq + +rules: +- rule: Greet user + steps: + - intent: greet + - action: utter_greet + +nlu: +- intent: greet + examples: | + - Hey + - Hi + - hey there [Sara](name) + +- intent: faq/language + examples: | + - What language do you speak? + - Do you only handle english? + +e2e_tests: +- user: | + hey + intent: greet +- action: utter_greet +- user: | + what language do you speak + intent: faq/language +- action: respond_faq +``` + + +## NLU Training Data + +[NLU](glossary.mdx#nlu) training data consists of example user utterances categorized by +**intent**, i.e. what the user is trying to convey or accomplish with their +message. Training examples can also include **entities**. Entities are structured +pieces of information that can be extracted from a user's message. You can also +add extra information such as regular expressions and lookup tables to your +training data to help the model identify intents and entities correctly. + +NLU training data is defined under the `nlu` key. Items that can be added under this key are: + +- [Training examples](#training-examples) grouped by user intent e.g. + optionally with annotated [entities](#entities) + +```yaml +- intent: check_balance + examples: | + - What's my [credit](account) balance? + - What's the balance on my [credit card account]{"entity":"account","value":"credit"} +``` + +- [Synonyms](#synonyms) + +```yaml +- synonym: credit + examples: | + - credit card account + - credit account +``` + +- [Regular expressions](#regular-expression-features) + +```yaml +- regex: account_number + examples: | + - \d{10:12} +``` + +- [Lookup tables](#lookup-tables) + +```yaml +- lookup: banks + examples: | + - JPMC + - Comerica + - Bank of America +``` + +### Training Examples + +Training examples are grouped by [intent](glossary.mdx#intent) and listed under the +`examples` key. Usually, you'll list one example per line as follows: + +```yaml +nlu: +- intent: greet + examples: | + - hey + - hi + - whats up +``` + +However, it's also possible to use an extended format if you have a custom NLU component and need metadata for your examples: + + +```yaml +nlu: +- intent: greet + examples: + - text: | + hi + metadata: + sentiment: neutral + - text: | + hey there! +``` + +The `metadata` key can contain arbitrary key-value data that stays with an example and is accessible by the components in the NLU pipeline. In the example above, the sentiment of +the example could be used by a custom component in the pipeline for sentiment analysis. + +### Entities + +[Entities](glossary.mdx#entity) are structured pieces of information that can be extracted from a user's message. For entity extraction to work, you need to either specify training data to train an ML model or you need to define [regular expressions](#regular-expressions-for-entity-extraction) to extract entities using the [`RegexEntityExtractor`](components/entity-extractors.mdx#regexentityextractor) based on a character pattern. + +Entities are annotated in training examples with minimally the entity's name. +In addition to the entity name, you can annotate an entity with synonyms, roles, or groups. 
+ +The syntax for annotating an entity is: + +```yaml +[<entity-text>]{"entity": "<entity name>", "role": "<role name>", "group": "<group name>", "value": "<entity synonym>"} +``` + +The keywords `role`, `group`, and `value` are optional in this notation. +The `value` keyword refers to synonyms, which are explained in the +following section. To understand what the labels `role` and `group` are +for, see the section on [entity roles and groups](nlu-training-data.mdx#entities-roles-and-groups). + +In training examples, entity annotation would look like this: + +```yaml +nlu: +- intent: check_balance + examples: | + - how much do I have on my [savings]("account") account + - how much money is in my [checking]{"entity": "account"} account + - What's the balance on my [credit card account]{"entity":"account","value":"credit"} + +``` + + +### Synonyms + +Synonyms provide a way to normalize your training data by mapping an +extracted entity to a value other than the literal text extracted. +Synonyms can be defined in the format: + +```yaml +- synonym: credit + examples: | + - credit card account + - credit account +``` + +Synonyms can also be defined in-line in your training examples by +specifying the `value` of the entity: + +```yaml +nlu: +- intent: check_balance + examples: | + - how much do I have on my [credit card account]{"entity": "account", "value": "credit"} + - how much do I owe on my [credit account]{"entity": "account", "value": "credit"} +``` + +To use the synonyms defined in your training data, you need to make sure the +pipeline in your configuration file contains the +[`EntitySynonymMapper`](components/entity-extractors.mdx#entitysynonymmapper) component. You +should define synonyms when there are multiple ways users refer to the same +thing. + +#### Example + +Let's say you had an entity `account`, and you expect the +value "credit". Your users also refer to their "credit" account as "credit +account" and "credit card account". + +In this case, you could define "credit card account" and "credit account" as +**synonyms** to "credit", as in the examples above. + +Then, if either of these phrases is extracted as an entity, it will be +mapped to the **value** `credit`. + +:::note Needs Training Examples +Synonym mapping only happens **after** entities have been extracted. +That means that your training examples should include the synonym examples +(`credit card account` and `credit account`) so that the model will learn to +recognize these as entities and replace them with `credit`. +::: + +### Regular Expressions + +You can use regular expressions to improve intent classification and +entity extraction in combination with the [`RegexFeaturizer`](components/featurizers.mdx#regexfeaturizer) and [`RegexEntityExtractor`](components/entity-extractors.mdx#regexentityextractor) components in the pipeline. + +#### Regular Expressions for Intent Classification + +You can use regular expressions to improve intent classification by including the `RegexFeaturizer` component in your pipeline. When using the `RegexFeaturizer`, a regex does not act as a rule for classifying an intent. It only provides a feature that the intent classifier will use +to learn patterns for intent classification. +Currently, all intent classifiers make use of available regex features. + +The name of a regex in this case is a human readable description. It can help you remember what a regex is used for, and it is the title of the corresponding pattern feature. It does not have to match any intent or entity name. 
A regex for greeting might look like this: + +```yaml +- regex: greet + examples: | + - hey[^\\s]* +``` + +The intent being matched could be `greet`,`say_hi`, `hallo` or anything else. + +Try to create your regular expressions in a way that they match as few +words as possible. E.g. using `hey[^\\s]*` instead of `hey.*`, as the +later one might match the whole message whereas the first one only +matches a single word. + +#### Regular Expressions for Entity Extraction + +If your entity has a deterministic structure (like an account number), you can use regular expressions in one of two ways: + +1. As features for the [`RegexFeaturizer`](components/featurizers.mdx#regexfeaturizer) component in the pipeline. + + When used as as features for the `RegexFeaturizer` the + name of the regular expression does not matter. + + Regex features for entity extraction + are currently only supported by the `CRFEntityExtractor` and the + `DIETClassifier` component. Other entity extractors, like + `MitieEntityExtractor` or `SpacyEntityExtractor`, won't use the generated + features and their presence will not improve entity recognition for + these extractors. + +2. For rule-based entity extraction using the [`RegexEntityExtractor`](components/entity-extractors.mdx#regexentityextractor) component in the pipeline. + + When using the `RegexEntityExtractor`, the name of the regular expression should + match the name of the entity you want to extract. + +In both cases, the format for defining the regex is the same. +For example, a regex for extracting a bank account number that is 10-12 digits long mightlook like this: + +```yaml +- regex: account_number + examples: | + - \d{10:12} +``` + + +:::note Combining with Regexes + +When using the `RegexFeaturizer`, a regular expression only provides a feature +that helps the model learn an association between intents/entities and inputs +that fit the regular expression. In order for the model to learn this association, +you must provide example inputs that include that regular expression! +::: + + +### Lookup Tables + +Lookup tables provide a convenient way to supply a list of entity +examples. The format is as follows: + +```yaml +- lookup: banks + examples: | + - JPMC + - Bank of America +``` + +The name of the lookup table is subject to the same constraints as the +name of a regex feature. + +When you supply a lookup table in your training data, the contents of that table +are combined into one large regular expression. This regex is used to check +each training example to see if it contains matches for entries in the +lookup table. + +Lookup table regexes are processed identically to the regular +regex patterns directly specified in the training data and can be used +either with the [RegexFeaturizer](components/featurizers.mdx#regexfeaturizer) +or with the [RegexEntityExtractor](components/featurizers.mdx#regexentityextractor). + +:::note Combining with Regexes + +If you are using lookup tables in combination with the `RegexFeaturizer`, +there must be a few examples of matches in your training data. Otherwise the +model will not learn to use the lookup table match features. +::: + +:::warning Keep your lookup tables clean + +You have to be careful when you add data to the lookup table. For example, if there +are false positives or other noise in the table, this can hurt performance. So make +sure your lookup tables contain clean data. 
+::: + + +## Conversation Training Data + +Stories and rules are both representations of conversations between a user +and a conversational assistant. They are used to train the dialogue management +model. [**Stories**](stories.mdx) are used to train a machine learning model +to identify patterns in conversations and generalize to unseen conversation paths. +**[Rules](rules.mdx)** describe parts of conversations that should always +follow the same path and are used to train the +[RulePolicy](policies.mdx#rule-policy). + + +### Stories + +Stories are composed of: + + - `story`: The story's name. The name is arbitrary and not used in training; + you can use it as a human-readable reference for the story. + - `metadata`: arbitrary and optional, not used in training, + you can use it to store relevant information about the story + like e.g. the author + - a list of `steps`: The user messages and actions that make up the story + +For example: + +```yaml +stories: +- story: Greet the user + metadata: + author: Somebody + key: value + steps: + # list of steps + - intent: greet + - action: utter_greet +``` + +Each step can be one of the following: + + - A [user message](#user-messages), represented by **intent** and **entities**. + - An [or statement](#or), which includes two or more user messages under it + - A bot [action](#actions) + - A [form](#forms) + - A [slot was set](#slots) event + - A [checkpoint](#checkpoints), which connects the story to another story + + +#### User Messages + +All user messages are specified with the `intent:` +key and an optional `entities:` key. + +While writing stories, you do not have to deal with the specific +contents of the messages that the users send. Instead, you can take +advantage of the output from the NLU pipeline, which lets you use +the combination of an intent and entities to refer to all the possible +messages the users can send to mean the same thing. + +User messages follow the format: + +```yaml + steps: + - intent: <intent name> # Required + entities: # Optional + - <entity_name>: <entity value> +``` + +For example, to represent the sentence +`I want to check my credit balance`, where `credit` is an entity: + +```yaml {3-5} +- story: story with entities + steps: + - intent: account_balance + entities: + - account_type: credit + - action: action_credit_account_balance +``` + +It is important to include the entities here as well because the +policies learn to predict the next action based on a *combination* of +both the intent and entities (you can, however, change this behavior +using the [`use_entities`](#use-entities) attribute). + + +#### Actions + +All actions executed by the bot are specified with the `action:` key followed +by the name of the action. +While writing stories, you will encounter three types of actions: + + +1. [**Responses**](domain.mdx#responses): start with `utter_` and + send a specific message to the user. e.g. + +```yaml {4} +- story: story with a response + steps: + - intent: greet + - action: utter_greet +``` + +2. [**Retrieval actions**](retrieval-actions.mdx): start with `respond_` + and send a message selected by a retrieval model. e.g. + +```yaml {4} +- story: story with a retrieval action + steps: + - intent: faq + - action: respond_faq +``` + +3. [**Custom actions**](custom-actions.mdx): start with `action_`, run + arbitrary code and send any number of messages (or none). 
+ +```yaml {4} +- story: story with a custom action + steps: + - intent: feedback + - action: action_store_feedback +``` + +#### Forms + + +A [form](glossary.mdx#form) is a specific kind of custom action that contains the logic to loop over +a set of required slots and ask the user for this information. You +[define a form](forms.mdx#defining-a-form) in the `forms` section in your domain. +Once defined, the [happy path](glossary.mdx#happy-unhappy-paths) +for a form should be specified as a [rule](forms.mdx), but interruptions of forms or +other "unhappy paths" should be included in stories so that the model can +generalize to unseen conversation sequences. +As a step in a story, a form takes the following basic format: + + +```yaml +- story: story with a form + steps: + - intent: find_restaurant + - action: restaurant_form # Activate the form + - active_loop: restaurant_form # This form is currently active + - active_loop: null # Form complete, no form is active + - action: utter_restaurant_found +``` + + +The `action` step activates the form and begins looping over the required slots. The `active_loop: restaurant_form` +step indicates that there is a currently active form. Much like a `slot_was_set` step, +a `form` step doesn't **set** a form to active but indicates that it should already be activated. +In the same way, the `active_loop: null` step indicates that no form should be active before the subsequent +steps are taken. + +A form can be interrupted and remain active; in this case the interruption should come after the +`action: <form to activate>` step and be followed by the `active_loop: <active form>` step. +An interruption of a form could look like this: + +```yaml +- rule: interrupted food + steps: + - intent: request_restaurant + - action: restaurant_form + - intent: chitchat + - action: utter_chitchat + - active_loop: restaurant_form + - active_loop: null + - action: utter_slots_values +``` + + +#### Slots + +A slot event is specified under the key `slot_was_set:` with the +slot name and optionally the slot's value. + +**[Slots](domain.mdx#slots)** act as the bots memory. +Slots are **set** by entities or by custom actions and **referenced** +by stories in `slot_was_set` steps. For example: + +```yaml {4-5} +- story: story with a slot + steps: + - intent: celebrate_bot + - slot_was_set: + - feedback_value: positive + - action: utter_yay +``` + +This means the story requires that the current value for the `feedback_value` +slot be `positive` for the conversation to continue as specified. + +Whether or not you need to include the slot's value depends on the +[slot type](domain.mdx#slot-types) and whether the value can or should +influence the dialogue. If the value doesn't matter, list only +the slot's name: + +```yaml {4-5} +- story: story with a slot + steps: + - intent: greet + - slot_was_set: + - name + - action: utter_greet_user_by_name +``` + + +:::note How slots work +Stories do not **set** slots. The slot must be set by an entity or custom +action **before** the `slot_was_set` step. +::: + + +#### Checkpoints + +Checkpoints are specified with the `checkpoint:` key, either at the beginning +or the end of a story. + + +Checkpoints are ways to connect stories together. They can be either the first +or the last step in a story. If they are the last step in a story, that story +will be connected to each other story that starts with the checkpoint of the +same name when the model is trained. 
Here is an example of a story that ends +with a checkpoint, and one that starts with the same checkpoint: + +```yaml +- story: story_with_a_checkpoint_1 + steps: + - intent: greet + - action: utter_greet + - checkpoint: greet_checkpoint + +- story: story_with_a_checkpoint_2 + steps: + - checkpoint: greet_checkpoint + - intent: book_flight + - action: action_book_flight +``` + +Checkpoints at the beginning of stories can also be conditional on +slots being set, for example: + +```yaml {5-7} +- story: story_with_a_conditional_checkpoint + steps: + - checkpoint: greet_checkpoint + # This checkpoint should only apply if slots are set to the specified value + slots: + - context_scenario: holiday + - holiday_name: thanksgiving + - intent: greet + - action: utter_greet_thanksgiving +``` + + +Checkpoints can help simplify your training data and reduce redundancy in it, +but **do not overuse them**. Using lots of checkpoints can quickly make your +stories hard to understand. It makes sense to use them if a sequence of steps +is repeated often in different stories, but stories without checkpoints +are easier to read and write. + +#### OR statement + +`or` steps are ways to handle multiple intents the same way, +without writing a separate story for each intent. For example, if you ask the user to +confirm something, you might want to treat the `affirm` and `thankyou` intents in the +same way. Stories with `or` steps will be converted into multiple +separate stories at training time. For example, the following story would be converted +to two stories at training time: + +```yaml {5-7} +- story: story with OR + steps: + - intent: signup_newsletter + - action: utter_ask_confirm + - or: + - intent: affirm + - intent: thanks + - action: action_signup_newsletter +``` + +Just like checkpoints, OR statements can be useful, but if you are using a lot of them, +it is probably better to restructure your domain and/or intents. + +:::warning Don't overuse +Overusing these features (both checkpoints and OR statements) will slow down training. +::: + +### Rules + +Rules are listed under the `rules` key and look similar to stories. A rule also has a `steps` +key, which contains a list of the same steps as stories do. Rules can additionally +contain the `conversation_started` and `conditions` keys. These are used to specify conditions +under which the rule should apply. + +A rule that with a condition looks like this: + +```yaml +rules: +- rule: Only say `hey` when the user provided a name + condition: + - slot_was_set: + - user_provided_name: true + steps: + - intent: greet + - action: utter_greet +``` + +Read more about writing rules [here](rules.mdx#writing-a-rule). + +## Test Conversations + +Test conversations combine both NLU and Core training data into a end-to-end story +for evaluation. + +:::info Test Only +This format is only used for end-to-end evaluation and cannot be used for training. +::: + +Test conversations are listed under the `e2e_tests` key. +Their format is similar to the [story](#stories) format, +except that user message steps can include a `user` key which specifies the actual +text and entity annotation of the user message. 
+ +Here's an example of a test conversation: + +```yaml +e2e_tests: +- story: A basic end-to-end test + steps: + - user: | + hey + intent: greet + - action: utter_ask_howcanhelp + - user: | + show me [chinese]{"entity": "cuisine"} restaurants + intent: inform + - action: utter_ask_location + - user: | + in [Paris]{"entity": "location"} + intent: inform + - action: utter_ask_price +``` diff --git a/docs/docs/training-data-importers.mdx b/docs/docs/training-data-importers.mdx new file mode 100644 index 000000000000..801801804372 --- /dev/null +++ b/docs/docs/training-data-importers.mdx @@ -0,0 +1,209 @@ +--- +id: training-data-importers +sidebar_label: Importers +title: Training Data Importers +description: Change the way Rasa imports training data by replacing the default importer or writing your own importer. +--- + +By default, you can use command line arguments to specify where Rasa should look +for training data on your disk. Rasa then loads any potential training files and uses +them to train your assistant. + +If needed, you can also customize how Rasa imports training data. +Potential use cases for this might be: + +* using a custom parser to load training data in other formats + +* using different approaches to collect training data (e.g. loading them from different resources) + +You can instruct Rasa to load and use your custom importer by adding the section +`importers` to the Rasa configuration file and specifying the importer with its +full class path: + +```yaml +importers: +- name: "module.CustomImporter" + parameter1: "value" + parameter2: "value2" +- name: "module.AnotherCustomImporter" +``` + +The `name` key is used to determine which importer should be loaded. Any extra +parameters are passed as constructor arguments to the loaded importer. + +:::note +You can specify multiple importers. Rasa will automatically merge their results. + +::: + +## RasaFileImporter (default) + +By default Rasa uses the importer `RasaFileImporter`. If you want to use it on its +own, you don't have to specify anything in your configuration file. +If you want to use it together with other importers, add it to your +configuration file: + +```yaml +importers: +- name: "RasaFileImporter" +``` + +## MultiProjectImporter (experimental) + +:::caution +This feature is currently experimental and might change or be removed in the future. +Please share your feedback on it in the [forum](https://forum.rasa.com) to help +us making this feature ready for production. + +::: + +With this importer you can build a contextual AI assistant by combining multiple +reusable Rasa projects. +You might, for example, handle chitchat with one project and greet your users with +another. These projects can be developed in isolation, and then combined at train time +to create your assistant. + +An example directory structure could look like this: + +```bash +. +├── config.yml +└── projects + ├── GreetBot + │   ├── data + │   │   ├── nlu.md + │   │   └── stories.md + │   └── domain.yml + └── ChitchatBot + ├── config.yml + ├── data + │   ├── nlu.md + │   └── stories.md + └── domain.yml +``` + +In this example the contextual AI assistant imports the `ChitchatBot` project which in turn +imports the `GreetBot` project. Project imports are defined in the configuration files of +each project. +To instruct Rasa to use the `MultiProjectImporter` module, put this section in the config +file of your root project: + +```yaml +importers: +- name: MultiProjectImporter +``` + +Then specify which projects you want to import. 
+In our example, the `config.yml` in the root project would look like this: + +```yaml +imports: +- projects/ChitchatBot +``` + +The configuration file of the `ChitchatBot` in turn references the `GreetBot`: + +```yaml +imports: +- ../GreetBot +``` + +The `GreetBot` project does not specify further projects so the `config.yml` can be +omitted. + +Rasa uses relative paths from the referencing configuration file to import projects. +These can be anywhere on your file system as long as the file access is permitted. + +During the training process Rasa will import all required training files, combine +them, and train a unified AI assistant. The merging of the training data happens during +runtime, so no additional files with training data are created or visible. + +:::note +Rasa will use the policy and NLU pipeline configuration of the root project +directory during training. **Policy or NLU configurations of imported projects +will be ignored.** + +::: + +:::note +Equal intents, entities, slots, responses, actions and forms will be merged, +e.g. if two projects have training data for an intent `greet`, +their training data will be combined. + +::: + +## Writing a Custom Importer + +If you are writing a custom importer, this importer has to implement the interface of +[TrainingDataImporter](./training-data-importers.mdx#training-data-importers-trainingfileimporter): + +```python +from typing import Optional, Text, Dict, List, Union + +import rasa +from rasa.core.domain import Domain +from rasa.core.interpreter import RegexInterpreter, NaturalLanguageInterpreter +from rasa.core.training.structures import StoryGraph +from rasa.importers.importer import TrainingDataImporter +from rasa.nlu.training_data import TrainingData + + +class MyImporter(TrainingDataImporter): + """Example implementation of a custom importer component.""" + + def __init__( + self, + config_file: Optional[Text] = None, + domain_path: Optional[Text] = None, + training_data_paths: Optional[Union[List[Text], Text]] = None, + **kwargs: Dict + ): + """Constructor of your custom file importer. + + Args: + config_file: Path to configuration file from command line arguments. + domain_path: Path to domain file from command line arguments. + training_data_paths: Path to training files from command line arguments. + **kwargs: Extra parameters passed through configuration in configuration file. 
+ """ + + pass + + async def get_domain(self) -> Domain: + path_to_domain_file = self._custom_get_domain_file() + return Domain.load(path_to_domain_file) + + def _custom_get_domain_file(self) -> Text: + pass + + async def get_stories( + self, + interpreter: "NaturalLanguageInterpreter" = RegexInterpreter(), + template_variables: Optional[Dict] = None, + use_e2e: bool = False, + exclusion_percentage: Optional[int] = None, + ) -> StoryGraph: + from rasa.core.training.dsl import StoryFileReader + + path_to_stories = self._custom_get_story_file() + return await StoryFileReader.read_from_file(path_to_stories, await self.get_domain()) + + def _custom_get_story_file(self) -> Text: + pass + + async def get_config(self) -> Dict: + path_to_config = self._custom_get_config_file() + return rasa.utils.io.read_config_file(path_to_config) + + def _custom_get_config_file(self) -> Text: + pass + + async def get_nlu_data(self, language: Optional[Text] = "en") -> TrainingData: + from rasa.nlu.training_data import loading + + path_to_nlu_file = self._custom_get_nlu_file() + return loading.load_data(path_to_nlu_file) + + def _custom_get_nlu_file(self) -> Text: + pass +``` \ No newline at end of file diff --git a/docs/docs/tuning-your-model.mdx b/docs/docs/tuning-your-model.mdx new file mode 100644 index 000000000000..cbb90070ea1d --- /dev/null +++ b/docs/docs/tuning-your-model.mdx @@ -0,0 +1,374 @@ +--- +id: tuning-your-model +sidebar_label: Tuning Your Model +title: Tuning Your Model +--- +import useBaseUrl from '@docusaurus/useBaseUrl'; + + +## How to Choose a Pipeline + +In Rasa Open Source, incoming messages are processed by a sequence of components. +These components are executed one after another in a so-called processing `pipeline` defined in your `config.yml`. +Choosing an NLU pipeline allows you to customize your model and finetune it on your dataset. + +To get started, you can let the +[Suggested Config](.//model-configuration.mdx#suggested-config) feature choose a +default pipeline for you. +Just provide your bot's `language` in the `config.yml` file and leave the `pipeline` key +out or empty. + +```yaml +language: fr # your 2-letter language code + +pipeline: +# intentionally left empty +``` + +### Sensible Starting Pipelines + +We recommend using following pipeline, if your training data is in English: + +```yaml (docs/sources/data/configs_for_docs/default_english_config.yml) +``` + +The pipeline contains the [ConveRTFeaturizer](./components/featurizers.mdx#convertfeaturizer) that provides pre-trained word embeddings of the user utterance. +Pre-trained word embeddings are helpful as they already encode some kind of linguistic knowledge. +For example, if you have a sentence like “I want to buy apples” in your training data, and Rasa is asked to predict +the intent for “get pears”, your model already knows that the words “apples” and “pears” are very similar. +This is especially useful if you don't have enough training data. +The advantage of the [ConveRTFeaturizer](./components/featurizers.mdx#convertfeaturizer) is that it doesn't treat each word of the user message independently, but +creates a contextual vector representation for the complete sentence. +However, `ConveRT` is only available in English. 
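+
+As a rough sketch of what such an English pipeline can look like (the authoritative configuration is the one included above; the exact components and parameters shown here are illustrative assumptions):
+
+```yaml
+language: en
+
+pipeline:
+  - name: ConveRTTokenizer
+  - name: ConveRTFeaturizer
+  - name: RegexFeaturizer
+  - name: LexicalSyntacticFeaturizer
+  - name: CountVectorsFeaturizer
+  - name: CountVectorsFeaturizer
+    analyzer: char_wb
+    min_ngram: 1
+    max_ngram: 4
+  - name: DIETClassifier
+    epochs: 100
+  - name: EntitySynonymMapper
+  - name: ResponseSelector
+    epochs: 100
+```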
+ +If your training data is not in English, but you still want to use pre-trained word embeddings, we recommend using +the following pipeline: + +```yaml (docs/sources/data/configs_for_docs/default_spacy_config.yml) +``` + +It uses the [SpacyFeaturizer](./components/featurizers.mdx#spacyfeaturizer) instead of the [ConveRTFeaturizer](./components/featurizers.mdx#convertfeaturizer). +[SpacyFeaturizer](./components/featurizers.mdx#spacyfeaturizer) provides pre-trained word embeddings from either GloVe or fastText in many different languages +(see [Language Models](./components/language-models.mdx)). + +If you don't use any pre-trained word embeddings inside your pipeline, you are not bound to a specific language +and can train your model to be more domain specific. +If there are no word embeddings for your language or you have very domain specific terminology, +we recommend using the following pipeline: + +```yaml (docs/sources/data/configs_for_docs/default_config.yml) +``` + +:::note +If you want to use custom components in your pipeline, see [Custom NLU Components](./components/custom-nlu-components.mdx). + +::: + + +### Component Lifecycle + +Each component processes an input and/or creates an output. The order of the components is determined by +the order they are listed in the `config.yml`; the output of a component can be used by any other component that +comes after it in the pipeline. Some components only produce information used by other components +in the pipeline. Other components produce `output` attributes that are returned after +the processing has finished. + +For example, for the sentence `"I am looking for Chinese food"`, the output is: + +```json +{ + "text": "I am looking for Chinese food", + "entities": [ + { + "start": 8, + "end": 15, + "value": "chinese", + "entity": "cuisine", + "extractor": "DIETClassifier", + "confidence": 0.864 + } + ], + "intent": {"confidence": 0.6485910906220309, "name": "restaurant_search"}, + "intent_ranking": [ + {"confidence": 0.6485910906220309, "name": "restaurant_search"}, + {"confidence": 0.1416153159565678, "name": "affirm"} + ] +} +``` + +This is created as a combination of the results of the different components in the following pipeline: + +```yaml +pipeline: + - name: WhitespaceTokenizer + - name: RegexFeaturizer + - name: LexicalSyntacticFeaturizer + - name: CountVectorsFeaturizer + - name: CountVectorsFeaturizer + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: DIETClassifier + - name: EntitySynonymMapper + - name: ResponseSelector +``` + +For example, the `entities` attribute here is created by the `DIETClassifier` component. + +Every component can implement several methods from the `Component` base class; in a pipeline these different methods +will be called in a specific order. Assuming we added the following pipeline to our `config.yml`: + +```yaml +pipeline: + - name: "Component A" + - name: "Component B" + - name: "Last Component" +``` + +The image below shows the call order during the training of this pipeline: + + + +<img alt="Component Lifecycle" src={useBaseUrl("/img/component_lifecycle.png")} /> + +Before the first component is created using the `create` function, a so +called `context` is created (which is nothing more than a python dict). +This context is used to pass information between the components. 
For example, +one component can calculate feature vectors for the training data, store +that within the context and another component can retrieve these feature +vectors from the context and do intent classification. + +Initially the context is filled with all configuration values. The arrows +in the image show the call order and visualize the path of the passed +context. After all components are trained and persisted, the +final context dictionary is used to persist the model's metadata. + +### Multi-Intent Classification + +You can use Rasa Open Source components to split intents into multiple labels. For example, you can predict +multiple intents (`thank+goodbye`) or model hierarchical intent structure (`feedback+positive` being more similar +to `feedback+negative` than `chitchat`). +To do this, you need to use the [DIETClassifier](./components/intent-classifiers.mdx#dietclassifier) in your pipeline. +You'll also need to define these flags in whichever tokenizer you are using: + +* `intent_tokenization_flag`: Set it to `True`, so that intent labels are tokenized. + +* `intent_split_symbol`: Set it to the delimiter string that splits the intent labels. In this case `+`, default `_`. + +Read a [tutorial](https://blog.rasa.com/how-to-handle-multiple-intents-per-input-using-rasa-nlu-tensorflow-pipeline/) +on how to use multiple intents in Rasa. + +Here's an example configuration: + +```yaml +language: "en" + +pipeline: +- name: "WhitespaceTokenizer" + intent_tokenization_flag: True + intent_split_symbol: "_" +- name: "CountVectorsFeaturizer" +- name: "DIETClassifier" +``` + +### Comparing Pipelines + +Rasa gives you the tools to compare the performance of multiple pipelines on your data directly. +See [Comparing NLU Pipelines](./testing-your-assistant.mdx#comparing-nlu-pipelines) for more information. + +:::note +Intent classification is independent of entity extraction. So sometimes +NLU will get the intent right but entities wrong, or the other way around. +You need to provide enough data for both intents and entities. + +::: + + +## Choosing the Right Components + +There are components for entity extraction, for intent classification, response selection, +pre-processing, and others. +If you want to add your own component, for example to run a spell-check or to +do sentiment analysis, check out [Custom NLU Components](./components/custom-nlu-components.mdx). + +A pipeline usually consists of three main parts: + +### Tokenization + +For tokenization of English input, we recommend the [ConveRTTokenizer](./components/tokenizers.mdx#converttokenizer). +You can process other whitespace-tokenized (words are separated by spaces) languages +with the [WhitespaceTokenizer](./components/tokenizers.mdx#swhitespacetokenizer). If your language is not whitespace-tokenized, you should use a different tokenizer. +We support a number of different [tokenizers](./components/tokenizers.mdx), or you can +create your own [custom tokenizer](./components/custom-nlu-components.mdx). + +:::note +Some components further down the pipeline may require a specific tokenizer. You can find those requirements +on the individual components' `requires` parameter. If a required component is missing inside the pipeline, an +error will be thrown. + +::: + +### Featurization + +You need to decide whether to use components that provide pre-trained word embeddings or not. We recommend in cases +of small amounts of training data to start with pre-trained word embeddings. 
Once you have a larger amount of data +and ensure that most relevant words will be in your data and therefore will have a word embedding, supervised +embeddings, which learn word meanings directly from your training data, can make your model more specific to your domain. +If you can't find a pre-trained model for your language, you should use supervised embeddings. + +#### Pre-trained Embeddings + +The advantage of using pre-trained word embeddings in your pipeline is that if you have a training example like: +“I want to buy apples”, and Rasa is asked to predict the intent for “get pears”, your model already knows that the +words “apples” and “pears” are very similar. This is especially useful if you don't have enough training data. +We support a few components that provide pre-trained word embeddings: + +1. [MitieFeaturizer](./components/featurizers.mdx#mitiefeaturizer) + +2. [SpacyFeaturizer](./components/featurizers.mdx#spacyfeaturizer) + +3. [ConveRTFeaturizer](./components/featurizers.mdx#convertfeaturizer) + +4. [LanguageModelFeaturizer](./components/featurizers.mdx#languagemodelfeaturizer) + +If your training data is in English, we recommend using the [ConveRTFeaturizer](./components/featurizers.mdx#convertfeaturizer). +The advantage of the [ConveRTFeaturizer](./components/featurizers.mdx#convertfeaturizer) is that it doesn't treat each word of the user message independently, but +creates a contextual vector representation for the complete sentence. For example, if you +have a training example, like: “Can I book a car?”, and Rasa is asked to predict the intent for “I need a ride from +my place”, since the contextual vector representation for both examples are already very similar, the intent classified +for both is highly likely to be the same. This is also useful if you don't have enough training data. + +An alternative to [ConveRTFeaturizer](./components/featurizers.mdx#convertfeaturizer) is the [LanguageModelFeaturizer](./components/featurizers.mdx#languagemodelfeaturizer) which uses pre-trained language +models such as BERT, GPT-2, etc. to extract similar contextual vector representations for the complete sentence. See +[HFTransformersNLP](./components/language-models.mdx#hftransformersnlp) for a full list of supported language models. + +If your training data is not in English you can also use a different variant of a language model which +is pre-trained in the language specific to your training data. +For example, there are chinese (`bert-base-chinese`) and japanese (`bert-base-japanese`) variants of the BERT model. +A full list of different variants of +these language models is available in the +[official documentation of the Transformers library](https://huggingface.co/transformers/pretrained_models.html). + +[spacynlp](./components/featurizers.mdx#spacyfeaturizer) also provides word embeddings in many different languages, +so you can use this as another alternative, depending on the language of your training data. + +#### Supervised Embeddings + +If you don't use any pre-trained word embeddings inside your pipeline, you are not bound to a specific language +and can train your model to be more domain specific. For example, in general English, the word “balance” is closely +related to “symmetry”, but very different to the word “cash”. In a banking domain, “balance” and “cash” are closely +related and you'd like your model to capture that. 
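+
+A purely supervised setup along these lines relies only on sparse featurizers. The sketch below mirrors the pipeline from the component lifecycle example above (the `epochs` values are illustrative):
+
+```yaml
+language: en
+
+pipeline:
+  - name: WhitespaceTokenizer
+  - name: RegexFeaturizer
+  - name: LexicalSyntacticFeaturizer
+  - name: CountVectorsFeaturizer
+  - name: CountVectorsFeaturizer
+    analyzer: char_wb
+    min_ngram: 1
+    max_ngram: 4
+  - name: DIETClassifier
+    epochs: 100
+  - name: EntitySynonymMapper
+  - name: ResponseSelector
+    epochs: 100
+```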
+You should only use featurizers from the category [sparse featurizers](./components/featurizers.mdx#text-featurizers), such as +[CountVectorsFeaturizer](./components/featurizers.mdx#countvectorsfeaturizer), [RegexFeaturizer](./components/featurizers.mdx#regexfeaturizer) or [LexicalSyntacticFeaturizer](./components/featurizers.mdx#lexicalsyntacticfeaturizer), if you don't want to use +pre-trained word embeddings. + +### Intent Classification / Response Selectors + +Depending on your data you may want to only perform intent classification, entity recognition or response selection. +Or you might want to combine multiple of those tasks. We support several components for each of the tasks. +We recommend using [DIETClassifier](./components/intent-classifiers.mdx#dietclassifier) for intent classification and entity recognition +and [ResponseSelector](./components/selectors.mdx#responseselector) for response selection. + +By default all of these components consume all available features produced in the pipeline. +However, sometimes it makes sense to restrict the features that are used by a specific component. +For example, [ResponseSelector](./components/selectors.mdx#responseselector) is likely to perform better if no features from the +[RegexFeaturizer](./components/featurizers.mdx#regexfeaturizer) or [LexicalSyntacticFeaturizer](./components/featurizers.mdx#lexicalsyntacticfeaturizer) are used. +To achieve that, you can do the following: +Set an alias for every featurizer in your pipeline via the option `alias`. +By default the alias is set the the full featurizer class name, for example, `RegexFeaturizer`. +You can then specify, for example, on the [ResponseSelector](./components/selectors.mdx#responseselector) via the option `featurizers` what features from +which featurizers should be used. +If you don't set the option `featurizers` all available features will be used. + +Here is an example configuration file where the `DIETClassifier` is using all available features and the +`ResponseSelector` is just using the features from the `ConveRTFeaturizer` and the `CountVectorsFeaturizer`. + +```yaml (docs/sources/data/configs_for_docs/config_featurizers.yml) +``` + +### Entity Extraction + +Entity extraction involves parsing user messages for required pieces of information. Rasa Open Source +provides entity extractors for custom entities as well as pre-trained ones like dates and locations. +Here is a summary of the available extractors and what they are best used for: + +| Component | Requires | Model | Notes | +|------------------------|------------------|-------------------------------------------------|----------------------------------| +|`DIETClassifier` | N/A |conditional random field on top of a transformer |good for training custom entities | +|`CRFEntityExtractor` |sklearn-crfsuite |conditional random field |good for training custom entities | +|`SpacyEntityExtractor` |spaCy |averaged perceptron |provides pre-trained entities | +|`DucklingHTTPExtractor` |running duckling |context-free grammar |provides pre-trained entities | +|`MitieEntityExtractor` |MITIE |structured SVM |good for training custom entities | +|`EntitySynonymMapper` |existing entities |N/A |maps known synonyms | + +## Handling Class Imbalance + +Classification algorithms often do not perform well if there is a large class imbalance, +for example if you have a lot of training data for some intents and very little training data for others. +To mitigate this problem, you can use a `balanced` batching strategy. 
+This algorithm ensures that all classes are represented in every batch, or at least in +as many subsequent batches as possible, still mimicking the fact that some classes are more frequent than others. +Balanced batching is used by default. In order to turn it off and use a classic batching strategy include +`batch_strategy: sequence` in your config file. + +```yaml +language: "en" + +pipeline: +# - ... other components +- name: "DIETClassifier" + batch_strategy: sequence +``` + + +## Configuring Tensorflow + +TensorFlow allows configuring options in the runtime environment via +[TF Config submodule](https://www.tensorflow.org/api_docs/python/tf/config). Rasa Open Source supports a smaller subset of these +configuration options and makes appropriate calls to the `tf.config` submodule. +This smaller subset comprises of configurations that developers frequently use with Rasa Open Source. +All configuration options are specified using environment variables as shown in subsequent sections. + +### Optimizing CPU Performance + +:::note +We recommend that you configure these options only if you are an advanced TensorFlow user and understand the +implementation of the machine learning components in your pipeline. These options affect how operations are carried +out under the hood in Tensorflow. Leaving them at their default values is fine. + +::: + +Depending on the TensorFlow operations a NLU component or Core policy uses, you can leverage multi-core CPU +parallelism by tuning these options. + +#### Parallelizing One Operation + +Set `TF_INTRA_OP_PARALLELISM_THREADS` as an environment variable to specify the maximum number of threads that can be used +to parallelize the execution of one operation. For example, operations like `tf.matmul()` and `tf.reduce_sum` can be executed +on multiple threads running in parallel. The default value for this variable is `0` which means TensorFlow would +allocate one thread per CPU core. + +#### Parallelizing Multiple Operations + +Set `TF_INTER_OP_PARALLELISM_THREADS` as an environment variable to specify the maximum number of threads that can be used +to parallelize the execution of multiple **non-blocking** operations. These would include operations that do not have a +directed path between them in the TensorFlow graph. In other words, the computation of one operation does not affect the +computation of the other operation. The default value for this variable is `0` which means TensorFlow would allocate one thread per CPU core. + +To understand more about how these two options differ from each other, refer to this +[stackoverflow thread](https://stackoverflow.com/questions/41233635/meaning-of-inter-op-parallelism-threads-and-intra-op-parallelism-threads/41233901#41233901). + +### Optimizing GPU Performance + +#### Limiting GPU Memory Growth + +TensorFlow by default blocks all the available GPU memory for the running process. This can be limiting if you are running +multiple TensorFlow processes and want to distribute memory across them. To prevent Rasa Open Source from blocking all +of the available GPU memory, set the environment variable `TF_FORCE_GPU_ALLOW_GROWTH` to `True`. + +#### Restricting Absolute GPU Memory Available + +You may want to limit the absolute amount of GPU memory that can be used by a Rasa Open Source process. + +For example, say you have two visible GPUs(`GPU:0` and `GPU:1`) and you want to allocate 1024 MB from the first GPU +and 2048 MB from the second GPU. 
You can do this by setting the environment variable `TF_GPU_MEMORY_ALLOC` to `"0:1024, 1:2048"`. diff --git a/docs/docs/unexpected-input.mdx b/docs/docs/unexpected-input.mdx new file mode 100644 index 000000000000..743d4c2ce05d --- /dev/null +++ b/docs/docs/unexpected-input.mdx @@ -0,0 +1,241 @@ +--- +id: unexpected-input +sidebar_label: Handling Unexpected Input +title: Handling Unexpected Input +--- + +import useBaseUrl from '@docusaurus/useBaseUrl'; + +<a aria-hidden="true" tabIndex="-1" className="anchor enhancedAnchor" id="handling-unexpected-user-input"></a> + +All expected user inputs should be handled by the form we defined above, i.e. if the +user provides the information the bot asks for. However, in real situations, the user +will often behave differently. In this section we'll go through various forms of +“interjections” and how to handle them within Rasa. + +The decision to handle these types of user input should always come from reviewing +real conversations. You should first build part of your assistant, test it with real users +(whether that's your end user, or your colleague) and then add what's missing. You shouldn't +try to implement every possible edge case that you think might happen, because in the end +your users may never actually behave in that way. +[Rasa X](https://rasa.com/docs/rasa-x/) +is a tool that can help you review conversations and make these types of decisions. + +## Generic interjections + +If you have generic interjections that should always have the same single response no +matter the context, you can use the [Mapping Policy](./policies.mdx#mapping-policy) to handle these. It will always +predict the same action for an intent, and when combined with a forgetting mechanism, +you don't need to write any stories either. + +For example, let's say you see users having conversations like the following one with +your assistant, where they write a greeting in the middle of a conversation - +maybe because they were gone for a few minutes: + + + +<img alt="Greeting Interjection" src={useBaseUrl("/img/greet_interjection.png")} width="240" /> + +The greet intent is a good example where we will always give the same response and +yet we don't want the intent to affect the dialogue history. To do this, the response +must be an action that returns the `UserUtteranceReverted()` event to remove the +interaction from the dialogue history. + +First, open the `domain.yml` file and modify the greet intent and add a new block `\`actions\`` in +the file, next, add the `action_greet` as shown here: + +```yaml +intents: + - greet: {triggers: action_greet} + - bye + - thank + - faq + - contact_sales + - inform + +actions: + - action_greet +``` + +Remove any stories using the “greet” intent if you have them. + +Next, we need to define `action_greet`. Add the following action to your `actions.py` file: + +```python +from rasa_sdk import Action +from rasa_sdk.events import UserUtteranceReverted + +class ActionGreetUser(Action): +"""Revertible mapped action for utter_greet""" + +def name(self): + return "action_greet" + +def run(self, dispatcher, tracker, domain): + dispatcher.utter_template("utter_greet", tracker) + return [UserUtteranceReverted()] +``` + +To test the modified intents, we need to re-start our action server: + +```bash +rasa run actions +``` + +Then we can retrain the model, and try out our additions: + +```bash +rasa train +rasa shell +``` + +FAQs are another kind of generic interjections that should always get the same response. 
+For example, a user might ask a related FAQ in the middle of filling a form: + + + +<img alt="Generic Interjections" src={useBaseUrl("/img/generic_interjection.png")} width="240" /> + +To handle FAQs defined with retrieval actions, you can add a simple story that will be handled by the MemoizationPolicy: + +```md +## just sales, continue +* contact_sales + - sales_form + - active_loop{"name": "sales_form"} +* faq + - respond_faq + - sales_form + - active_loop{"name": null} +``` + +This will break out of the form and deal with the users FAQ question, and then return back to the original task. +For example: + + + +<img alt="Generic Interjection Handled" src={useBaseUrl("/img/generic_interjection_handled.png")} width="240" /> + +If you find it difficult to write stories in this format, you can always use [Interactive Learning](./writing-stories.mdx#using-interactive-learning) +to help you create them. + +As always, make sure to add an end to end test case to your test_stories.md file. + +## Contextual questions + +You can also handle contextual questions, +like the user asking the question “Why do you need to know that”. The user could ask this based on a certain slot +the bot has requested, and the response should differ for each slot. For example: + + + +<img alt="Contextual Interjection" src={useBaseUrl("/img/contextual_interjection.png")} width="240" /> + +To handle this, we need to make the `requested_slot` featurized, and assign it the categorical type: + +```yaml +slots: + requested_slot: + type: categorical + values: + - business_email + - company + - person_name + - use_case + - budget + - job_function +``` + +This means that Core will pay attention to the value of the slot when making a prediction +(read more about other [featurized slots](./domain.mdx#slots), whereas +unfeaturized slots are only used for storing information. The stories for this should look as follows: + +```md +## explain email +* contact_sales + - sales_form + - active_loop{"name": "sales_form"} + - slot{"requested_slot": "business_email"} +* explain + - utter_explain_why_email + - sales_form + - active_loop{"name": null} + +## explain budget +* contact_sales + - sales_form + - active_loop{"name": "sales_form"} + - slot{"requested_slot": "budget"} +* explain + - utter_explain_why_budget + - sales_form + - active_loop{"name": null} +``` + +We'll need to add the intent and utterances we just added to our `domain.yml` file: + +```yaml +intents: +- greet: {triggers: action_greet_user} +- bye +- thank +- faq +- explain + +responses: + utter_explain_why_budget: + - text: We need to know your budget to recommend a subscription + utter_explain_why_email: + - text: We need your email so we can contact you +``` + +Finally, we'll need to add some NLU data for the explain intent: + +```md +## intent:explain +- why +- why is that +- why do you need it +- why do you need to know that? +- could you explain why you need it? +``` + +Then you can retrain your bot and test it again: + +```bash +rasa train +rasa shell +``` + +:::note +You will need to add a story for each of the values of the `requested_slot` slot +for the bot to handle every case of “Why do you need to know that” + +::: + +Don't forget to add a few end to end stories to your `test_stories.md` for testing as well. 
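+
+For example, an end-to-end test story covering the contextual interruption above could look roughly like this (a sketch — the user messages after each intent are only illustrative):
+
+```md
+## explain email, end-to-end test
+* contact_sales: I'd like to talk to sales
+    - sales_form
+    - active_loop{"name": "sales_form"}
+    - slot{"requested_slot": "business_email"}
+* explain: why do you need to know that?
+    - utter_explain_why_email
+    - sales_form
+    - active_loop{"name": null}
+```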
+
+:::note
+Here's a minimal checklist of files we modified to handle unexpected user input:
+
+* `actions.py`: Define `action_greet`
+
+* `data/nlu.md`: Add training data for an `explain` intent
+
+* `domain.yml`:
+
+  * Map intent `greet` to `action_greet`
+
+  * Make `requested_slot` a categorical slot with all required slots as values
+
+  * Add the `explain` intent
+
+  * Add responses for contextual question interruptions
+
+* `data/stories.md`:
+
+  * Remove stories using mapped intents if you have them
+
+  * Add stories with FAQ & contextual interruptions in the middle of filling a form
+
+:::
diff --git a/docs/docs/writing-stories.mdx b/docs/docs/writing-stories.mdx
new file mode 100644
index 000000000000..6cd1e8fa08f1
--- /dev/null
+++ b/docs/docs/writing-stories.mdx
@@ -0,0 +1,27 @@
+---
+id: writing-stories
+sidebar_label: Writing Stories
+title: Writing Stories
+---
+import useBaseUrl from '@docusaurus/useBaseUrl';
+
+<!-- TODO add best practices -->
+
+:::info tbc
+More best practices to come, we are still working on this page!
+:::
+
+## Using Interactive Learning
+
+Interactive learning makes it easy to write stories by talking to your bot and providing feedback.
+This is a powerful way to explore what your bot can do, and the easiest way to fix any mistakes
+it makes. One advantage of machine learning-based dialogue is that when
+your bot doesn't know how to do something yet, you can just teach it!
+
+In Rasa Open Source, you can run interactive learning in the command line with
+[`rasa interactive`](./command-line-interface.mdx#interactive-learning).
+[Rasa X](http://rasa.com/docs/rasa-x) provides a UI for interactive learning, and you can use any user conversation
+as a starting point. See [Talk to Your Bot](https://rasa.com/docs/rasa-x/user-guide/share-assistant/#talk-to-your-bot)
+in the Rasa X docs.
+
+<!-- TODO info about using interactive learning locally, can come from old docs -->
diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
new file mode 100644
index 000000000000..deefedd5c813
--- /dev/null
+++ b/docs/docusaurus.config.js
@@ -0,0 +1,108 @@
+const path = require('path');
+const remarkSources = require('remark-sources');
+const remarkCollapse = require('remark-collapse');
+const { remarkProgramOutput } = require("./plugins/program_output");
+
+let versions = [];
+try {
+  versions = require('./versions.json');
+} catch (ex) {
+  // Nothing to do here, in dev mode, only
+  // one version of the doc is available
+}
+
+const legacyVersion = {
+  label: 'Legacy 1.x',
+  to: 'https://legacy-docs-v1.rasa.com',
+};
+
+module.exports = {
+  title: 'Rasa Open Source Documentation',
+  tagline: 'Rasa Open Source Documentation',
+  url: 'https://rasa.com',
+  // FIXME: when deploying this for real, change to '/docs/rasa/'
+  baseUrl: '/docs/rasa/next/',
+  favicon: 'img/favicon.ico',
+  organizationName: 'RasaHQ',
+  projectName: 'rasa',
+  themeConfig: {
+    navbar: {
+      title: 'Rasa Open Source',
+      logo: {
+        alt: 'Rasa',
+        src: 'https://rasa.com/static/60e441f8eadef13bea0cc790c8cf188b/rasa-logo.svg',
+      },
+      items: [
+        {
+          label: 'Docs',
+          to: '/', // "fake" link
+          position: 'left',
+          items: versions.length > 0 ?
[ + { + label: versions[0], + to: '/', + activeBaseRegex: versions[0], + }, + ...versions.slice(1).map((version) => ({ + label: version, + to: `${version}/`, + activeBaseRegex: version, + })), + { + label: 'Master/Unreleased', + to: 'next/', + activeBaseRegex: `next`, + }, + legacyVersion, + ] : [ + { + label: 'Master/Unreleased', + to: '/', + activeBaseRegex: `/`, + }, + legacyVersion, + ], + }, + { + href: 'https://github.com/rasahq/rasa', + label: 'GitHub', + position: 'right', + }, + ], + }, + footer: { + style: 'dark', + copyright: `Copyright © ${new Date().getFullYear()} Rasa Technologies GmbH`, + }, + gtm: { + containerID: 'GTM-PK448GB', + }, + }, + themes: [ + ['@docusaurus/theme-classic', { + customCss: require.resolve('./src/css/custom.css'), + }], + path.resolve(__dirname, './themes/theme-live-codeblock'), + ], + plugins: [ + ['@docusaurus/plugin-content-docs', { + // https://v2.docusaurus.io/docs/next/docs-introduction/#docs-only-mode + routeBasePath: '/', + // It is recommended to set document id as docs home page (`docs/` path). + homePageId: 'index', + sidebarPath: require.resolve('./sidebars.js'), + editUrl: 'https://github.com/rasahq/rasa/edit/master/docs/', + remarkPlugins: [ + [ remarkCollapse, { test: '' }], + remarkSources, + remarkProgramOutput + ], + }], + ['@docusaurus/plugin-sitemap', { + cacheTime: 600 * 1000, // 600 sec - cache purge period + changefreq: 'weekly', + priority: 0.5, + }], + path.resolve(__dirname, './plugins/google-tagmanager'), + ], +}; diff --git a/docs/glossary.rst b/docs/glossary.rst deleted file mode 100644 index 4aba1dcb21dd..000000000000 --- a/docs/glossary.rst +++ /dev/null @@ -1,94 +0,0 @@ -:desc: Glossary for all Rasa-related terms - -.. _glossary: - -Glossary -======== - -.. glossary:: - - :ref:`Action <actions>` - A single step that a bot takes in a conversation (e.g. calling an API or sending a response back to the user). - - Annotation - Adding labels to messages and conversations so that they can be used to train a model. - - CMS - A Content Management System (CMS) can be used to store bot responses externally instead of directly including it as part of the domain. This provides more flexibility in changing them as they are not tightly-coupled with the training data. - - :ref:`Custom Action <custom-actions>` - An action written by a Rasa developer that can run arbitrary code mainly to interact with the outside world. - - :ref:`Default Action <default-actions>` - A built-in action that comes with predefined functionality. - - :ref:`Domain <domains>` - Defines the inputs and outputs of an assistant. - - It includes a list of all the intents, entities, slots, actions, and forms that the assistant knows about. - - :ref:`Entity <entity-extraction>` - Structured information that can be extracted from a user message. - - For example a telephone number, a person's name, a location, the name of a product - - :ref:`Event <events>` - All conversations in Rasa are represented as a sequence of events. For instance, a ``UserUttered`` represents a user entering a message, and an ``ActionExecuted`` represents the assistant executing an action. You can learn more about them :ref:`here <events>`. - - :ref:`Form <forms>` - A type of custom action that asks the user for multiple pieces of information. - - For example, if you need a city, a cuisine, and a price range to recommend a restaurant, you can create a restaurant form to do that. You can describe any business logic inside a form. 
For example, if you want to ask for a particular neighbourhood if a user mentions a large city like Los Angeles, you can write that logic inside the form. - - Happy / Unhappy Paths - If your assistant asks a user for some information and the user provides it, we call that a happy path. Unhappy paths are all the possible edge cases of a bot. For example, the user refusing to give some input, changing the topic of conversation, or correcting something they said earlier. - - Intent - Something that a user is trying to convey or accomplish (e,g., greeting, specifying a location). - - :ref:`Interactive Learning <interactive-learning>` - A mode of training the bot where the user provides feedback to the bot while talking to it. - - This is a powerful way to write complicated stories by enabling users to explore what a bot can do and easily fix any mistakes it makes. - - NLG - Natural Language Generation (NLG) is the process of generating natural language messages to send to a user. - - Rasa uses a simple template-based approach for NLG. Data-driven approaches (such as neural NLG) can be implemented by creating a custom NLG component. - - :ref:`Rasa NLU <about-rasa-nlu>` - Natural Language Understanding (NLU) deals with parsing and understanding human language into a structured format. - - Rasa NLU is the part of Rasa that performs intent classification and entity extraction. - - :ref:`Pipeline <choosing-a-pipeline>` - A Rasa bot's NLU system is defined by a pipeline, which is a list of NLU components (see "Rasa NLU Component") in a particular order. A user input is processed by each component one by one before finally giving out the structured output. - - :ref:`Policy <policies>` - Policies make decisions on how conversation flow should proceed. At every turn, the policy which predicts the next action with the highest confidence will be used. A Core model can have multiple policies included, and the policy whose prediction has the highest confidence decides the next action to be taken. - - :ref:`Rasa Core <about-rasa-core>` - The dialogue engine that decides on what to do next in a conversation based on the context. - - :ref:`Rasa NLU Component <components>` - An element in the Rasa NLU pipeline (see "Pipeline"). - - Incoming messages are processed by a sequence of components called a pipeline. A component can perform tasks ranging from entity extraction to intent classification to pre-processing. - - :ref:`Slot <slots>` - A key-value store that Rasa uses to track information over the course of a conversation. - - :ref:`Story <stories>` - A conversation between a user and a bot annotated with the intent / entities of the users' messages as well as the sequence of actions to be performed by the bot - - :ref:`Template / Response / Utterance <responses>` - A message template that is used to respond to a user. This can include text, buttons, images, and other attachments. - - User Goal - A goal that a user wants to achieve. - - For example, a user may have the goal of booking a table at a restaurant. Another user may just want to make small talk. Sometimes, the user expresses their goal with a single message, e.g. "I want to book a table at a restaurant". Other times the assistant may have to ask a few questions to understand how to help the user. Note: Many other places refer to the user goal as the "intent", but in Rasa terminology, an intent is associated with every user message. - - Word embedding / Word vector - A vector of floating point numbers which represent the meaning of a word. 
Words which have similar meanings should have vectors which point in almost the same direction. Word embeddings are often used as an input to machine learning algorithms. - diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index 29ea2c748274..000000000000 --- a/docs/index.rst +++ /dev/null @@ -1,111 +0,0 @@ -:desc: Learn more about open-source natural language processing library Rasa NLU - for intent classification and entity extraction in on premise chatbots. - -.. _index: - -Build contextual chatbots and AI assistants with Rasa -===================================================== - -.. note:: - These docs are for Rasa 1.0 and later. Docs for older versions are at http://legacy-docs.rasa.com. - - This is the documentation for version |release| of Rasa. Please make sure you are reading the documentation - that matches the version you have installed. - - -Rasa is an open source machine learning framework for automated text and voice-based conversations. -Understand messages, hold conversations, and connect to messaging channels and APIs. - - -.. toctree:: - :maxdepth: 1 - :caption: User Guide - :hidden: - - user-guide/installation - user-guide/rasa-tutorial - user-guide/command-line-interface - user-guide/architecture - user-guide/messaging-and-voice-channels - user-guide/evaluating-models - user-guide/validate-files - user-guide/running-the-server - user-guide/running-rasa-with-docker - user-guide/cloud-storage - -.. toctree:: - :maxdepth: 1 - :caption: NLU - :hidden: - - About <nlu/about> - nlu/using-nlu-only - nlu/training-data-format - nlu/choosing-a-pipeline - nlu/language-support - nlu/entity-extraction - nlu/components - -.. toctree:: - :maxdepth: 1 - :caption: Core - :hidden: - - About <core/about> - core/stories - core/domains - core/responses - core/actions - core/policies - core/slots - core/forms - core/retrieval-actions - core/interactive-learning - core/fallback-actions - core/knowledge-bases - -.. toctree:: - :maxdepth: 1 - :caption: Conversation Design - :hidden: - - dialogue-elements/dialogue-elements - dialogue-elements/small-talk - dialogue-elements/completing-tasks - dialogue-elements/guiding-users - -.. toctree:: - :maxdepth: 1 - :hidden: - :caption: API Reference - - api/action-server - api/http-api - api/jupyter-notebooks - api/agent - api/custom-nlu-components - api/events - api/tracker - api/tracker-stores - api/event-brokers - api/training-data-importers - api/featurization - migration-guide - changelog - -.. toctree:: - :maxdepth: 1 - :hidden: - :caption: Migrate from (beta) - - Dialogflow <migrate-from/google-dialogflow-to-rasa> - Wit.ai <migrate-from/facebook-wit-ai-to-rasa> - LUIS <migrate-from/microsoft-luis-to-rasa> - IBM Watson <migrate-from/ibm-watson-to-rasa> - -.. toctree:: - :maxdepth: 1 - :hidden: - :caption: Reference - - glossary diff --git a/docs/migrate-from/facebook-wit-ai-to-rasa.rst b/docs/migrate-from/facebook-wit-ai-to-rasa.rst deleted file mode 100644 index 07a1e4f9ffc5..000000000000 --- a/docs/migrate-from/facebook-wit-ai-to-rasa.rst +++ /dev/null @@ -1,100 +0,0 @@ -:desc: Open source alternative to Facebook's Wit.ai for conversational bots and NLP - -.. _facebook-wit-ai-to-rasa: - -Rasa as open source alternative to Facebook's Wit.ai - Migration Guide -====================================================================== - -.. edit-link:: - -This guide shows you how to migrate your application built with Facebook's Wit.ai to Rasa. 
Here are a few reasons why we see developers switching: - -* **Faster**: Runs locally - no http requests and server round trips required -* **Customizable**: Tune models and get higher accuracy with your data set -* **Open source**: No risk of vendor lock-in - Rasa is under the Apache 2.0 licence and you can use it in commercial projects - - -.. raw:: html - - In addition, our open source tools allow developers to build contextual AI assistants and manage dialogues with machine learning instead of rules - learn more in <a class="reference external" href="http://blog.rasa.com/a-new-approach-to-conversational-software/" target="_blank">this blog post</a>. - - -Let's get started with migrating your application from Wit.ai to Rasa: - - -Step 1: Export your Training Data from Wit.ai -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Navigate to your app's setting page by clicking the **Settings** icon in the upper right corner. Scroll down to **Export your data** and hit the button **Download .zip with your data**. - -This will download a file with a ``.zip`` extension. Unzip this file to create a folder. The file you want from your download is called ``expressions.json`` - -Step 2: Create a Rasa Project -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To create a Rasa project, run: - -.. code-block:: bash - - rasa init - -This will create a directory called ``data``. -Remove the files in this directory, and -move the expressions.json file into this directory. - -.. code-block:: bash - - rm -r data/* - mv /path/to/expressions.json data/ - - - -Step 3: Train your NLU model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To train a model using your Wit data, run: - -.. code-block:: bash - - rasa train nlu - -Step 4: Test your NLU model -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Let's see how your NLU model will interpret some test messages. -To start a testing session, run: - -.. code-block:: bash - - rasa shell nlu - -This will prompt your for input. -Type a test message and press 'Enter'. -The output of your NLU model will be printed to the screen. -You can keep entering messages and test as many as you like. -Press 'control + C' to quit. - - -Step 5: Start a Server with your NLU Model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To start a server with your NLU model, run: - -.. code-block:: bash - - rasa run nlu - -This will start a server listening on port 5005. - -To send a request to the server, run: - -.. copyable:: - - curl 'localhost:5005/model/parse?emulation_mode=wit' -d '{"text": "hello"}' - -The ``emulation_mode`` parameter tells Rasa that you want your json -response to have the same format as you would get from wit.ai. -You can also leave it out to get the result in the usual Rasa format. - - -Join the `Rasa Community Forum <https://forum.rasa.com/>`_ and let us know how your migration went! diff --git a/docs/migrate-from/google-dialogflow-to-rasa.rst b/docs/migrate-from/google-dialogflow-to-rasa.rst deleted file mode 100644 index 82cdaeba82b1..000000000000 --- a/docs/migrate-from/google-dialogflow-to-rasa.rst +++ /dev/null @@ -1,121 +0,0 @@ -:desc: Open source alternative to Google Dialogflow for conversational bots and NLP - -.. _google-dialogflow-to-rasa: - -Rasa as open source alternative to Google Dialogflow - Migration Guide -====================================================================== - -.. edit-link:: - -This guide shows you how to migrate your application built with Google Dialogflow to Rasa. 
Here are a few reasons why we see developers switching: - -* **Faster**: Runs locally - no http requests and server round trips required -* **Customizable**: Tune models and get higher accuracy with your data set -* **Open source**: No risk of vendor lock-in - Rasa is under the Apache 2.0 licence and you can use it in commercial projects - - -.. raw:: html - - In addition, our open source tools allow developers to build contextual AI assistants and manage dialogues with machine learning instead of rules - learn more in <a class="reference external" href="http://blog.rasa.com/a-new-approach-to-conversational-software/" target="_blank">this blog post</a>. - <br> - <br> - -.. raw:: html - - Let's get started with migrating your application from Dialogflow to Rasa (you can find a more detailed tutorial <a class="reference external" href="http://blog.rasa.com/how-to-migrate-your-existing-google-dialogflow-assistant-to-rasa/" target="_blank">here</a>): - - - - - -Step 1: Export your data from Dialogflow -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Navigate to your agent's settings by clicking the gear icon. - -.. image:: ../_static/images/dialogflow_export.png - :width: 240 - :alt: Dialogflow Export - -Click on the 'Export and Import' tab and click on the 'Export as ZIP' button. - -.. image:: ../_static/images/dialogflow_export_2.png - :width: 675 - :alt: Dialogflow Export 2 - - -This will download a file with a ``.zip`` extension. Unzip this file to create a folder. - -Step 2: Create a Rasa Project -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To create a Rasa project, run: - -.. code-block:: bash - - rasa init - -This will create a directory called ``data``. -Remove the files in this directory, and -move your unzipped folder into this directory. - -.. code-block:: bash - - rm -r data/* - mv testagent data/ - -Step 3: Train your NLU model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To train a model using your dialogflow data, run: - -.. code-block:: bash - - rasa train nlu - -Step 4: Test your NLU model -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Let's see how your NLU model will interpret some test messages. -To start a testing session, run: - -.. code-block:: bash - - rasa shell nlu - -This will prompt your for input. -Type a test message and press 'Enter'. -The output of your NLU model will be printed to the screen. -You can keep entering messages and test as many as you like. -Press 'control + C' to quit. - - -Step 5: Start a Server with your NLU Model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To start a server with your NLU model, run: - -.. code-block:: bash - - rasa run nlu - -This will start a server listening on port 5005. - -To send a request to the server, run: - -.. copyable:: - - curl 'localhost:5005/model/parse?emulation_mode=dialogflow' -d '{"text": "hello"}' - -The ``emulation_mode`` parameter tells Rasa that you want your json -response to have the same format as you would get from dialogflow. -You can also leave it out to get the result in the usual Rasa format. - -Terminology: -^^^^^^^^^^^^ - -The words ``intent``, ``entity``, and ``utterance`` have the same meaning in Rasa as they do in Dialogflow. -In Dialogflow, there is a concept called ``Fulfillment``. In Rasa we call this a `Custom Action </docs/rasa/core/actions/#custom-actions>`_. - - -Join the `Rasa Community Forum <https://forum.rasa.com/>`_ and let us know how your migration went! 
diff --git a/docs/migrate-from/ibm-watson-to-rasa.rst b/docs/migrate-from/ibm-watson-to-rasa.rst deleted file mode 100644 index aba7ba83acad..000000000000 --- a/docs/migrate-from/ibm-watson-to-rasa.rst +++ /dev/null @@ -1,31 +0,0 @@ -:desc: Open source alternative to IBM Watson for conversational bots and NLP - -.. _ibm-watson-to-rasa: - -Rasa as open source alternative to IBM Watson - Migration Tips -============================================================== - -.. edit-link:: - - - -.. raw:: html - - <br> There is no support for IBM Watson yet. However, a group of community members is working on a way to use <a class="reference external" href="https://developer.ibm.com/tutorials/learn-how-to-export-import-a-watson-assistant-workspace/" target="_blank">exported IBM Watson workspaces</a> in Rasa. If you're interested in that, check out our <a class="reference external" href="https://forum.rasa.com/" target="_blank">Community Forum</a>. - - -At Rasa, we hear a few different reasons why developers switch from cloud-based tools like IBM Watson: - -* **Faster**: Runs locally - no https requests and server round trips required -* **Customizable**: Tune models and get higher accuracy with your data set -* **Open source**: No risk of vendor lock-in - Rasa comes with an Apache 2.0 licence and you can use it in commercial projects - - -.. raw:: html - - In addition, our open source tools allow developers to build contextual AI assistants and manage dialogues with machine learning instead of rules - learn more in <a class="reference external" href="http://blog.rasa.com/a-new-approach-to-conversational-software/" target="_blank">this blog post</a>. - - -.. button:: - :link: ../get_started_step1/ - :text: Learn more about Rasa diff --git a/docs/migrate-from/microsoft-luis-to-rasa.rst b/docs/migrate-from/microsoft-luis-to-rasa.rst deleted file mode 100644 index b00279acf2a8..000000000000 --- a/docs/migrate-from/microsoft-luis-to-rasa.rst +++ /dev/null @@ -1,111 +0,0 @@ -:desc: Open source alternative to Microsoft LUIS for conversational bots and NLP - -.. _microsoft-luis-to-rasa: - -Rasa as open source alternative to Microsoft LUIS - Migration Guide -=================================================================== - -.. edit-link:: - -This guide shows you how to migrate your application built with Microsoft LUIS to Rasa. Here are a few reasons why we see developers switching: - -* **Faster**: Runs locally - no http requests and server round trips required -* **Customizable**: Tune models and get higher accuracy with your data set -* **Open source**: No risk of vendor lock-in - Rasa is under the Apache 2.0 licence and you can use it in commercial projects - - -.. raw:: html - - In addition, our open source tools allow developers to build contextual AI assistants and manage dialogues with machine learning instead of rules - learn more in <a class="reference external" href="http://blog.rasa.com/a-new-approach-to-conversational-software/" target="_blank">this blog post</a>. - - -Let's get started with migrating your application from LUIS to Rasa: - - -Step 1: Export your Training Data from LUIS -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Go to your list of `LUIS applications <https://www.luis.ai/applications>`_ and click -on the three dots menu next to the app you want to export. - -.. image:: ../_static/images/luis_export.png - :width: 240 - :alt: LUIS Export - -Select 'Export App'. This will download a file with a ``.json`` extension that can be imported directly into Rasa. 
- -Step 2: Create a Rasa Project -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To create a Rasa project, run: - -.. code-block:: bash - - rasa init - -This will create a directory called ``data``. -Remove the files in this directory, and -move your json file into this directory. - -.. code-block:: bash - - rm -r data/* - mv /path/to/file.json data/ - -Step 3: Train your NLU model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To train a model using your LUIS data, run: - -.. code-block:: bash - - rasa train nlu - -Step 4: Test your NLU model -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Let's see how your NLU model will interpret some test messages. -To start a testing session, run: - -.. code-block:: bash - - rasa shell nlu - -This will prompt your for input. -Type a test message and press 'Enter'. -The output of your NLU model will be printed to the screen. -You can keep entering messages and test as many as you like. -Press 'control + C' to quit. - - -Step 5: Start a Server with your NLU Model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To start a server with your NLU model, run: - -.. code-block:: bash - - rasa run nlu - -This will start a server listening on port 5005. - -To send a request to the server, run: - -.. copyable:: - - curl 'localhost:5005/model/parse?emulation_mode=luis' -d '{"text": "hello"}' - -The ``emulation_mode`` parameter tells Rasa that you want your json -response to have the same format as you would get from LUIS. -You can also leave it out to get the result in the usual Rasa format. - -Terminology: -^^^^^^^^^^^^ - -The words ``intent``, ``entity``, and ``utterance`` have the same meaning in Rasa as they do -in LUIS. -LUIS's ``patterns`` feature is very similar to Rasa NLU's `regex features </docs/rasa/nlu/training-data-format/#regular-expression-features>`_ -LUIS's ``phrase lists`` feature does not currently have an equivalent in Rasa NLU. - - -Join the `Rasa Community Forum <https://forum.rasa.com/>`_ and let us know how your migration went! diff --git a/docs/migration-guide.rst b/docs/migration-guide.rst deleted file mode 100644 index 4b64df86b7ff..000000000000 --- a/docs/migration-guide.rst +++ /dev/null @@ -1,108 +0,0 @@ -:desc: Information about changes between major versions of chatbot framework - Rasa Core and how you can migrate from one version to another. - -.. _migration-guide: - -Migration Guide -=============== - -.. edit-link:: - -This page contains information about changes between major versions and -how you can migrate from one version to another. - -.. _migration-to-rasa-1.3: - -Rasa 1.2 to Rasa 1.3 --------------------- -.. warning:: - - This is a release **breaking backwards compatibility**. - It is not possible to load previously trained models. Please make sure to retrain a - model before trying to use it with this improved version. - -General -~~~~~~~ -- Default parameters of ``EmbeddingIntentClassifier`` are changed. See :ref:`components` for details. - Architecture implementation is changed as well, so **old trained models cannot be loaded**. - Default parameters and architecture for ``EmbeddingPolicy`` are changed. See :ref:`policies` for details. - It uses transformer instead of lstm. **Old trained models cannot be loaded**. - They use ``inner`` similarity and ``softmax`` loss by default instead of - ``cosine`` similarity and ``margin`` loss (can be set in config file). - They use ``balanced`` batching strategy by default to counteract class imbalance problem. - The meaning of ``evaluate_on_num_examples`` is changed. 
If it is non zero, random examples will be - picked by stratified split and used as **hold out** validation set, so they will be excluded from training data. - We suggest to set it to zero (default) if data set contains a lot of unique examples of dialogue turns. - Removed ``label_tokenization_flag`` and ``label_split_symbol`` from component. Instead moved intent splitting to ``Tokenizer`` components via ``intent_tokenization_flag`` and ``intent_split_symbol`` flag. -- Default ``max_history`` for ``EmbeddingPolicy`` is ``None`` which means it'll use - the ``FullDialogueTrackerFeaturizer``. We recommend to set ``max_history`` to - some finite value in order to use ``MaxHistoryTrackerFeaturizer`` - for **faster training**. See :ref:`featurization` for details. - We recommend to increase ``batch_size`` for ``MaxHistoryTrackerFeaturizer`` - (e.g. ``"batch_size": [32, 64]``) -- **Compare** mode of ``rasa train core`` allows the whole core config comparison. - Therefore, we changed the naming of trained models. They are named by config file - name instead of policy name. Old naming style will not be read correctly when - creating **compare** plots (``rasa test core``). Please remove old trained models - in comparison folder and retrain. Normal core training is unaffected. -- We updated the **evaluation metric** for our **NER**. We report the weighted precision and f1-score. - So far we included ``no-entity`` in this report. However, as most of the tokens actually don't have - an entity set, this will influence the weighted precision and f1-score quite a bit. From now on we - exclude ``no-entity`` from the evaluation. The overall metrics now only include proper entities. You - might see a drop in the performance scores when running the evaluation again. - -.. _migration-to-rasa-1.0: - -Rasa NLU 0.14.x and Rasa Core 0.13.x to Rasa 1.0 ------------------------------------------------- -.. warning:: - - This is a release **breaking backwards compatibility**. - It is not possible to load previously trained models. Please make sure to retrain a - model before trying to use it with this improved version. - -General -~~~~~~~ - -- The scripts in ``rasa.core`` and ``rasa.nlu`` can no longer be executed. To train, test, run, ... an NLU or Core - model, you should now use the command line interface ``rasa``. The functionality is, for the most part, the same as before. - Some changes in commands reflect the combined training and running of NLU and Core models, but NLU and Core can still - be trained and used individually. If you attempt to run one of the old scripts in ``rasa.core`` or ``rasa.nlu``, - an error is thrown that points you to the command you - should use instead. See all the new commands at :ref:`command-line-interface`. - -- If you have written a custom output channel, all ``send_`` methods subclassed - from the ``OutputChannel`` class need to take an additional ``**kwargs`` - argument. You can use these keyword args from your custom action code or the - templates in your domain file to send any extra parameters used in your - channel's send methods. - -- If you were previously importing the ``Button`` or ``Element`` classes from - ``rasa_core.dispatcher``, these are now to be imported from ``rasa_sdk.utils``. - -- Rasa NLU and Core previously used `separate configuration files - <https://legacy-docs.rasa.com/docs/nlu/0.15.1/migrations/?&_ga=2.218966814.608734414.1560704810-314462423.1543594887#id1>`_. 
- These two files should be merged into a single file either named ``config.yml``, or passed via the ``--config`` parameter. - -Script parameters -~~~~~~~~~~~~~~~~~ -- All script parameter names have been unified to follow the same schema. - Any underscores (``_``) in arguments have been replaced with dashes (``-``). - For example: ``--max_history`` has been changed to ``--max-history``. You can - see all of the script parameters in the ``--help`` output of the commands - in the :ref:`command-line-interface`. - -- The ``--num_threads`` parameter was removed from the ``run`` command. The - server will always run single-threaded, but will now run asynchronously. If you want to - make use of multiple processes, feel free to check out the `Sanic server - documentation <https://sanic.readthedocs.io/en/latest/sanic/deploying.html#running-via-gunicorn>`_. - -- To avoid conflicts in script parameter names, connectors in the ``run`` command now need to be specified with - ``--connector``, as ``-c`` is no longer supported. The maximum history in the ``rasa visualize`` command needs to be - defined with ``--max-history``. Output paths and log files cannot be specified with ``-o`` anymore; ``--out`` and - ``--log-file`` should be used. NLU data has been standarized to be ``--nlu`` and the name of - any kind of data files or directory to be ``--data``. - -HTTP API -~~~~~~~~ -- There are numerous HTTP API endpoint changes which can be found `here <http://rasa.com/docs/rasa/api/http-api/>`_. diff --git a/docs/nlu/about.rst b/docs/nlu/about.rst deleted file mode 100644 index 176cda07d5aa..000000000000 --- a/docs/nlu/about.rst +++ /dev/null @@ -1,29 +0,0 @@ -:desc: Learn more about open-source natural language processing library Rasa NLU - for intent classification and entity extraction in on premise chatbots. - -.. _about-rasa-nlu: - -Rasa NLU: Language Understanding for Chatbots and AI assistants -=============================================================== - - -Rasa NLU is an open-source natural language processing tool for intent classification, response retrieval and -entity extraction in chatbots. For example, taking a sentence like - -.. code-block:: console - - "I am looking for a Mexican restaurant in the center of town" - -and returning structured data like - -.. code-block:: json - - { - "intent": "search_restaurant", - "entities": { - "cuisine" : "Mexican", - "location" : "center" - } - } - -Rasa NLU used to be a separate library, but it is now part of the Rasa framework. diff --git a/docs/nlu/choosing-a-pipeline.rst b/docs/nlu/choosing-a-pipeline.rst deleted file mode 100644 index 8a906e607a36..000000000000 --- a/docs/nlu/choosing-a-pipeline.rst +++ /dev/null @@ -1,351 +0,0 @@ -:desc: Set up a pipeline of pre-trained word vectors form GloVe or fastText - or fit them specifically on your dataset using the TensorFlow pipeline - for open source NLU. - -.. _choosing-a-pipeline: - -Choosing a Pipeline -=================== - -.. edit-link:: - -Choosing an NLU pipeline allows you to customize your model and finetune -it on your dataset. - -.. contents:: - :local: - - -The Short Answer ----------------- - -If you have less than 1000 total training examples, and there is a spaCy model for your -language, use the ``pretrained_embeddings_spacy`` pipeline: - -.. literalinclude:: ../../sample_configs/config_pretrained_embeddings_spacy.yml - :language: yaml - - -If you have 1000 or more labelled utterances, -use the ``supervised_embeddings`` pipeline: - -.. 
literalinclude:: ../../sample_configs/config_supervised_embeddings.yml - :language: yaml - - -A Longer Answer ---------------- - -The two most important pipelines are ``supervised_embeddings`` and ``pretrained_embeddings_spacy``. -The biggest difference between them is that the ``pretrained_embeddings_spacy`` pipeline uses pre-trained -word vectors from either GloVe or fastText. The ``supervised_embeddings`` pipeline, on the other hand, -doesn't use any pre-trained word vectors, but instead fits these specifically for your dataset. - - -pretrained_embeddings_spacy -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The advantage of the ``pretrained_embeddings_spacy`` pipeline is that if you have a training example like: -"I want to buy apples", and Rasa is asked to predict the intent for "get pears", your model -already knows that the words "apples" and "pears" are very similar. This is especially useful -if you don't have very much training data. - -supervised_embeddings -~~~~~~~~~~~~~~~~~~~~~ - -The advantage of the ``supervised_embeddings`` pipeline is that your word vectors will be customised -for your domain. For example, in general English, the word "balance" is closely related to "symmetry", -but very different to the word "cash". In a banking domain, "balance" and "cash" are closely related -and you'd like your model to capture that. This pipeline doesn't use a language-specific model, -so it will work with any language that you can tokenize (on whitespace or using a custom tokenizer). - -You can read more about this topic `here <https://medium.com/rasa-blog/supervised-word-vectors-from-scratch-in-rasa-nlu-6daf794efcd8>`__ . - -MITIE -~~~~~ - -You can also use MITIE as a source of word vectors in your pipeline, see :ref:`section_mitie_pipeline`. The MITIE backend performs well for small datasets, but training can take very long if you have more than a couple of hundred examples. - -However, we do not recommend that you use it as mitie support is likely to be deprecated in a future release. - -Comparing different pipelines for your data -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Rasa gives you the tools to compare the performance of both of these pipelines on your data directly, -see :ref:`comparing-nlu-pipelines`. - -.. note:: - - Intent classification is independent of entity extraction. So sometimes - NLU will get the intent right but entities wrong, or the other way around. - You need to provide enough data for both intents and entities. - - -Class imbalance ---------------- - -Classification algorithms often do not perform well if there is a large `class imbalance`, -for example if you have a lot of training data for some intents and very little training data for others. -To mitigate this problem, rasa's ``supervised_embeddings`` pipeline uses a ``balanced`` batching strategy. -This algorithm ensures that all classes are represented in every batch, or at least in -as many subsequent batches as possible, still mimicking the fact that some classes are more frequent than others. -Balanced batching is used by default. In order to turn it off and use a classic batching strategy include -``batch_strategy: sequence`` in your config file. - -.. code-block:: yaml - - language: "en" - - pipeline: - - name: "CountVectorsFeaturizer" - - name: "EmbeddingIntentClassifier" - batch_strategy: sequence - - -Multiple Intents ----------------- - -If you want to split intents into multiple labels, -e.g. 
for predicting multiple intents or for modeling hierarchical intent structure, -you can only do this with the supervised embeddings pipeline. -To do this, use these flags in ``Whitespace Tokenizer``: - - - ``intent_split_symbol``: sets the delimiter string to split the intent labels. Default ``_`` - -`Here <https://blog.rasa.com/how-to-handle-multiple-intents-per-input-using-rasa-nlu-tensorflow-pipeline/>`__ is a tutorial on how to use multiple intents in Rasa Core and NLU. - -Here's an example configuration: - -.. code-block:: yaml - - language: "en" - - pipeline: - - name: "WhitespaceTokenizer" - intent_split_symbol: "_" - - name: "CountVectorsFeaturizer" - - name: "EmbeddingIntentClassifier" - - -Understanding the Rasa NLU Pipeline ------------------------------------ - -In Rasa NLU, incoming messages are processed by a sequence of components. -These components are executed one after another -in a so-called processing pipeline. There are components for entity extraction, for intent classification, response selection, -pre-processing, and others. If you want to add your own component, for example to run a spell-check or to -do sentiment analysis, check out :ref:`custom-nlu-components`. - -Each component processes the input and creates an output. The output can be used by any component that comes after -this component in the pipeline. There are components which only produce information that is used by other components -in the pipeline and there are other components that produce ``Output`` attributes which will be returned after -the processing has finished. For example, for the sentence ``"I am looking for Chinese food"`` the output is: - -.. code-block:: json - - { - "text": "I am looking for Chinese food", - "entities": [ - {"start": 8, "end": 15, "value": "chinese", "entity": "cuisine", "extractor": "CRFEntityExtractor", "confidence": 0.864} - ], - "intent": {"confidence": 0.6485910906220309, "name": "restaurant_search"}, - "intent_ranking": [ - {"confidence": 0.6485910906220309, "name": "restaurant_search"}, - {"confidence": 0.1416153159565678, "name": "affirm"} - ] - } - -This is created as a combination of the results of the different components in the pre-configured pipeline ``pretrained_embeddings_spacy``. -For example, the ``entities`` attribute is created by the ``CRFEntityExtractor`` component. - - -.. _section_component_lifecycle: - -Component Lifecycle -------------------- -Every component can implement several methods from the ``Component`` -base class; in a pipeline these different methods -will be called in a specific order. Lets assume, we added the following -pipeline to our config: -``"pipeline": ["Component A", "Component B", "Last Component"]``. -The image shows the call order during the training of this pipeline: - -.. image:: /_static/images/component_lifecycle.png - -Before the first component is created using the ``create`` function, a so -called ``context`` is created (which is nothing more than a python dict). -This context is used to pass information between the components. For example, -one component can calculate feature vectors for the training data, store -that within the context and another component can retrieve these feature -vectors from the context and do intent classification. - -Initially the context is filled with all configuration values, the arrows -in the image show the call order and visualize the path of the passed -context. After all components are trained and persisted, the -final context dictionary is used to persist the model's metadata. 
- - -The "entity" object explained ------------------------------ -After parsing, the entity is returned as a dictionary. There are two fields that show information -about how the pipeline impacted the entities returned: the ``extractor`` field -of an entity tells you which entity extractor found this particular entity, and -the ``processors`` field contains the name of components that altered this -specific entity. - -The use of synonyms can also cause the ``value`` field not match the ``text`` -exactly. Instead it will return the trained synonym. - -.. code-block:: json - - { - "text": "show me chinese restaurants", - "intent": "restaurant_search", - "entities": [ - { - "start": 8, - "end": 15, - "value": "chinese", - "entity": "cuisine", - "extractor": "CRFEntityExtractor", - "confidence": 0.854, - "processors": [] - } - ] - } - -.. note:: - - The ``confidence`` will be set by the CRF entity extractor - (``CRFEntityExtractor`` component). The duckling entity extractor will always return - ``1``. The ``SpacyEntityExtractor`` extractor does not provide this information and - returns ``null``. - - -Pre-configured Pipelines ------------------------- - -A template is just a shortcut for -a full list of components. For example, these two configurations are equivalent: - -.. literalinclude:: ../../sample_configs/config_pretrained_embeddings_spacy.yml - :language: yaml - -.. code-block:: yaml - - language: "en" - - pipeline: - - name: "SpacyNLP" - - name: "SpacyTokenizer" - - name: "SpacyFeaturizer" - - name: "RegexFeaturizer" - - name: "CRFEntityExtractor" - - name: "EntitySynonymMapper" - - name: "SklearnIntentClassifier" - -Below is a list of all the pre-configured pipeline templates with customization information. - -.. _section_supervised_embeddings_pipeline: - -supervised_embeddings -~~~~~~~~~~~~~~~~~~~~~ - -To train a Rasa model in your preferred language, define the -``supervised_embeddings`` pipeline as your pipeline in your ``config.yml`` or other configuration file: - -.. literalinclude:: ../../sample_configs/config_supervised_embeddings.yml - :language: yaml - -The ``supervised_embeddings`` pipeline supports any language that can be tokenized. By default it uses whitespace -for tokenization. You can customize the setup of this pipeline by adding or changing components. Here are the default -components that make up the ``supervised_embeddings`` pipeline: - -.. code-block:: yaml - - language: "en" - - pipeline: - - name: "WhitespaceTokenizer" - - name: "RegexFeaturizer" - - name: "CRFEntityExtractor" - - name: "EntitySynonymMapper" - - name: "CountVectorsFeaturizer" - - name: "CountVectorsFeaturizer" - analyzer: "char_wb" - min_ngram: 1 - max_ngram: 4 - - name: "EmbeddingIntentClassifier" - -So for example, if your chosen language is not whitespace-tokenized (words are not separated by spaces), you -can replace the ``WhitespaceTokenizer`` with your own tokenizer. We support a number of different :ref:`tokenizers <tokenizers>`, -or you can :ref:`create your own <custom-nlu-components>`. - -The pipeline uses two instances of ``CountVectorsFeaturizer``. The first one -featurizes text based on words. The second one featurizes text based on character -n-grams, preserving word boundaries. We empirically found the second featurizer -to be more powerful, but we decided to keep the first featurizer as well to make -featurization more robust. - -.. 
_section_pretrained_embeddings_spacy_pipeline: - -pretrained_embeddings_spacy -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To use the ``pretrained_embeddings_spacy`` template: - -.. literalinclude:: ../../sample_configs/config_pretrained_embeddings_spacy.yml - :language: yaml - -See :ref:`pretrained-word-vectors` for more information about loading spacy language models. -To use the components and configure them separately: - -.. code-block:: yaml - - language: "en" - - pipeline: - - name: "SpacyNLP" - - name: "SpacyTokenizer" - - name: "SpacyFeaturizer" - - name: "RegexFeaturizer" - - name: "CRFEntityExtractor" - - name: "EntitySynonymMapper" - - name: "SklearnIntentClassifier" - -.. _section_mitie_pipeline: - -MITIE -~~~~~ - -To use the MITIE pipeline, you will have to train word vectors from a corpus. Instructions can be found -:ref:`here <mitie>`. This will give you the file path to pass to the ``model`` parameter. - -.. literalinclude:: ../../sample_configs/config_pretrained_embeddings_mitie.yml - :language: yaml - -Another version of this pipeline uses MITIE's featurizer and also its multi-class classifier. -Training can be quite slow, so this is not recommended for large datasets. - -.. literalinclude:: ../../sample_configs/config_pretrained_embeddings_mitie_2.yml - :language: yaml - - -Custom pipelines ----------------- - -You don't have to use a template, you can also run a fully custom pipeline -by listing the names of the components you want to use: - -.. code-block:: yaml - - pipeline: - - name: "SpacyNLP" - - name: "CRFEntityExtractor" - - name: "EntitySynonymMapper" - -This creates a pipeline that only does entity recognition, but no -intent classification. So Rasa NLU will not predict any intents. -You can find the details of each component in :ref:`components`. - -If you want to use custom components in your pipeline, see :ref:`custom-nlu-components`. diff --git a/docs/nlu/components.rst b/docs/nlu/components.rst deleted file mode 100644 index 9b286faf9a53..000000000000 --- a/docs/nlu/components.rst +++ /dev/null @@ -1,851 +0,0 @@ -:desc: Customize the components and parameters of Rasa's Machine Learning based - Natural Language Understanding pipeline - -.. _components: - -Components -========== - -.. edit-link:: - -.. note:: - For clarity, we have renamed the pre-defined pipelines to reflect - what they *do* rather than which libraries they use as of Rasa NLU - 0.15. The ``tensorflow_embedding`` pipeline is now called - ``supervised_embeddings``, and ``spacy_sklearn`` is now known as - ``pretrained_embeddings_spacy``. Please update your code if you are using these. - -This is a reference of the configuration options for every built-in -component in Rasa NLU. If you want to build a custom component, check -out :ref:`custom-nlu-components`. - -.. contents:: - :local: - - -Word Vector Sources -------------------- - -.. _MitieNLP: - -MitieNLP -~~~~~~~~ - -:Short: MITIE initializer -:Outputs: nothing -:Requires: nothing -:Description: - Initializes mitie structures. Every mitie component relies on this, - hence this should be put at the beginning - of every pipeline that uses any mitie components. -:Configuration: - The MITIE library needs a language model file, that **must** be specified in - the configuration: - - .. code-block:: yaml - - pipeline: - - name: "MitieNLP" - # language model to load - model: "data/total_word_feature_extractor.dat" - - For more information where to get that file from, head over to - :ref:`installing MITIE <install-mitie>`. - -.. 
_SpacyNLP: - -SpacyNLP -~~~~~~~~ - -:Short: spacy language initializer -:Outputs: nothing -:Requires: nothing -:Description: - Initializes spacy structures. Every spacy component relies on this, hence this should be put at the beginning - of every pipeline that uses any spacy components. -:Configuration: - Language model, default will use the configured language. - If the spacy model to be used has a name that is different from the language tag (``"en"``, ``"de"``, etc.), - the model name can be specified using this configuration variable. The name will be passed to ``spacy.load(name)``. - - .. code-block:: yaml - - pipeline: - - name: "SpacyNLP" - # language model to load - model: "en_core_web_md" - - # when retrieving word vectors, this will decide if the casing - # of the word is relevant. E.g. `hello` and `Hello` will - # retrieve the same vector, if set to `false`. For some - # applications and models it makes sense to differentiate - # between these two words, therefore setting this to `true`. - case_sensitive: false - -Featurizers ------------ - -MitieFeaturizer -~~~~~~~~~~~~~~~ - -:Short: MITIE intent featurizer -:Outputs: nothing, used as an input to intent classifiers that need intent features (e.g. ``SklearnIntentClassifier``) -:Requires: :ref:`MitieNLP` -:Description: - Creates feature for intent classification using the MITIE featurizer. - - .. note:: - - NOT used by the ``MitieIntentClassifier`` component. Currently, only ``SklearnIntentClassifier`` is able - to use precomputed features. - -:Configuration: - - .. code-block:: yaml - - pipeline: - - name: "MitieFeaturizer" - - - -SpacyFeaturizer -~~~~~~~~~~~~~~~ - -:Short: spacy intent featurizer -:Outputs: nothing, used as an input to intent classifiers that need intent features (e.g. ``SklearnIntentClassifier``) -:Requires: :ref:`SpacyNLP` -:Description: - Creates feature for intent classification using the spacy featurizer. - -NGramFeaturizer -~~~~~~~~~~~~~~~ - -:Short: Appends char-ngram features to feature vector -:Outputs: nothing, appends its features to an existing feature vector generated by another intent featurizer -:Requires: :ref:`SpacyNLP` -:Description: - This featurizer appends character ngram features to a feature vector. During training the component looks for the - most common character sequences (e.g. ``app`` or ``ing``). The added features represent a boolean flag if the - character sequence is present in the word sequence or not. - - .. note:: There needs to be another intent featurizer previous to this one in the pipeline! - -:Configuration: - - .. code-block:: yaml - - pipeline: - - name: "NGramFeaturizer" - # Maximum number of ngrams to use when augmenting - # feature vectors with character ngrams - max_number_of_ngrams: 10 - - -RegexFeaturizer -~~~~~~~~~~~~~~~ - -:Short: regex feature creation to support intent and entity classification -:Outputs: ``text_features`` and ``tokens.pattern`` -:Requires: nothing -:Description: - During training, the regex intent featurizer creates a list of `regular expressions` defined in the training data format. - For each regex, a feature will be set marking whether this expression was found in the input, which will later be fed into intent classifier / entity - extractor to simplify classification (assuming the classifier has learned during the training phase, that this set - feature indicates a certain intent). Regex features for entity extraction are currently only supported by the - ``CRFEntityExtractor`` component! - - .. 
note:: There needs to be a tokenizer previous to this featurizer in the pipeline! - - -CountVectorsFeaturizer -~~~~~~~~~~~~~~~~~~~~~~ - -:Short: Creates bag-of-words representation of user message and label(intent and response) features -:Outputs: - nothing, used as an input to intent classifiers that - need bag-of-words representation of intent features - (e.g. ``EmbeddingIntentClassifier``) -:Requires: nothing -:Description: - Creates bag-of-words representation of user message and label features using - `sklearn's CountVectorizer <http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html>`_. - All tokens which consist only of digits (e.g. 123 and 99 but not a123d) will be assigned to the same feature. - - .. note:: - If the words in the model language cannot be split by whitespace, - a language-specific tokenizer is required in the pipeline before this component - (e.g. using ``JiebaTokenizer`` for Chinese). - -:Configuration: - See `sklearn's CountVectorizer docs <http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html>`_ - for detailed description of the configuration parameters. - - This featurizer can be configured to use word or character n-grams, using ``analyzer`` config parameter. - By default ``analyzer`` is set to ``word`` so word token counts are used as features. - If you want to use character n-grams, set ``analyzer`` to ``char`` or ``char_wb``. - - .. note:: - Option ‘char_wb’ creates character n-grams only from text inside word boundaries; - n-grams at the edges of words are padded with space. - This option can be used to create `Subword Semantic Hashing <https://arxiv.org/abs/1810.07150>`_ - - .. note:: - For character n-grams do not forget to increase ``min_ngram`` and ``max_ngram`` parameters. - Otherwise the vocabulary will contain only single letters - - Handling Out-Of-Vacabulary (OOV) words: - - .. note:: Enabled only if ``analyzer`` is ``word``. - - Since the training is performed on limited vocabulary data, it cannot be guaranteed that during prediction - an algorithm will not encounter an unknown word (a word that were not seen during training). - In order to teach an algorithm how to treat unknown words, some words in training data can be substituted by generic word ``OOV_token``. - In this case during prediction all unknown words will be treated as this generic word ``OOV_token``. - - For example, one might create separate intent ``outofscope`` in the training data containing messages of different number of ``OOV_token`` s and - maybe some additional general words. Then an algorithm will likely classify a message with unknown words as this intent ``outofscope``. - - .. note:: - This featurizer creates a bag-of-words representation by **counting** words, - so the number of ``OOV_token`` in the sentence might be important. - - - ``OOV_token`` set a keyword for unseen words; if training data contains ``OOV_token`` as words in some messages, - during prediction the words that were not seen during training will be substituted with provided ``OOV_token``; - if ``OOV_token=None`` (default behaviour) words that were not seen during training will be ignored during prediction time; - - ``OOV_words`` set a list of words to be treated as ``OOV_token`` during training; if a list of words that should be treated - as Out-Of-Vacabulary is known, it can be set to ``OOV_words`` instead of manually changing it in trainig data or using custom preprocessor. - - .. 
note:: - Providing ``OOV_words`` is optional, training data can contain ``OOV_token`` input manually or by custom additional preprocessor. - Unseen words will be substituted with ``OOV_token`` **only** if this token is present in the training data or ``OOV_words`` list is provided. - - Sharing Vocabulary between user message and labels: - - .. note:: Enabled only if ``use_shared_vocab`` is ``True`` - - Build a common vocabulary set between tokens in labels and user message. - - .. code-block:: yaml - - pipeline: - - name: "CountVectorsFeaturizer" - # whether to use a shared vocab - "use_shared_vocab": False, - # whether to use word or character n-grams - # 'char_wb' creates character n-grams only inside word boundaries - # n-grams at the edges of words are padded with space. - analyzer: 'word' # use 'char' or 'char_wb' for character - # the parameters are taken from - # sklearn's CountVectorizer - # regular expression for tokens - token_pattern: r'(?u)\b\w\w+\b' - # remove accents during the preprocessing step - strip_accents: None # {'ascii', 'unicode', None} - # list of stop words - stop_words: None # string {'english'}, list, or None (default) - # min document frequency of a word to add to vocabulary - # float - the parameter represents a proportion of documents - # integer - absolute counts - min_df: 1 # float in range [0.0, 1.0] or int - # max document frequency of a word to add to vocabulary - # float - the parameter represents a proportion of documents - # integer - absolute counts - max_df: 1.0 # float in range [0.0, 1.0] or int - # set ngram range - min_ngram: 1 # int - max_ngram: 1 # int - # limit vocabulary size - max_features: None # int or None - # if convert all characters to lowercase - lowercase: true # bool - # handling Out-Of-Vacabulary (OOV) words - # will be converted to lowercase if lowercase is true - OOV_token: None # string or None - OOV_words: [] # list of strings - -Intent Classifiers ------------------- - -KeywordIntentClassifier -~~~~~~~~~~~~~~~~~~~~~~~ - -:Short: Simple keyword matching intent classifier. Not intended to be used. -:Outputs: ``intent`` -:Requires: nothing -:Output-Example: - - .. code-block:: json - - { - "intent": {"name": "greet", "confidence": 0.98343} - } - -:Description: - This classifier is mostly used as a placeholder. It is able to recognize `hello` and - `goodbye` intents by searching for these keywords in the passed messages. - -MitieIntentClassifier -~~~~~~~~~~~~~~~~~~~~~ - -:Short: MITIE intent classifier (using a `text categorizer <https://github.com/mit-nlp/MITIE/blob/master/examples/python/text_categorizer_pure_model.py>`_) -:Outputs: ``intent`` -:Requires: A tokenizer and a featurizer -:Output-Example: - - .. code-block:: json - - { - "intent": {"name": "greet", "confidence": 0.98343} - } - -:Description: - This classifier uses MITIE to perform intent classification. The underlying classifier - is using a multi-class linear SVM with a sparse linear kernel (see `MITIE trainer code <https://github.com/mit-nlp/MITIE/blob/master/mitielib/src/text_categorizer_trainer.cpp#L222>`_). - -:Configuration: - - .. code-block:: yaml - - pipeline: - - name: "MitieIntentClassifier" - -SklearnIntentClassifier -~~~~~~~~~~~~~~~~~~~~~~~ - -:Short: sklearn intent classifier -:Outputs: ``intent`` and ``intent_ranking`` -:Requires: A featurizer -:Output-Example: - - .. 
code-block:: json - - { - "intent": {"name": "greet", "confidence": 0.78343}, - "intent_ranking": [ - { - "confidence": 0.1485910906220309, - "name": "goodbye" - }, - { - "confidence": 0.08161531595656784, - "name": "restaurant_search" - } - ] - } - -:Description: - The sklearn intent classifier trains a linear SVM which gets optimized using a grid search. In addition - to the predicted intent, it also provides rankings of the labels that did not "win". The sklearn intent classifier - needs to be preceded by a featurizer in the pipeline. This featurizer creates the features used for the classification. - -:Configuration: - During the training of the SVM a hyperparameter search is run to - find the best parameter set. In the config, you can specify the parameters - that will be tried: - - .. code-block:: yaml - - pipeline: - - name: "SklearnIntentClassifier" - # Specifies the list of regularization values to - # cross-validate over for C-SVM. - # This is used with the ``kernel`` hyperparameter in GridSearchCV. - C: [1, 2, 5, 10, 20, 100] - # Specifies the kernel to use with C-SVM. - # This is used with the ``C`` hyperparameter in GridSearchCV. - kernels: ["linear"] - -EmbeddingIntentClassifier -~~~~~~~~~~~~~~~~~~~~~~~~~ - -:Short: Embedding intent classifier -:Outputs: ``intent`` and ``intent_ranking`` -:Requires: A featurizer -:Output-Example: - - .. code-block:: json - - { - "intent": {"name": "greet", "confidence": 0.8343}, - "intent_ranking": [ - { - "confidence": 0.385910906220309, - "name": "goodbye" - }, - { - "confidence": 0.28161531595656784, - "name": "restaurant_search" - } - ] - } - -:Description: - The embedding intent classifier embeds user inputs and intent labels into the same space. - Supervised embeddings are trained by maximizing the similarity between them. - This algorithm is based on `StarSpace <https://arxiv.org/abs/1709.03856>`_. - However, in this implementation the loss function is slightly different and - additional hidden layers are added together with dropout. - This algorithm also provides similarity rankings of the labels that did not "win". - - The embedding intent classifier needs to be preceded by a featurizer in the pipeline. - This featurizer creates the features used for the embeddings. - It is recommended to use ``CountVectorsFeaturizer``, which can optionally be preceded - by ``SpacyNLP`` and ``SpacyTokenizer``. - - .. note:: If during prediction time a message contains **only** words unseen during training, - and no Out-Of-Vocabulary preprocessor was used, - the empty intent ``None`` is predicted with confidence ``0.0``.
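    As an illustration, a minimal pipeline using this classifier together with the recommended featurizer could look like the following sketch. The ``epochs`` value shown here is only an example; the actual defaults are listed under the configuration section below.

    .. code-block:: yaml

        pipeline:
        - name: "CountVectorsFeaturizer"
        - name: "EmbeddingIntentClassifier"
          # example value for illustration only; see the defaults below
          epochs: 300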
- -:Configuration: - - The algorithm also has hyperparameters to control: - - - neural network's architecture: - - - ``hidden_layers_sizes_a`` sets a list of hidden layer sizes before - the embedding layer for user inputs, the number of hidden layers - is equal to the length of the list - - ``hidden_layers_sizes_b`` sets a list of hidden layer sizes before - the embedding layer for intent labels, the number of hidden layers - is equal to the length of the list - - ``share_hidden`` if set to True, shares the hidden layers between user inputs and intent label - - - training: - - - ``batch_size`` sets the number of training examples in one - forward/backward pass, the higher the batch size, the more - memory space you'll need; - - ``batch_strategy`` sets the type of batching strategy, - it should be either ``sequence`` or ``balanced``; - - ``epochs`` sets the number of times the algorithm will see - training data, where one ``epoch`` equals one forward pass and - one backward pass of all the training examples; - - ``random_seed`` if set to any int will get reproducible - training results for the same inputs; - - - embedding: - - - ``embed_dim`` sets the dimension of embedding space; - - ``num_neg`` sets the number of incorrect intent labels, - the algorithm will minimize their similarity to the user - input during training; - - ``similarity_type`` sets the type of the similarity, - it should be either ``auto``, ``cosine`` or ``inner``, - if ``auto``, it will be set depending on ``loss_type``, - ``inner`` for ``softmax``, ``cosine`` for ``margin``; - - ``loss_type`` sets the type of the loss function, - it should be either ``softmax`` or ``margin``; - - ``mu_pos`` controls how similar the algorithm should try - to make embedding vectors for correct intent labels, - used only if ``loss_type`` is set to ``margin``; - - ``mu_neg`` controls maximum negative similarity for - incorrect intents, - used only if ``loss_type`` is set to ``margin``; - - ``use_max_sim_neg`` if ``true`` the algorithm only - minimizes maximum similarity over incorrect intent labels, - used only if ``loss_type`` is set to ``margin``; - - ``scale_loss`` if ``true`` the algorithm will downscale the loss - for examples where correct label is predicted with high confidence, - used only if ``loss_type`` is set to ``softmax``; - - - regularization: - - - ``C2`` sets the scale of L2 regularization - - ``C_emb`` sets the scale of how important is to minimize - the maximum similarity between embeddings of different intent labels; - - ``droprate`` sets the dropout rate, it should be - between ``0`` and ``1``, e.g. ``droprate=0.1`` - would drop out ``10%`` of input units; - - .. note:: For ``cosine`` similarity ``mu_pos`` and ``mu_neg`` should be between ``-1`` and ``1``. - - .. note:: There is an option to use linearly increasing batch size. The idea comes from `<https://arxiv.org/abs/1711.00489>`_. - In order to do it pass a list to ``batch_size``, e.g. ``"batch_size": [64, 256]`` (default behaviour). - If constant ``batch_size`` is required, pass an ``int``, e.g. ``"batch_size": 64``. - - In the config, you can specify these parameters. - The default values are defined in ``EmbeddingIntentClassifier.defaults``: - - .. literalinclude:: ../../rasa/nlu/classifiers/embedding_intent_classifier.py - :dedent: 4 - :start-after: # default properties (DOC MARKER - don't remove) - :end-before: # end default properties (DOC MARKER - don't remove) - - .. 
note:: Parameter ``mu_neg`` is set to a negative value to mimic the original - starspace algorithm in the case ``mu_neg = mu_pos`` and ``use_max_sim_neg = False``. - See `starspace paper <https://arxiv.org/abs/1709.03856>`_ for details. - - -Selectors ----------- - -.. _response-selector: - -Response Selector -~~~~~~~~~~~~~~~~~~ - -:Short: Response Selector -:Outputs: A dictionary with key as ``direct_response_intent`` and value containing ``response`` and ``ranking`` -:Requires: A featurizer -:Output-Example: - - .. code-block:: json - - { - "text": "What is the recommend python version to install?", - "entities": [], - "intent": {"confidence": 0.6485910906220309, "name": "faq"}, - "intent_ranking": [ - {"confidence": 0.6485910906220309, "name": "faq"}, - {"confidence": 0.1416153159565678, "name": "greet"} - ], - "response_selector": { - "faq": { - "response": {"confidence": 0.7356462617, "name": "Supports 3.5, 3.6 and 3.7, recommended version is 3.6"}, - "ranking": [ - {"confidence": 0.7356462617, "name": "Supports 3.5, 3.6 and 3.7, recommended version is 3.6"}, - {"confidence": 0.2134543431, "name": "You can ask me about how to get started"} - ] - } - } - } - -:Description: - - Response Selector component can be used to build a response retrieval model to directly predict a bot response from - a set of candidate responses. The prediction of this model is used by :ref:`retrieval-actions`. - It embeds user inputs and response labels into the same space and follows the exact same - neural network architecture and optimization as the ``EmbeddingIntentClassifier``. - - The response selector needs to be preceded by a featurizer in the pipeline. - This featurizer creates the features used for the embeddings. - It is recommended to use ``CountVectorsFeaturizer`` that can be optionally preceded - by ``SpacyNLP``. - - .. note:: If during prediction time a message contains **only** words unseen during training, - and no Out-Of-Vacabulary preprocessor was used, - empty response ``None`` is predicted with confidence ``0.0``. - -:Configuration: - - The algorithm includes all the hyperparameters that ``EmbeddingIntentClassifier`` uses. - In addition, the component can also be configured to train a response selector for a particular retrieval intent - - - ``retrieval_intent``: sets the name of the intent for which this response selector model is trained. Default ``None`` - - In the config, you can specify these parameters. - The default values are defined in ``ResponseSelector.defaults``: - - .. literalinclude:: ../../rasa/nlu/selectors/embedding_response_selector.py - :dedent: 4 - :start-after: # default properties (DOC MARKER - don't remove) - :end-before: # end default properties (DOC MARKER - don't remove) - -.. _tokenizers: - -Tokenizers ----------- - -WhitespaceTokenizer -~~~~~~~~~~~~~~~~~~~ - -:Short: Tokenizer using whitespaces as a separator -:Outputs: nothing -:Requires: nothing -:Description: - Creates a token for every whitespace separated character sequence. Can be used to define tokens for the MITIE entity - extractor. -:Configuration: - - If you want to split intents into multiple labels, e.g. for predicting multiple intents or for - modeling hierarchical intent structure, use these flags: - - - tokenization of intent and response labels: - - ``intent_split_symbol`` sets the delimiter string to split the intent and response labels, default is whitespace. - - Make the tokenizer not case sensitive by adding the ``case_sensitive: false`` option. Default being ``case_sensitive: true``. 
- - .. code-block:: yaml - - pipeline: - - name: "WhitespaceTokenizer" - case_sensitive: false - -JiebaTokenizer -~~~~~~~~~~~~~~ - -:Short: Tokenizer using Jieba for the Chinese language -:Outputs: nothing -:Requires: nothing -:Description: - Creates tokens using the Jieba tokenizer, specifically for the Chinese - language. For languages other than Chinese, Jieba works like the - ``WhitespaceTokenizer``. Can be used to define tokens for the - MITIE entity extractor. Make sure to install Jieba with ``pip install jieba``. -:Configuration: - Custom dictionary files can be loaded automatically by specifying their directory path via ``dictionary_path``: - - .. code-block:: yaml - - pipeline: - - name: "JiebaTokenizer" - dictionary_path: "path/to/custom/dictionary/dir" - -If the ``dictionary_path`` is ``None`` (the default), then no custom dictionary will be used. - -MitieTokenizer -~~~~~~~~~~~~~~ - -:Short: Tokenizer using MITIE -:Outputs: nothing -:Requires: :ref:`MitieNLP` -:Description: - Creates tokens using the MITIE tokenizer. Can be used to define - tokens for the MITIE entity extractor. -:Configuration: - - .. code-block:: yaml - - pipeline: - - name: "MitieTokenizer" - -SpacyTokenizer -~~~~~~~~~~~~~~ - -:Short: Tokenizer using spaCy -:Outputs: nothing -:Requires: :ref:`SpacyNLP` -:Description: - Creates tokens using the spaCy tokenizer. Can be used to define - tokens for the MITIE entity extractor. - - -Entity Extractors ----------------- - -MitieEntityExtractor -~~~~~~~~~~~~~~~~~~~~ - -:Short: MITIE entity extraction (using a `MITIE NER trainer <https://github.com/mit-nlp/MITIE/blob/master/mitielib/src/ner_trainer.cpp>`_) -:Outputs: appends ``entities`` -:Requires: :ref:`MitieNLP` -:Output-Example: - - .. code-block:: json - - { - "entities": [{"value": "New York City", - "start": 20, - "end": 33, - "confidence": null, - "entity": "city", - "extractor": "MitieEntityExtractor"}] - } - -:Description: - This uses the MITIE entity extraction to find entities in a message. The underlying classifier - uses a multi-class linear SVM with a sparse linear kernel and custom features. - The MITIE component does not provide entity confidence values. -:Configuration: - - .. code-block:: yaml - - pipeline: - - name: "MitieEntityExtractor" - -SpacyEntityExtractor -~~~~~~~~~~~~~~~~~~~~ - -:Short: spaCy entity extraction -:Outputs: appends ``entities`` -:Requires: :ref:`SpacyNLP` -:Output-Example: - - .. code-block:: json - - { - "entities": [{"value": "New York City", - "start": 20, - "end": 33, - "entity": "city", - "confidence": null, - "extractor": "SpacyEntityExtractor"}] - } - -:Description: - Using spaCy, this component predicts the entities of a message. spaCy uses a statistical BILOU transition model. - As of now, this component can only use the built-in spaCy entity extraction models and cannot be retrained. - This extractor does not provide any confidence scores. - -:Configuration: - Configure which dimensions, i.e. entity types, the spaCy component - should extract. A full list of available dimensions can be found in - the `spaCy documentation <https://spacy.io/api/annotation#section-named-entities>`_. - Leaving the dimensions option unspecified will extract all available dimensions. - - .. code-block:: yaml - - pipeline: - - name: "SpacyEntityExtractor" - # dimensions to extract - dimensions: ["PERSON", "LOC", "ORG", "PRODUCT"] - - -EntitySynonymMapper -~~~~~~~~~~~~~~~~~~~ - - -:Short: Maps synonymous entity values to the same value.
-:Outputs: modifies existing entities that previous entity extraction components found -:Requires: nothing -:Description: - If the training data contains defined synonyms (by using the ``value`` attribute on the entity examples), - this component will make sure that detected entity values are mapped to the same value. For example, - if your training data contains the following examples: - - .. code-block:: json - - [{ - "text": "I moved to New York City", - "intent": "inform_relocation", - "entities": [{"value": "nyc", - "start": 11, - "end": 24, - "entity": "city" - }] - }, - { - "text": "I got a new flat in NYC.", - "intent": "inform_relocation", - "entities": [{"value": "nyc", - "start": 20, - "end": 23, - "entity": "city" - }] - }] - - This component will allow you to map the entities ``New York City`` and ``NYC`` to ``nyc``. The entity - extraction will return ``nyc`` even though the message contains ``NYC``. When this component changes an - existing entity, it appends itself to the processor list of this entity. - -CRFEntityExtractor -~~~~~~~~~~~~~~~~~~ - -:Short: conditional random field entity extraction -:Outputs: appends ``entities`` -:Requires: A tokenizer -:Output-Example: - - .. code-block:: json - - { - "entities": [{"value":"New York City", - "start": 20, - "end": 33, - "entity": "city", - "confidence": 0.874, - "extractor": "CRFEntityExtractor"}] - } - -:Description: - This component implements conditional random fields to do named entity recognition. - CRFs can be thought of as an undirected Markov chain where the time steps are words - and the states are entity classes. Features of the words (capitalisation, POS tagging, - etc.) give probabilities to certain entity classes, as do transitions between - neighbouring entity tags: the most likely set of tags is then calculated and returned. - If POS features are used (``pos`` or ``pos2``), spaCy has to be installed. -:Configuration: - .. code-block:: yaml - - pipeline: - - name: "CRFEntityExtractor" - # The features are a ``[before, word, after]`` array with - # before, word, after holding keys about which - # features to use for each word, for example, ``"title"`` - # in array before will have the feature - # "is the preceding word in title case?". - # Available features are: - # ``low``, ``title``, ``suffix5``, ``suffix3``, ``suffix2``, - # ``suffix1``, ``pos``, ``pos2``, ``prefix5``, ``prefix2``, - # ``bias``, ``upper``, ``digit`` and ``pattern`` - features: [["low", "title"], ["bias", "suffix3"], ["upper", "pos", "pos2"]] - - # The flag determines whether to use BILOU tagging or not. BILOU - # tagging is more rigorous, however it - # requires more examples per entity. Rule of thumb: use only - # if more than 100 examples per entity. - BILOU_flag: true - - # This is the value given to the sklearn_crfsuite.CRF tagger before training. - max_iterations: 50 - - # This is the value given to the sklearn_crfsuite.CRF tagger before training. - # Specifies the L1 regularization coefficient. - L1_c: 0.1 - - # This is the value given to the sklearn_crfsuite.CRF tagger before training. - # Specifies the L2 regularization coefficient. - L2_c: 0.1 - -.. _DucklingHTTPExtractor: - -DucklingHTTPExtractor -~~~~~~~~~~~~~~~~~~~~~ - -:Short: Duckling lets you extract common entities like dates, - amounts of money, distances, and others in a number of languages. -:Outputs: appends ``entities`` -:Requires: nothing -:Output-Example: - - ..
code-block:: json - - { - "entities": [{"end": 53, - "entity": "time", - "start": 48, - "value": "2017-04-10T00:00:00.000+02:00", - "confidence": 1.0, - "extractor": "DucklingHTTPExtractor"}] - } - -:Description: - To use this component you need to run a duckling server. The easiest - option is to spin up a docker container using - ``docker run -p 8000:8000 rasa/duckling``. - - Alternatively, you can `install duckling directly on your - machine <https://github.com/facebook/duckling#quickstart>`_ and start the server. - - Duckling allows to recognize dates, numbers, distances and other structured entities - and normalizes them. - Please be aware that duckling tries to extract as many entity types as possible without - providing a ranking. For example, if you specify both ``number`` and ``time`` as dimensions - for the duckling component, the component will extract two entities: ``10`` as a number and - ``in 10 minutes`` as a time from the text ``I will be there in 10 minutes``. In such a - situation, your application would have to decide which entity type is be the correct one. - The extractor will always return `1.0` as a confidence, as it is a rule - based system. - -:Configuration: - Configure which dimensions, i.e. entity types, the duckling component - should extract. A full list of available dimensions can be found in - the `duckling documentation <https://duckling.wit.ai/>`_. - Leaving the dimensions option unspecified will extract all available dimensions. - - .. code-block:: yaml - - pipeline: - - name: "DucklingHTTPExtractor" - # url of the running duckling server - url: "http://localhost:8000" - # dimensions to extract - dimensions: ["time", "number", "amount-of-money", "distance"] - # allows you to configure the locale, by default the language is - # used - locale: "de_DE" - # if not set the default timezone of Duckling is going to be used - # needed to calculate dates from relative expressions like "tomorrow" - timezone: "Europe/Berlin" - - diff --git a/docs/nlu/entity-extraction.rst b/docs/nlu/entity-extraction.rst deleted file mode 100644 index 05d1289ce829..000000000000 --- a/docs/nlu/entity-extraction.rst +++ /dev/null @@ -1,151 +0,0 @@ -:desc: Use open source named entity recognition like Spacy or Duckling - and customize them according to your needs to build contextual - AI assistants - -.. _entity-extraction: - -Entity Extraction -================= - -.. edit-link:: - -.. contents:: - :local: - - -Introduction -^^^^^^^^^^^^ - -Here is a summary of the available extractors and what they are used for: - -========================= ================= ======================== ================================= -Component Requires Model Notes -========================= ================= ======================== ================================= -``CRFEntityExtractor`` sklearn-crfsuite conditional random field good for training custom entities -``SpacyEntityExtractor`` spaCy averaged perceptron provides pre-trained entities -``DucklingHTTPExtractor`` running duckling context-free grammar provides pre-trained entities -``MitieEntityExtractor`` MITIE structured SVM good for training custom entities -``EntitySynonymMapper`` existing entities N/A maps known synonyms -========================= ================= ======================== ================================= - -If your pipeline includes one or more of the components above, -the output of your trained model will include the extracted entities as well -as some metadata about which component extracted them. 
-The ``processors`` field contains the names of components that altered each entity. - -.. note:: - The ``value`` field can be different from what appears in the text. - If you use synonyms, an extracted entity like ``chinees`` will be mapped - to a standard value, e.g. ``chinese``. - -Here is an example response: - -.. code-block:: json - - { - "text": "show me chinese restaurants", - "intent": "restaurant_search", - "entities": [ - { - "start": 8, - "end": 15, - "value": "chinese", - "entity": "cuisine", - "extractor": "CRFEntityExtractor", - "confidence": 0.854, - "processors": [] - } - ] - } - - -Some extractors, like ``duckling``, may include additional information. For example: - -.. code-block:: json - - { - "additional_info":{ - "grain":"day", - "type":"value", - "value":"2018-06-21T00:00:00.000-07:00", - "values":[ - { - "grain":"day", - "type":"value", - "value":"2018-06-21T00:00:00.000-07:00" - } - ] - }, - "confidence":1.0, - "end":5, - "entity":"time", - "extractor":"DucklingHTTPExtractor", - "start":0, - "text":"today", - "value":"2018-06-21T00:00:00.000-07:00" - } - -.. note:: - - The `confidence` will be set by the CRF entity extractor - (`CRFEntityExtractor` component). The duckling entity extractor will always return - `1`. The `SpacyEntityExtractor` extractor does not provide this information and - returns `null`. - - -Custom Entities -^^^^^^^^^^^^^^^ - -Almost every chatbot and voice app will have some custom entities. -A restaurant assistant should understand ``chinese`` as a cuisine, -but to a language-learning assistant it would mean something very different. -The ``CRFEntityExtractor`` component can learn custom entities in any language, given -some training data. -See :ref:`training-data-format` for details on how to include entities in your training data. - - -Extracting Places, Dates, People, Organisations -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -spaCy has excellent pre-trained named-entity recognisers for a few different languages. -You can test them out in this -`interactive demo <https://demos.explosion.ai/displacy-ent/>`_. -We don't recommend that you try to train your own NER using spaCy, -unless you have a lot of data and know what you are doing. -Note that some spaCy models are highly case-sensitive. - -Dates, Amounts of Money, Durations, Distances, Ordinals -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The `duckling <https://duckling.wit.ai/>`_ library does a great job -of turning expressions like "next Thursday at 8pm" into actual datetime -objects that you can use, e.g. - -.. code-block:: python - - "next Thursday at 8pm" - => {"value":"2018-05-31T20:00:00.000+01:00"} - - -The list of supported langauges can be found `here -<https://github.com/facebook/duckling/tree/master/Duckling/Dimensions>`_. -Duckling can also handle durations like "two hours", -amounts of money, distances, and ordinals. -Fortunately, there is a duckling docker container ready to use, -that you just need to spin up and connect to Rasa NLU -(see :ref:`DucklingHTTPExtractor`). - - -Regular Expressions (regex) -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -You can use regular expressions to help the CRF model learn to recognize entities. -In your training data (see :ref:`training-data-format`) you can provide a list of regular expressions, each of which provides -the ``CRFEntityExtractor`` with an extra binary feature, which says if the regex was found (1) or not (0). - -For example, the names of German streets often end in ``strasse``. 
By adding this as a regex, -we are telling the model to pay attention to words ending this way, and will quickly learn to -associate that with a location entity. - -If you just want to match regular expressions exactly, you can do this in your code, -as a postprocessing step after receiving the response from Rasa NLU. diff --git a/docs/nlu/language-support.rst b/docs/nlu/language-support.rst deleted file mode 100644 index 4eca06d8b8e6..000000000000 --- a/docs/nlu/language-support.rst +++ /dev/null @@ -1,88 +0,0 @@ -:desc: Support all languages via custom domain-trained embeddings or pre-trained embeddings - with open source chatbot framework Rasa. - -.. _language-support: - -Language Support -================ - -.. edit-link:: - -You can use Rasa to build assistants in any language you want! Rasa's -``supervised_embeddings`` pipeline can be used on training data in **any language**. -This pipeline creates word embeddings from scratch with the data you provide. - -In addition, we also support pre-trained word embeddings such as spaCy. For information on -what pipeline is best for your use case, check out :ref:`choosing-a-pipeline`. - -.. contents:: - :local: - - -Training a Model in Any Language --------------------------------- - -Rasa's ``supervised_embeddings`` pipeline can be used to train models in any language, because -it uses your own training data to create custom word embeddings. This means that the vector -representation of any specific word will depend on its relationship with the other words in your -training data. This customization also means that the pipeline is great for use cases that hinge -on domain-specific data, such as those that require picking up on specific product names. - -To train a Rasa model in your preferred language, define the -``supervised_embeddings`` pipeline as your pipeline in your ``config.yml`` or other configuration file -via the instructions :ref:`here <section_supervised_embeddings_pipeline>`. - -After you define the ``supervised_embeddings`` processing pipeline and generate some :ref:`NLU training data <training-data-format>` -in your chosen language, train the model with ``rasa train nlu``. Once the training is finished, you can test your model's -language skills. See how your model interprets different input messages via: - -.. code-block:: bash - - rasa shell nlu - -.. note:: - - Even more so when training word embeddings from scratch, more training data will lead to a - better model! If you find your model is having trouble discerning your inputs, try training - with more example sentences. - -.. _pretrained-word-vectors: - -Pre-trained Word Vectors ------------------------- - -If you can find them in your language, pre-trained word vectors are a great way to get started with less data, -as the word vectors are trained on large amounts of data such as Wikipedia. - -spaCy -~~~~~ - -With the ``pretrained_embeddings_spacy`` :ref:`pipeline <section_pretrained_embeddings_spacy_pipeline>`, you can use spaCy's -`pre-trained language models <https://spacy.io/usage/models#languages>`_ or load fastText vectors, which are available -for `hundreds of languages <https://github.com/facebookresearch/fastText/blob/master/docs/crawl-vectors.md>`_. If you want -to incorporate a custom model you've found into spaCy, check out their page on -`adding languages <https://spacy.io/docs/usage/adding-languages>`_. 
As described in the documentation, you need to -register your language model and link it to the language identifier, which will allow Rasa to load and use your new language -by passing in your language identifier as the ``language`` option. - -.. _mitie: - -MITIE -~~~~~ - -You can also pre-train your own word vectors from a language corpus using :ref:`MITIE <section_mitie_pipeline>`. To do so: - -1. Get a clean language corpus (a Wikipedia dump works) as a set of text files. -2. Build and run `MITIE Wordrep Tool`_ on your corpus. - This can take several hours/days depending on your dataset and your workstation. - You'll need something like 128GB of RAM for wordrep to run -- yes, that's a lot: try to extend your swap. -3. Set the path of your new ``total_word_feature_extractor.dat`` as the ``model`` parameter in your - :ref:`configuration <section_mitie_pipeline>`. - -For a full example of how to train MITIE word vectors, check out -`this blogpost <http://www.crownpku.com/2017/07/27/%E7%94%A8Rasa_NLU%E6%9E%84%E5%BB%BA%E8%87%AA%E5%B7%B1%E7%9A%84%E4%B8%AD%E6%96%87NLU%E7%B3%BB%E7%BB%9F.html>`_ -of creating a MITIE model from a Chinese Wikipedia dump. - - -.. _`MITIE Wordrep Tool`: https://github.com/mit-nlp/MITIE/tree/master/tools/wordrep - diff --git a/docs/nlu/old-nlu-change-log.rst b/docs/nlu/old-nlu-change-log.rst deleted file mode 100644 index f5d472fe29c4..000000000000 --- a/docs/nlu/old-nlu-change-log.rst +++ /dev/null @@ -1,862 +0,0 @@ -:desc: Rasa NLU Changelog - -.. _old-nlu-change-log: - -NLU Change Log -============== - -All notable changes to this project will be documented in this file. -This project adheres to `Semantic Versioning`_ starting with version 0.7.0. - -[0.15.1] - Unreleased -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed bug in rasa_nlu.test script that appeared if no intent classifier was present - -[0.15.0] - 2019-04-23 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- Added a detailed warning showing which entities are overlapping -- Authentication token can be also set with env variable ``RASA_NLU_TOKEN``. -- ``SpacyEntityExtractor`` supports same entity filtering as ``DucklingHTTPExtractor`` -- **added support for python 3.7** - -Changed -------- -- validate training data only if used for training -- applied spacy guidelines on how to disable pipeline components -- starter packs now also tested when attempting to merge a branch to master -- new consistent naming scheme for pipelines: - - ``tensorflow_embedding`` pipeline template renamed to ``supervised_embeddings`` - - ``spacy_sklearn`` pipeline template renamed to ``pretrained_embeddings_spacy`` - - requirements files, sample configs, and dockerfiles renamed accordingly -- ``/train`` endpoint now returns a zipfile of the trained model. 
-- pipeline components in the config file should be provided - with their class name -- persisted components file name changed -- replace pep8 with pycodestyle -- ``Component.name`` property returns component's class name -- Components ``load(...)``, ``create(...)`` and ``cache_key(...)`` methods - additionally take component's meta/config dicts -- Components ``persist(...)`` method additionally takes file name prefix -- renamed ``rasa_nlu.evaluate`` to ``rasa_nlu.test`` -- renamed ``rasa_nlu.test.run_cv_evaluation`` to - ``rasa_nlu.test.cross_validate`` -- renamed ``rasa_nlu.train.do_train()`` to ``rasa_nlu.train.train()`` -- train command can now also load config from file -- updated to tensorflow 1.13 - -Removed -------- -- **removed python 2.7 support** - -Fixed ------ -- ``RegexFeaturizer`` detects all regex in user message (not just first) -- do_extractors_support_overlap now correctly throws an exception only if no extractors are - passed or if extractors that do not support overlapping entities are used. -- Docs entry for pretrained embeddings pipeline is now consistent with the - code in ``registry.py`` - - -[0.14.6] - 2019-03-20 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed Changelog dates (dates had the wrong year attached) - -[0.14.5] - 2019-03-19 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- properly tag ``stable`` docker image (instead of alpha) - -[0.14.3] - 2019-02-01 -^^^^^^^^^^^^^^^^^^^^^ -- - -Changed -------- -- starter packs are now tested in parallel with the unittests, - and only on branches ending in ``.x`` (i.e. new version releases) -- pinned ``coloredlogs``, ``future`` and ``packaging`` - -[0.14.2] - 2019-01-29 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- ``rasa_nlu.evaluate`` now exports reports into a folder and also - includes the entity extractor reports - -Changed -------- -- updated requirements to match Core and SDK -- pinned keras dependecies - -[0.14.1] - 2019-01-23 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- scikit-learn is a global requirement - -.. _nluv0-14-0: - -[0.14.0] - 2019-01-23 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- Ability to save successful predictions and classification results to a JSON - file from ``rasa_nlu.evaluate`` -- environment variables specified with ``${env_variable}`` in a yaml - configuration file are now replaced with the value of the environment - variable -- more documentation on how to run NLU with Docker -- ``analyzer`` parameter to ``intent_featurizer_count_vectors`` featurizer to - configure whether to use word or character n-grams -- Travis script now clones and tests the Rasa NLU starter pack - -Changed -------- -- ``EmbeddingIntentClassifier`` has been refactored, including changes to the - config parameters as well as comments and types for all class functions. -- the http server's ``POST /evaluate`` endpoint returns evaluation results - for both entities and intents -- replaced ``yaml`` with ``ruamel.yaml`` -- updated spacy version to 2.0.18 -- updated TensorFlow version to 1.12.0 -- updated scikit-learn version to 0.20.2 -- updated cloudpickle version to 0.6.1 -- updated requirements to match Core and SDK -- pinned keras dependecies - -Removed -------- -- ``/config`` endpoint -- removed pinning of ``msgpack`` and unused package ``python-msgpack`` -- removed support for ``ner_duckling``. Now supports only ``ner_duckling_http`` - -Fixed ------ -- Should loading jieba custom dictionaries only once. 
-- Set attributes of custom components correctly if they defer from the default -- NLU Server can now handle training data mit emojis in it -- If the ``token_name`` is not given in the endpoint configuration, the default - value is ``token`` instead of ``None`` -- Throws error only if ``ner_crf`` picks up overlapping entities. If the - entity extractor supports overlapping entitis no error is thrown. -- Updated CORS support for the server. - Added the ``Access-Control-Allow-Headers`` and ``Content-Type`` headers - for nlu server -- parsing of emojis which are sent within jsons -- Bad input shape error from ``sklearn_intent_classifier`` when using - ``scikit-learn==0.20.2`` - -[0.13.8] - 2018-11-21 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- pinned spacy version to ``spacy<=2.0.12,>2.0`` to avoid dependency conflicts - with tensorflow - -[0.13.7] - 2018-10-11 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- ``rasa_nlu.server`` allowed more than ``max_training_processes`` - to be trained if they belong to different projects. - ``max_training_processes`` is now a global parameter, regardless of what - project the training process belongs to. - - -[0.13.6] - 2018-10-04 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- ``boto3`` is now loaded lazily in ``AWSPersistor`` and is not - included in ``requirements_bare.txt`` anymore - -Fixed ------ -- Allow training of pipelines containing ``EmbeddingIntentClassifier`` in - a separate thread on python 3. This makes http server calls to ``/train`` - non-blocking -- require ``scikit-learn<0.20`` in setup py to avoid corrupted installations - with the most recent scikit learn - - -[0.13.5] - 2018-09-28 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- Training data is now validated after loading from files in ``loading.py`` - instead of on initialisation of ``TrainingData`` object - -Fixed ------ -- ``Project`` set up to pull models from a remote server only use - the pulled model instead of searching for models locally - -[0.13.4] - 2018-09-19 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- pinned matplotlib to 2.x (not ready for 3.0 yet) -- pytest-services since it wasn't used and caused issues on Windows - -[0.13.3] - 2018-08-28 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- ``EndpointConfig`` class that handles authenticated requests - (ported from Rasa Core) -- ``DataRouter()`` class supports a ``model_server`` ``EndpointConfig``, - which it regularly queries to fetch NLU models -- this can be used with ``rasa_nlu.server`` with the ``--endpoint`` option - (the key for this the model server config is ``model``) -- docs on model fetching from a URL -- ability to specify lookup tables in training data - -Changed -------- -- loading training data from a URL requires an instance of ``EndpointConfig`` - -- Changed evaluate behaviour to plot two histogram bars per bin. - Plotting confidence of right predictions in a wine-ish colour - and wrong ones in a blue-ish colour. - -Removed -------- - -Fixed ------ -- re-added support for entity names with special characters in markdown format - -[0.13.2] - 2018-08-28 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- added information about migrating the CRF component from 0.12 to 0.13 - -Fixed ------ -- pipelines containing the ``EmbeddingIntentClassifier`` are not trained in a - separate thread, as this may lead to freezing during training - -[0.13.1] - 2018-08-07 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- documentation example for creating a custom component - -Fixed ------ -- correctly pass reference time in miliseconds to duckling_http - -.. 
_nluv0-13-0: - -[0.13.0] - 2018-08-02 -^^^^^^^^^^^^^^^^^^^^^ - -.. warning:: - - This is a release **breaking backwards compatibility**. - Unfortunately, it is not possible to load previously trained models as - the parameters for the tensorflow and CRF models changed. - -Added ------ -- support for `tokenizer_jieba` load custom dictionary from config -- allow pure json including pipeline configuration on train endpoint -- doc link to a community contribution for Rasa NLU in Chinese -- support for component ``count_vectors_featurizer`` use ``tokens`` - feature provide by tokenizer -- 2-character and a 5-character prefix features to ``ner_crf`` -- ``ner_crf`` with whitespaced tokens to ``tensorflow_embedding`` pipeline -- predict empty string instead of None for intent name -- update default parameters for tensorflow embedding classifier -- do not predict anything if feature vector contains only zeros - in tensorflow embedding classifier -- change persistence keywords in tensorflow embedding classifier - (make previously trained models impossible to load) -- intent_featurizer_count_vectors adds features to text_features - instead of overwriting them -- add basic OOV support to intent_featurizer_count_vectors (make - previously trained models impossible to load) -- add a feature for each regex in the training set for crf_entity_extractor -- Current training processes count for server and projects. -- the ``/version`` endpoint returns a new field ``minimum_compatible_version`` -- added logging of intent prediction errors to evaluation script -- added histogram of confidence scores to evaluation script -- documentation for the ``ner_duckling_http`` component - -Changed -------- -- renamed CRF features ``wordX`` to ``suffixX`` and ``preX`` to ``suffixX`` -- L1 and L2 regularisation defaults in ``ner_crf`` both set to 0.1 -- ``whitespace_tokenizer`` ignores punctuation ``.,!?`` before - whitespace or end of string -- Allow multiple training processes per project -- Changed AlreadyTrainingError to MaxTrainingError. The first one was used - to indicate that the project was already training. The latest will show - an error when the server isn't able to training more models. -- ``Interpreter.ensure_model_compatibility`` takes a new parameters for - the version to compare the model version against -- confusion matrix plot gets saved to file automatically during evaluation - -Removed -------- -- dependence on spaCy when training ``ner_crf`` without POS features -- documentation for the ``ner_duckling`` component - facebook doesn't maintain - the underlying clojure version of duckling anymore. component will be - removed in the next release. - -Fixed ------ -- Fixed Luis emulation output to add start, end position and - confidence for each entity. -- Fixed byte encoding issue where training data could not be - loaded by URL in python 3. - -[0.12.3] - 2018-05-02 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- Returning used model name and project name in the response - of ``GET /parse`` and ``POST /parse`` as ``model`` and ``project`` - respectively. 
- -Fixed ------ -- readded possibility to set fixed model name from http train endpoint - - -[0.12.2] - 2018-04-20 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed duckling text extraction for ner_duckling_http - - -[0.12.1] - 2018-04-18 -^^^^^^^^^^^^^^^^^^^^^ -Added ------ -- support for retrieving training data from a URL - -Fixed ------ -- properly set duckling http url through environment setting -- improvements and fixes to the configuration and pipeline - documentation - -.. _nluv0-12-0: - -[0.12.0] - 2018-04-17 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- support for inline entity synonyms in markdown training format -- support for regex features in markdown training format -- support for splitting and training data into multiple and mixing formats -- support for markdown files containing regex-features or synonyms only -- added ability to list projects in cloud storage services for model loading -- server evaluation endpoint at ``POST /evaluate`` -- server endpoint at ``DELETE /models`` to unload models from server memory -- CRF entity recognizer now returns a confidence score when extracting entities -- added count vector featurizer to create bag of words representation -- added embedding intent classifier implemented in tensorflow -- added tensorflow requirements -- added docs blurb on handling contextual dialogue -- distribute package as wheel file in addition to source - distribution (faster install) -- allow a component to specify which languages it supports -- support for persisting models to Azure Storage -- added tokenizer for CHINESE (``zh``) as well as instructions on how to load - MITIE model - -Changed -------- -- model configuration is separated from server / train configuration. This is a - **breaking change** and models need to be retrained. See migrations guide. -- Regex features are now sorted internally. - **retrain your model if you use regex features** -- The keyword intent classifier now returns ``null`` instead - of ``"None"`` as intent name in the json result if there's no match -- in teh evaluation results, replaced ``O`` with the string - ``no_entity`` for better understanding -- The ``CRFEntityExtractor`` now only trains entity examples that have - ``"extractor": "ner_crf"`` or no extractor at all -- Ignore hidden files when listing projects or models -- Docker Images now run on python 3.6 for better non-latin character set support -- changed key name for a file in ngram featurizer -- changed ``jsonObserver`` to generate logs without a record seperator -- Improve jsonschema validation: text attribute of training data samples - can not be empty -- made the NLU server's ``/evaluate`` endpoint asynchronous - -Fixed ------ -- fixed certain command line arguments not getting passed into - the ``data_router`` - -[0.11.4] - 2018-03-19 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- google analytics docs survey code - - -[0.11.3] - 2018-02-13 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- capitalization issues during spacy named entity recognition - - -[0.11.2] - 2018-02-06 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Formatting of tokens without assigned entities in evaluation - - -[0.11.1] - 2018-02-02 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Changelog doc formatting -- fixed project loading for newly added projects to a running server -- fixed certain command line arguments not getting passed into the data_router - -.. _nluv0-11-0: - -[0.11.0] - 2018-01-30 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- non ascii character support for anything that gets json dumped (e.g. 
- training data received over HTTP endpoint) -- evaluation of entity extraction performance in ``evaluation.py`` -- support for spacy 2.0 -- evaluation of intent classification with crossvalidation in ``evaluation.py`` -- support for splitting training data into multiple files - (markdown and JSON only) - -Changed -------- -- removed ``-e .`` from requirements files - if you want to install - the app use ``pip install -e .`` -- fixed http duckling parsing for non ``en`` languages -- fixed parsing of entities from markdown training data files - - -[0.10.6] - 2018-01-02 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- support asterisk style annotation of examples in markdown format - -Fixed ------ -- Preventing capitalized entities from becoming synonyms of the form - lower-cased → capitalized - - -[0.10.5] - 2017-12-01 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- read token in server from config instead of data router -- fixed reading of models with none date name prefix in server - - -[0.10.4] - 2017-10-27 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- docker image build - - -[0.10.3] - 2017-10-26 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- support for new dialogflow data format (previously api.ai) -- improved support for custom components (components are - stored by class name in stored metadata to allow for components - that are not mentioned in the Rasa NLU registry) -- language option to convert script - -Fixed ------ -- Fixed loading of default model from S3. Fixes #633 -- fixed permanent training status when training fails #652 -- quick fix for None "_formatter_parser" bug - - -[0.10.1] - 2017-10-06 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- readme issues -- improved setup py welcome message - -.. _nluv0-10-0: - -[0.10.0] - 2017-09-27 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- Support for training data in Markdown format -- Cors support. You can now specify allowed cors origins - within your configuration file. -- The HTTP server is now backed by Klein (Twisted) instead of Flask. - The server is now asynchronous but is no more WSGI compatible -- Improved Docker automated builds -- Rasa NLU now works with projects instead of models. A project can - be the basis for a restaurant search bot in German or a customer - service bot in English. A model can be seen as a snapshot of a project. - -Changed -------- -- Root project directories have been slightly rearranged to - clean up new docker support -- use ``Interpreter.create(metadata, ...)`` to create interpreter - from dict and ``Interpreter.load(file_name, ...)`` to create - interpreter with metadata from a file -- Renamed ``name`` parameter to ``project`` -- Docs hosted on GitHub pages now: - `Documentation <https://rasahq.github.io/rasa_nlu>`_ -- Adapted remote cloud storages to support projects - (backwards incompatible!) - -Fixed ------ -- Fixed training data persistence. Fixes #510 -- Fixed UTF-8 character handling when training through HTTP interface -- Invalid handling of numbers extracted from duckling - during synonym handling. Fixes #517 -- Only log a warning (instead of throwing an exception) on - misaligned entities during mitie NER - - -[0.9.2] - 2017-08-16 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- removed unnecessary `ClassVar` import - - -[0.9.1] - 2017-07-11 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- removed obsolete ``--output`` parameter of ``train.py``. - use ``--path`` instead. fixes #473 - -.. 
_nluv0-9-0: - -[0.9.0] - 2017-07-07 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- increased test coverage to avoid regressions (ongoing) -- added regex featurization to support intent classification - and entity extraction (``intent_entity_featurizer_regex``) - -Changed -------- -- replaced existing CRF library (python-crfsuite) with - sklearn-crfsuite (due to better windows support) -- updated to spacy 1.8.2 -- logging format of logged request now includes model name and timestamp -- use module specific loggers instead of default python root logger -- output format of the duckling extractor changed. the ``value`` - field now includes the complete value from duckling instead of - just text (so this is an property is an object now instead of just text). - includes granularity information now. -- deprecated ``intent_examples`` and ``entity_examples`` sections in - training data. all examples should go into the ``common_examples`` section -- weight training samples based on class distribution during ner_crf - cross validation and sklearn intent classification training -- large refactoring of the internal training data structure and - pipeline architecture -- numpy is now a required dependency - -Removed -------- -- luis data tokenizer configuration value (not used anymore, - luis exports char offsets now) - -Fixed ------ -- properly update coveralls coverage report from travis -- persistence of duckling dimensions -- changed default response of untrained ``intent_classifier_sklearn`` - from ``"intent": None`` to ``"intent": {"name": None, "confidence": 0.0}`` -- ``/status`` endpoint showing all available models instead of only - those whose name starts with *model* -- properly return training process ids #391 - - -[0.8.12] - 2017-06-29 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed missing argument attribute error - - - -[0.8.11] - 2017-06-07 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- updated mitie installation documentation - - -[0.8.10] - 2017-05-31 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed documentation about training data format - - -[0.8.9] - 2017-05-26 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- properly handle response_log configuration variable being set to ``null`` - - -[0.8.8] - 2017-05-26 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- ``/status`` endpoint showing all available models instead of only - those whose name starts with *model* - - -[0.8.7] - 2017-05-24 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Fixed range calculation for crf #355 - - -[0.8.6] - 2017-05-15 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Fixed duckling dimension persistence. fixes #358 - - -[0.8.5] - 2017-05-10 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Fixed pypi installation dependencies (e.g. flask). fixes #354 - - -[0.8.4] - 2017-05-10 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Fixed CRF model training without entities. fixes #345 - - -[0.8.3] - 2017-05-10 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Fixed Luis emulation and added test to catch regression. Fixes #353 - - -[0.8.2] - 2017-05-08 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- deepcopy of context #343 - - -[0.8.1] - 2017-05-08 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- NER training reuses context inbetween requests - -.. 
_nluv0-8-0: - -[0.8.0] - 2017-05-08 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- ngram character featurizer (allows better handling of out-of-vocab words) -- replaced pre-wired backends with more flexible pipeline definitions -- return top 10 intents with sklearn classifier - `#199 <https://github.com/RasaHQ/rasa_nlu/pull/199>`_ -- python type annotations for nearly all public functions -- added alternative method of defining entity synonyms -- support for arbitrary spacy language model names -- duckling components to provide normalized output for structured entities -- Conditional random field entity extraction (Markov model for entity - tagging, better named entity recognition with low and medium data and - similarly well at big data level) -- allow naming of trained models instead of generated model names -- dynamic check of requirements for the different components & error - messages on missing dependencies -- support for using multiple entity extractors and combining results downstream - -Changed -------- -- unified tokenizers, classifiers and feature extractors to implement - common component interface -- ``src`` directory renamed to ``rasa_nlu`` -- when loading data in a foreign format (api.ai, luis, wit) the data - gets properly split into intent & entity examples -- Configuration: - - added ``max_number_of_ngrams`` - - removed ``backend`` and added ``pipeline`` as a replacement - - added ``luis_data_tokenizer`` - - added ``duckling_dimensions`` -- parser output format changed - from ``{"intent": "greeting", "confidence": 0.9, "entities": []}`` - - to ``{"intent": {"name": "greeting", "confidence": 0.9}, "entities": []}`` -- entities output format changed - from ``{"start": 15, "end": 28, "value": "New York City", "entity": "GPE"}`` - - to ``{"extractor": "ner_mitie", "processors": ["ner_synonyms"], "start": 15, "end": 28, "value": "New York City", "entity": "GPE"}`` - - where ``extractor`` denotes the entity extractor that originally found an entity, and ``processor`` denotes components that alter entities, such as the synonym component. -- camel cased MITIE classes (e.g. ``MITIETokenizer`` → ``MitieTokenizer``) -- model metadata changed, see migration guide -- updated to spacy 1.7 and dropped training and loading capabilities for - the spacy component (breaks existing spacy models!) -- introduced compatibility with both Python 2 and 3 - -Fixed ------ -- properly parse ``str`` additionally to ``unicode`` - `#210 <https://github.com/RasaHQ/rasa_nlu/issues/210>`_ -- support entity only training - `#181 <https://github.com/RasaHQ/rasa_nlu/issues/181>`_ -- resolved conflicts between metadata and configuration values - `#219 <https://github.com/RasaHQ/rasa_nlu/issues/219>`_ -- removed tokenization when reading Luis.ai data (they changed their format) - `#241 <https://github.com/RasaHQ/rasa_nlu/issues/241>`_ - - -[0.7.4] - 2017-03-27 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed failed loading of example data after renaming attributes, - i.e. 
"KeyError: 'entities'" - - -[0.7.3] - 2017-03-15 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed regression in mitie entity extraction on special characters -- fixed spacy fine tuning and entity recognition on passed language instance - - -[0.7.2] - 2017-03-13 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- python documentation about calling rasa NLU from python - - -[0.7.1] - 2017-03-10 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- mitie tokenization value generation - `#207 <https://github.com/RasaHQ/rasa_nlu/pull/207>`_, thanks @cristinacaputo -- changed log file extension from ``.json`` to ``.log``, - since the contained text is not proper json - -.. _nluv0-7-0: - -[0.7.0] - 2017-03-10 -^^^^^^^^^^^^^^^^^^^^ -This is a major version update. Please also have a look at the -`Migration Guide <https://rasahq.github.io/rasa_nlu/migrations.html>`_. - -Added ------ -- Changelog ;) -- option to use multi-threading during classifier training -- entity synonym support -- proper temporary file creation during tests -- mitie_sklearn backend using mitie tokenization and sklearn classification -- option to fine-tune spacy NER models -- multithreading support of build in REST server (e.g. using gunicorn) -- multitenancy implementation to allow loading multiple models which - share the same backend - -Fixed ------ -- error propagation on failed vector model loading (spacy) -- escaping of special characters during mitie tokenization - - -[0.6-beta] - 2017-01-31 -^^^^^^^^^^^^^^^^^^^^^^^ - -.. _`master`: https://github.com/RasaHQ/rasa_nlu/ - -.. _`Semantic Versioning`: http://semver.org/ diff --git a/docs/nlu/old-nlu-migration-guide.rst b/docs/nlu/old-nlu-migration-guide.rst deleted file mode 100644 index 4e861257cbb4..000000000000 --- a/docs/nlu/old-nlu-migration-guide.rst +++ /dev/null @@ -1,255 +0,0 @@ -:desc: Read more about changes between major versions of our open source - NLP engine and how to migrate from one version to another. - -.. _old-nlu-migration-guide: - -Migration Guide -=============== -This page contains information about changes between major versions and -how you can migrate from one version to another. - -0.14.x to 0.15.0 ----------------- - -.. warning:: - - This is a release **breaking backwards compatibility**. - Unfortunately, it is not possible to load - previously trained models (as the stored file names have changed as - well as the configuration and metadata). Please make sure to retrain - a model before trying to use it with this improved version. - -model configuration -~~~~~~~~~~~~~~~~~~~ -- The standard pipelines have been renamed. ``spacy_sklearn`` is now - ``pretrained_embeddings_spacy`` and ``tensorflow_embedding`` is now - ``supervised_embeddings``. -- Components names used for nlu config have been changed. - Use component class name in nlu config file. - -custom components -~~~~~~~~~~~~~~~~~ -- The signature of Component's methods have been changed: - - - ``load(...)``, ``create(...)`` and ``cache_key(...)`` methods - additionally take component's meta/config dicts - - ``persist(...)`` method additionally takes file name prefix - Change your custom components accordingly. 
- -function names -~~~~~~~~~~~~~~ -- ``rasa_nlu.evaluate`` was renamed to ``rasa_nlu.test`` -- ``rasa_nlu.test.run_cv_evaluation`` was renamed to - ``rasa_nlu.test.cross_validate`` -- ``rasa_nlu.train.do_train()`` was renamed to to ``rasa_nlu.train.train()`` - -0.13.x to 0.14.0 ----------------- -- ``/config`` endpoint removed, when training a new model, the user should - always post the configuration as part of the request instead of relying - on the servers config. -- ``ner_duckling`` support has been removed. Use ``DucklingHTTPExtractor`` - instead. More info about ``DucklingHTTPExtractor`` can be found at - https://rasa.com/docs/nlu/components/#ner-duckling-http. - -0.13.x to 0.13.3 ----------------- -- ``rasa_nlu.server`` has to be supplied with a ``yml`` file defining the - model endpoint from which to retrieve training data. The file location has - be passed with the ``--endpoints`` argument, e.g. - ``rasa run --endpoints endpoints.yml`` - ``endpoints.yml`` needs to contain the ``model`` key - with a ``url`` and an optional ``token``. Here's an example: - - .. code-block:: yaml - - model: - url: http://my_model_server.com/models/default/nlu/tags/latest - token: my_model_server_token - - .. note:: - - If you configure ``rasa.nlu.server`` to pull models from a remote server, - the default project name will be used. It is defined - ``RasaNLUModelConfig.DEFAULT_PROJECT_NAME``. - - -- ``rasa.nlu.train`` can also be run with the ``--endpoints`` argument - if you want to pull training data from a URL. Alternatively, the - current ``--url`` syntax is still supported. - - .. code-block:: yaml - - data: - url: http://my_data_server.com/projects/default/data - token: my_data_server_token - - .. note:: - - Your endpoint file may contain entries for both ``model`` and ``data``. - ``rasa.nlu.server`` and ``rasa.nlu.train`` will pick the relevant entry. - -- If you directly access the ``DataRouter`` class or ``rasa.nlu.train``'s - ``do_train()`` method, you can directly create instances of - ``EndpointConfig`` without creating a ``yml`` file. Example: - - .. code-block:: python - - from rasa.nlu.utils import EndpointConfig - from rasa.nlu.data_router import DataRouter - - model_endpoint = EndpointConfig( - url="http://my_model_server.com/models/default/nlu/tags/latest", - token="my_model_server_token" - ) - - interpreter = DataRouter("projects", model_server=model_endpoint) - - -0.12.x to 0.13.0 ----------------- - -.. warning:: - - This is a release **breaking backwards compatibility**. - Unfortunately, it is not possible to load previously trained models as - the parameters for the tensorflow and CRF models changed. - -CRF model configuration -~~~~~~~~~~~~~~~~~~~~~~~ - -The feature names for the features of the entity CRF have changed: - -+------------------+------------------+ -| old feature name | new feature name | -+==================+==================+ -| pre2 | prefix2 | -+------------------+------------------+ -| pre5 | prefix5 | -+------------------+------------------+ -| word2 | suffix2 | -+------------------+------------------+ -| word3 | suffix3 | -+------------------+------------------+ -| word5 | suffix5 | -+------------------+------------------+ - -Please change these keys in your pipeline configuration of the ``CRFEntityExtractor`` -components ``features`` attribute if you use them. - -0.11.x to 0.12.0 ----------------- - -.. warning:: - - This is a release **breaking backwards compatibility**. 
- Unfortunately, it is not possible to load - previously trained models (as the stored file formats have changed as - well as the configuration and metadata). Please make sure to retrain - a model before trying to use it with this improved version. - -model configuration -~~~~~~~~~~~~~~~~~~~ -We have split the configuration in a model configuration and parameters used -to configure the server, train, and evaluate scripts. The configuration -file now only contains the ``pipeline`` as well as the ``language`` -parameters. Example: - - .. code-block:: yaml - - langauge: "en" - - pipeline: - - name: "SpacyNLP" - model: "en" # parameter of the spacy component - - name: "EntitySynonymMapper" - - -All other parameters have either been moved to the scripts -for training, :ref:`serving models <running-the-server>`, or put into the -:ref:`pipeline configuration <components>`. - -persistors: -~~~~~~~~~~~ -- renamed ``AWS_REGION`` to ``AWS_DEFAULT_REGION`` -- always make sure to specify the bucket using env ``BUCKET_NAME`` -- are now configured solely over environment variables - -0.9.x to 0.10.0 ---------------- -- We introduced a new concept called a ``project``. You can have multiple versions - of a model trained for a project. E.g. you can train an initial model and - add more training data and retrain that project. This will result in a new - model version for the same project. This allows you to, allways request - the latest model version from the http server and makes the model handling - more structured. -- If you want to reuse trained models you need to move them in a directory named - after the project. E.g. if you already got a trained model in directory ``my_root/model_20170628-002704`` - you need to move that to ``my_root/my_project/model_20170628-002704``. Your - new projects name will be ``my_project`` and you can query the model using the - http server using ``curl http://localhost:5000/parse?q=hello%20there&project=my_project`` -- Docs moved to https://rasahq.github.io/rasa_nlu/ -- Renamed ``name`` parameter to ``project``. This means for training requests you now need to pass the ``project parameter - instead of ``name``, e.g. ``POST /train?project=my_project_name`` with the body of the - request containing the training data -- Adapted remote cloud storages to support projects. This is a backwards incompatible change, - and unfortunately you need to retrain uploaded models and reupload them. - -0.8.x to 0.9.x ---------------- -- add ``SpacyTokenizer`` to trained spacy_sklearn models metadata (right after the ``SpacyNLP``). alternative is to retrain the model - -0.7.x to 0.8.x ---------------- - -- The training and loading capability for the spacy entity extraction was dropped in favor of the new CRF extractor. That means models need to be retrained using the crf extractor. - -- The parameter and configuration value name of ``backend`` changed to ``pipeline``. - -- There have been changes to the model metadata format. You can either retrain your models or change the stored - metadata.json: - - - rename ``language_name`` to ``language`` - - rename ``backend`` to ``pipeline`` - - for mitie models you need to replace ``feature_extractor`` with ``mitie_feature_extractor_fingerprint``. - That fingerprint depends on the language you are using, for ``en`` it - is ``"mitie_feature_extractor_fingerprint": 10023965992282753551``. - -0.6.x to 0.7.x --------------- - -- The parameter and configuration value name of ``server_model_dir`` changed to ``server_model_dirs``. 
- -- The parameter and configuration value name of ``write`` changed to ``response_log``. It now configures the - *directory* where the logs should be written to (not a file!) - -- The model metadata format has changed. All paths are now relative with respect to the ``path`` specified in the - configuration during training and loading. If you want to run models that are trained with a - version prev to 0.7 you need to adapt the paths manually in ``metadata.json`` from - - .. code-block:: json - - { - "trained_at": "20170304-191111", - "intent_classifier": "model_XXXX_YYYY_ZZZZ/intent_classifier.pkl", - "training_data": "model_XXXX_YYYY_ZZZZ/training_data.json", - "language_name": "en", - "entity_extractor": "model_XXXX_YYYY_ZZZZ/ner", - "feature_extractor": null, - "backend": "spacy_sklearn" - } - - to something along the lines of this (making all paths relative to the models base dir, which is ``model_XXXX_YYYY_ZZZZ/``): - - .. code-block:: json - - { - "trained_at": "20170304-191111", - "intent_classifier": "intent_classifier.pkl", - "training_data": "training_data.json", - "language_name": "en", - "entity_synonyms": null, - "entity_extractor": "ner", - "feature_extractor": null, - "backend": "spacy_sklearn" - } diff --git a/docs/nlu/training-data-format.rst b/docs/nlu/training-data-format.rst deleted file mode 100644 index e60e5c185b0b..000000000000 --- a/docs/nlu/training-data-format.rst +++ /dev/null @@ -1,244 +0,0 @@ -:desc: Read more about how to format training data with Rasa NLU for open - source natural language processing. - -.. _training-data-format: - -Training Data Format -==================== - -.. edit-link:: - -.. contents:: - :local: - -Data Formats -~~~~~~~~~~~~ - - -You can provide training data as Markdown or as JSON, as a single file or as a directory containing multiple files. -Note that Markdown is usually easier to work with. - - -Markdown Format ---------------- - -Markdown is the easiest Rasa NLU format for humans to read and write. -Examples are listed using the unordered -list syntax, e.g. minus ``-``, asterisk ``*``, or plus ``+``. -Examples are grouped by intent, and entities are annotated as Markdown links, -e.g. ``[entity](entity name)``. - -.. code-block:: md - - ## intent:check_balance - - what is my balance <!-- no entity --> - - how much do I have on my [savings](source_account) <!-- entity "source_account" has value "savings" --> - - how much do I have on my [savings account](source_account:savings) <!-- synonyms, method 1--> - - Could I pay in [yen](currency)? <!-- entity matched by lookup table --> - - ## intent:greet - - hey - - hello - - ## synonym:savings <!-- synonyms, method 2 --> - - pink pig - - ## regex:zipcode - - [0-9]{5} - - ## lookup:currencies <!-- lookup table list --> - - Yen - - USD - - Euro - - ## lookup:additional_currencies <!-- no list to specify lookup table file --> - path/to/currencies.txt - -The training data for Rasa NLU is structured into different parts: - -- common examples -- synonyms -- regex features and -- lookup tables - -While common examples is the only part that is mandatory, including the others will help the NLU model -learn the domain with fewer examples and also help it be more confident of its predictions. - -Synonyms will map extracted entities to the same name, for example mapping "my savings account" to simply "savings". -However, this only happens *after* the entities have been extracted, so you need to provide examples with the synonyms present so that Rasa can learn to pick them up. 
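For illustration (the offsets and values below are constructed for this example, not real parser output), an entity extracted from ``how much do I have on my savings account`` keeps its literal character span while the synonym mapping rewrites its value:

.. code-block:: python

    message = "how much do I have on my savings account"
    # message[25:40] == "savings account" -- the literal text that was extracted
    entity = {
        "entity": "source_account",
        "start": 25,
        "end": 40,
        "value": "savings",  # canonical value after synonym mapping
    }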
- -Lookup tables may be specified either directly as lists or as txt files containing newline-separated words or phrases. Upon loading the training data, these files are used to generate case-insensitive regex patterns that are added to the regex features. For example, in this case a list of currency names is supplied so that it is easier to pick out this entity. - -.. note:: - The common theme here is that common examples, regex features and lookup tables merely act as cues to the final NLU model by providing additional features to the machine learning algorithm during training. Therefore, it must not be assumed that having a single example would be enough for the model to robustly identify intents and/or entities across all variants of that example. - -JSON Format ------------ - -The JSON format consists of a top-level object called ``rasa_nlu_data``, with the keys -``common_examples``, ``entity_synonyms`` and ``regex_features``. -The most important one is ``common_examples``. - -.. code-block:: json - - { - "rasa_nlu_data": { - "common_examples": [], - "regex_features" : [], - "lookup_tables" : [], - "entity_synonyms": [] - } - } - -The ``common_examples`` are used to train your model. You should put all of your training -examples in the ``common_examples`` array. -Regex features are a tool to help the classifier detect entities or intents and improve the performance. - - -Improving Intent Classification and Entity Recognition -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Common Examples ---------------- - -Common examples have three components: ``text``, ``intent`` and ``entities``. The first two are strings while the last one is an array. - - - The *text* is the user message [required] - - The *intent* is the intent that should be associated with the text [optional] - - The *entities* are specific parts of the text which need to be identified [optional] - -Entities are specified with a ``start`` and an ``end`` value, which together make a python -style range to apply to the string, e.g. in the example below, with ``text="show me chinese -restaurants"``, then ``text[8:15] == 'chinese'``. Entities can span multiple words, and in -fact the ``value`` field does not have to correspond exactly to the substring in your example. -That way you can map synonyms, or misspellings, to the same ``value``. - -.. code-block:: md - - ## intent:restaurant_search - - show me [chinese](cuisine) restaurants - - -Regular Expression Features ---------------------------- -Regular expressions can be used to support the intent classification and entity extraction. For example, if your entity has a deterministic structure (like a zipcode or an email address), you can use a regular expression to ease detection of that entity. For the zipcode example it might look like this: - -.. code-block:: md - - ## regex:zipcode - - [0-9]{5} - - ## regex:greet - - hey[^\\s]* - -The name doesn't define the entity nor the intent, it is just a human readable description for you to remember what -this regex is used for and is the title of the corresponding pattern feature. As you can see in the above example, you can also use the regex features to improve the intent -classification performance. - -Try to create your regular expressions in a way that they match as few words as possible. E.g. using ``hey[^\s]*`` -instead of ``hey.*``, as the later one might match the whole message whereas the first one only matches a single word. 
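To see the difference concretely, here is a small self-contained check using plain Python ``re`` (independent of Rasa, shown only to illustrate why the narrower pattern is preferable):

.. code-block:: python

    import re

    message = "hey there, how is it going?"

    # the greedy pattern swallows the rest of the message
    print(re.search(r"hey.*", message).group())      # hey there, how is it going?

    # the narrower pattern stops at the first whitespace
    print(re.search(r"hey[^\s]*", message).group())  # hey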
- -Regex features for entity extraction are currently only supported by the ``CRFEntityExtractor`` component! Hence, other entity -extractors, like ``MitieEntityExtractor`` or ``SpacyEntityExtractor`` won't use the generated features and their presence will not improve entity recognition -for these extractors. Currently, all intent classifiers make use of available regex features. - -.. note:: - Regex features don't define entities nor intents! They simply provide patterns to help the classifier - recognize entities and related intents. Hence, you still need to provide intent & entity examples as part of your - training data! - -.. _lookup-tables: - -Lookup Tables -------------- -Lookup tables in the form of external files or lists of elements may also be specified in the training data. -The externally supplied lookup tables must be in a newline-separated format. -For example, ``data/test/lookup_tables/plates.txt`` may contain: - -.. literalinclude:: ../../data/test/lookup_tables/plates.txt - -And can be loaded as: - -.. code-block:: md - - ## lookup:plates - data/test/lookup_tables/plates.txt - -Alternatively, lookup elements may be directly included as a list - -.. code-block:: md - - ## lookup:plates - - beans - - rice - - tacos - - cheese - -When lookup tables are supplied in training data, the contents are combined -into a large, case-insensitive regex pattern that looks for exact matches in -the training examples. These regexes match over multiple tokens, so -``lettuce wrap`` would match ``get me a lettuce wrap ASAP`` as ``[0 0 0 1 1 0]``. -These regexes are processed identically to the regular regex patterns -directly specified in the training data. - -.. note:: - For lookup tables to be effective, there must be a few examples of matches in your training data. Otherwise the model will not learn to use the lookup table match features. - - -.. warning:: - You have to be careful when you add data to the lookup table. - For example if there are false positives or other noise in the table, - this can hurt performance. So make sure your lookup tables contain - clean data. - - -Normalizing Data -~~~~~~~~~~~~~~~~ - -.. _entity_synonyms: - -Entity Synonyms ---------------- -If you define entities as having the same value they will be treated as synonyms. Here is an example of that: - -.. code-block:: md - - ## intent:search - - in the center of [NYC](city:New York City) - - in the centre of [New York City](city) - - -As you can see, the entity ``city`` has the value ``New York City`` in both examples, even though the text in the first -example states ``NYC``. By defining the value attribute to be different from the value found in the text between start -and end index of the entity, you can define a synonym. Whenever the same text will be found, the value will use the -synonym instead of the actual text in the message. - -To use the synonyms defined in your training data, you need to make sure the pipeline contains the ``EntitySynonymMapper`` -component (see :ref:`components`). - -Alternatively, you can add an "entity_synonyms" array to define several synonyms to one entity value. Here is an example of that: - -.. code-block:: md - - ## synonym:New York City - - NYC - - nyc - - the big apple - -.. note:: - Please note that adding synonyms using the above format does not improve the model's classification of those entities. 
- **Entities must be properly classified before they can be replaced with the synonym value.** - - -Generating More Entity Examples -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It is sometimes helpful to generate a bunch of entity examples, for -example if you have a database of restaurant names. There are a couple -of tools built by the community to help with that. - -You can use `Chatito <https://rodrigopivi.github.io/Chatito/>`__ , a tool for generating training datasets in rasa's format using a simple DSL or `Tracy <https://yuukanoo.github.io/tracy>`__, a simple GUI to create training datasets for rasa. - -However, creating synthetic examples usually leads to overfitting, -it is a better idea to use :ref:`lookup-tables` instead if you have a large number -of entity values. diff --git a/docs/nlu/using-nlu-only.rst b/docs/nlu/using-nlu-only.rst deleted file mode 100644 index 12acd9c88d12..000000000000 --- a/docs/nlu/using-nlu-only.rst +++ /dev/null @@ -1,63 +0,0 @@ -:desc: Find out how to use only Rasa NLU as a standalone NLU service for your chatbot or virtual assistant. - -.. _using-nlu-only: - -Using NLU Only -============== - -.. edit-link:: - - -If you want to use Rasa only as an NLU component, you can! - -Training NLU-only models ------------------------- - -To train an NLU model only, run: - -.. code-block:: bash - - rasa train nlu - -This will look for NLU training data files in the ``data/`` directory -and saves a trained model in the ``models/`` directory. -The name of the model will start with ``nlu-``. - -Testing your NLU model on the command line ------------------------------------------- - -To try out your NLU model on the command line, use the ``rasa shell nlu`` command: - - -.. code-block:: bash - - rasa shell nlu - - -This will start the rasa shell and ask you to type in a message to test. -You can keep typing in as many messages as you like. - -Alternatively, you can leave out the ``nlu`` argument and pass in an nlu-only model directly: - -.. code-block:: bash - - rasa shell -m models/nlu-20190515-144445.tar.gz - - - -Running an NLU server ---------------------- - -To start a server with your NLU model, pass in the model name at runtime: - -.. code-block:: bash - - rasa run --enable-api -m models/nlu-20190515-144445.tar.gz - - -You can then request predictions from your model using the ``/model/parse`` endpoint. -To do this, run: - -.. 
code-block:: bash - - curl localhost:5005/model/parse -d '{"text":"hello"}' diff --git a/docs/package.json b/docs/package.json new file mode 100644 index 000000000000..38a9a2c48ca9 --- /dev/null +++ b/docs/package.json @@ -0,0 +1,63 @@ +{ + "name": "rasa", + "version": "0.0.0", + "private": true, + "scripts": { + "start": "yarn pre-build && docusaurus start", + "pre-build": "yarn copy-md-files && yarn variables && yarn program-outputs && yarn included-sources && yarn autodoc", + "serve": "netlify dev -d build", + "build": "docusaurus build", + "swizzle": "docusaurus swizzle", + "new-version": "docusaurus docs:version", + "variables": "node scripts/compile_variables.js", + "program-outputs": "node scripts/compile_program_outputs.js", + "copy-md-files": "node scripts/copy_md_files.js", + "included-sources": "node scripts/compile_included_sources.js", + "autodoc": "echo 'Generating autodoc' && pydoc-markdown", + "clean": "find docs/sources -type f -not -name '.keep' -print0 | xargs -0 -I {} rm {}", + "ci": "yarn install --frozen-lockfile" + }, + "dependencies": { + "@docusaurus/core": "^2.0.0-alpha.61", + "@docusaurus/plugin-client-redirects": "^2.0.0-alpha.61", + "@docusaurus/plugin-content-docs": "^2.0.0-alpha.61", + "@docusaurus/plugin-sitemap": "^2.0.0-alpha.61", + "@docusaurus/theme-classic": "^2.0.0-alpha.61", + "@fortawesome/fontawesome-svg-core": "^1.2.30", + "@fortawesome/free-solid-svg-icons": "^5.14.0", + "@fortawesome/react-fontawesome": "^0.1.11", + "@philpl/buble": "^0.19.7", + "classnames": "^2.2.6", + "clsx": "^1.1.1", + "core-js": "^3.6.5", + "fs-extra": "^9.0.1", + "globby": "^10.0.1", + "mobx": "^4.3.1", + "react": "^16.8.4", + "react-dom": "^16.8.4", + "react-live": "^2.2.2", + "react-promise": "^3.0.2", + "redoc": "^2.0.0-rc.31", + "remark": "^12.0.0", + "remark-collapse": "^0.1.2", + "remark-sources": "^1.0.1", + "styled-components": "^4.2.0", + "unist-util-visit-children": "^1.1.3" + }, + "browserslist": { + "production": [ + ">0.2%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 1 chrome version", + "last 1 firefox version", + "last 1 safari version" + ] + }, + "devDependencies": { + "netlify-cli": "^2.59.0", + "toml": "^3.0.0" + } +} diff --git a/docs/plugins/google-tagmanager/client.js b/docs/plugins/google-tagmanager/client.js new file mode 100644 index 000000000000..b1db6df72673 --- /dev/null +++ b/docs/plugins/google-tagmanager/client.js @@ -0,0 +1,19 @@ +import ExecutionEnvironment from '@docusaurus/ExecutionEnvironment'; + +export default (function () { + if (!ExecutionEnvironment.canUseDOM) { + return null; + } + + return { + onRouteUpdate({location}) { + if (location) { + setTimeout(() => { + window.dataLayer.push({ + event: 'route-update', + }); + }, 50); + } + }, + }; +})(); diff --git a/docs/plugins/google-tagmanager/index.js b/docs/plugins/google-tagmanager/index.js new file mode 100644 index 000000000000..285e74ae3cf6 --- /dev/null +++ b/docs/plugins/google-tagmanager/index.js @@ -0,0 +1,74 @@ +const path = require('path'); + +module.exports = function (context, options) { + const {siteConfig} = context; + const {themeConfig} = siteConfig; + const {gtm} = themeConfig || {}; + + if (!gtm) { + throw new Error( + `You need to specify 'gtm' object in 'themeConfig' with 'containerID' field in it to use google-tagmanager`, + ); + } + + const {containerID} = gtm; + + if (!containerID) { + throw new Error( + 'You specified the `gtm` object in `themeConfig` but the `containerID` field was missing. 
' + + 'Please ensure this is not a mistake.', + ); + } + + const isProd = process.env.NODE_ENV === 'production'; + + return { + name: 'google-tagmanager', + + getClientModules() { + return isProd ? [path.resolve(__dirname, './client')] : []; + }, + + injectHtmlTags() { + if (!isProd) { + return {}; + } + return { + headTags: [ + { + tagName: 'link', + attributes: { + rel: 'preconnect', + href: 'https://www.google-analytics.com', + }, + }, + { + tagName: 'link', + attributes: { + rel: 'preconnect', + href: 'https://www.googletagmanager.com', + }, + }, + { + tagName: 'script', + attributes: { + async: true, + src: `https://www.googletagmanager.com/gtm.js?id=${containerID}`, + }, + }, + { + tagName: 'script', + innerHTML: `(function(w,d,l){w[l]=w[l]||[];w[l].push({'gtm.start':new Date().getTime(),event:'gtm.js'}); })(window,document,'dataLayer');`, + }, + ], + preBodyTags: [ + { + tagName: 'noscript', + innerHTML: `<iframe src="https://www.googletagmanager.com/ns.html?id=${containerID}" height="0" width="0" style="display:none;visibility:hidden"></iframe>`, + } + ], + postBodyTags: [], + }; + }, + }; +}; diff --git a/docs/plugins/included_source.js b/docs/plugins/included_source.js new file mode 100644 index 000000000000..d2e75fffb370 --- /dev/null +++ b/docs/plugins/included_source.js @@ -0,0 +1,79 @@ +/** + This plugin gives us the ability to include source files + in a code-block in the docs, by using the following + syntax: + + ```python (docs/sources/path/to/file.py) + ``` + + To make it work, you need to prefix the source file by `docs/sources/`. + + It relies on `remark-source` and on a pre-build phase, + before docusaurus is started (or built). It allows us to support separate + versions of the docs (and of the program outputs). +*/ +const fs = require('fs-extra'); +const globby = require('globby'); + +const { readFile, outputFile } = fs; + +const defaultOptions = { + docsDir: './docs', + sourceDir: './docs/sources', + include: ['**.mdx', '**.md'], + pathPrefix: '../', +}; + +/** + This function is used copy the included sources + requested in the docs. It parses all the docs files, + finds the included sources and copy them under the `sourceDir`. 
+ + Options: + - docsDir: the directory containing the docs files + - sourceDir: the directory that will contain the included sources + - include: list of patterns to look for doc files + - pathPrefix: a path prefix to use for reading the sources +*/ +async function getIncludedSources(options) { + + options = { ...defaultOptions, ...options }; + const { docsDir, include, sourceDir, pathPrefix } = options; + const cleanedSourceDir = sourceDir.replace('./', ''); + const includedSourceRe =`\`\`\`[a-z]+ \\(${cleanedSourceDir}/([^\\]\\s]+)\\)\n\`\`\``; + + // first, gather all the docs files + const docsFiles = await globby(include, { + cwd: docsDir, + }); + const seen = new Set(); + // second, read every file source + let sourceFiles = await Promise.all(docsFiles.map(async (source) => { + const data = await readFile(`${docsDir}/${source}`); + const sourceFiles = []; + let group, sourceFile, content; + // third, find out if there is a source to be included + // there can be multiple sources in the same file + const re = new RegExp(includedSourceRe, 'gi'); + while ((group = re.exec(data)) !== null) { + sourceFile = group[1]; + if (seen.has(sourceFile)) { + continue; + } + seen.add(sourceFile); + // fourth, read the source file + content = await readFile(`${pathPrefix}${sourceFile}`); + sourceFiles.push([sourceFile, content]); + } + return sourceFiles; + })); + sourceFiles = sourceFiles.flat().filter(pair => pair.length > 0); + + // finally, write all the source files in the `sourceDir` + return await Promise.all(sourceFiles.map(async ([sourceFile, content]) => { + return await outputFile(`${sourceDir}/${sourceFile}`, content); + })); +}; + + +module.exports = getIncludedSources; diff --git a/docs/plugins/program_output.js b/docs/plugins/program_output.js new file mode 100644 index 000000000000..ee8fb964bc1f --- /dev/null +++ b/docs/plugins/program_output.js @@ -0,0 +1,126 @@ +/** + This plugin gives us the ability to insert the output of + a program in a code-block in the docs, by using the following + syntax: + + ```text [rasa --help] + ``` + It is inspired by `remark-source` and also relies on a pre-build phase, + before docusaurus is started (or built). It allows us to support separate + versions of the docs (and of the program outputs). +*/ +const fs = require('fs'); +const globby = require('globby'); +const visitChildren = require('unist-util-visit-children'); +const { promisify } = require('util'); + +const exec = promisify(require('child_process').exec); +const { readFile, writeFile } = fs.promises; + + +const PROGRAM_OUTPUT_RE = /```[a-z]+ \[([^\]]+)\]\n```/; + +const defaultOptions = { + docsDir: './docs', + sourceDir: './docs/sources', + include: ['**.mdx', '**.md'], + commandPrefix: '', +}; + +/** + This function is use to get output of programs + requested in the docs. It parses all the docs files, + generates outputs and save them as files. 
+ + Options: + - docsDir: the directory containing the docs files + - sourceDir: the directory that will contain the program outputs + - include: list of patterns to look for doc files + - commandPrefix: a prefix to be prepended before each command +*/ +async function getProgramOutputs(options) { + + options = { ...defaultOptions, ...options }; + const { docsDir, include, sourceDir, commandPrefix } = options; + // first, gather all the docs files + const docsFiles = await globby(include, { + cwd: docsDir, + }); + const seen = new Set(); + // second, read every file source + let commands = await Promise.all(docsFiles.map(async (source) => { + const data = await readFile(`${docsDir}/${source}`); + const commands = []; + let group, command, output; + // third, find out if there is a program output to be generated + // there can be multiple outputs in the same file + const re = new RegExp(PROGRAM_OUTPUT_RE, 'gi'); + while ((group = re.exec(data)) !== null) { + command = group[1]; + if (seen.has(command)) { + continue; + } + seen.add(command); + // fourth, call the command to generate the output + output = await exec(`${commandPrefix} ${command}`); + commands.push([command, output.stdout]); + } + return commands; + })); + commands = commands.flat().filter(pair => pair.length > 0); + + // finally, write all the command outputs as files in the `sourceDir` + return await Promise.all(commands.map(async ([command, output]) => { + return await writeFile(`${sourceDir}/${commandToFilename(command)}`, output); + })); +}; + + +/** + Custom remark plugin to replace the following blocks: + + ```text [rasa --help] + ``` + + with the actual output of the program (here `rasa --help`). + It relies on the output of `getProgramOutputs()` above, + and is inspired by `remark-sources` plugin.
+*/ +function remarkProgramOutput(options = {}) { + options = { ...defaultOptions, ...options }; + return (root) => { + visitChildren((node, index, parent) => { + if (node && node.type === 'code') { + const content = readCommandOutput(node.meta, options); + if (content !== undefined) { + node.value = content; + } + } + })(root); + }; +} + + +function readCommandOutput(meta, { sourceDir }) { + if (!meta) { + return undefined; + } + if (meta[0] !== '[' || meta[meta.length - 1] !== ']') { + return undefined; + } + meta = meta.slice(1, -1); + try { + return fs.readFileSync(`${sourceDir}/${commandToFilename(meta)}`, { encoding: 'utf8' }); + } catch (e) { + throw new Error(`Failed to read file: ${meta}`); + } +} + + +function commandToFilename(command) { + return command.replace(/[^a-z0-9]/gi, '_').toLowerCase() + '.txt'; +} + + +module.exports = getProgramOutputs; +module.exports.remarkProgramOutput = remarkProgramOutput; diff --git a/docs/pydoc-markdown.yml b/docs/pydoc-markdown.yml new file mode 100644 index 000000000000..7f182bff41d6 --- /dev/null +++ b/docs/pydoc-markdown.yml @@ -0,0 +1,13 @@ +loaders: + - type: python + search_path: [../] + packages: + - rasa +processors: + - type: filter + - type: smart + - type: crossref +renderer: + type: docusaurus + docs_base_path: docs/ + sidebar_top_level_label: 'Code reference' diff --git a/docs/scripts/compile_included_sources.js b/docs/scripts/compile_included_sources.js new file mode 100644 index 000000000000..e0a4ec8bd22a --- /dev/null +++ b/docs/scripts/compile_included_sources.js @@ -0,0 +1,9 @@ +const getIncludedSources = require('../plugins/included_source.js'); + + + +console.info('Computing included sources'); +getIncludedSources({ + docsDir: './docs', + include: ['**.mdx', '**.md'], +}); diff --git a/docs/scripts/compile_program_outputs.js b/docs/scripts/compile_program_outputs.js new file mode 100644 index 000000000000..edd44b414093 --- /dev/null +++ b/docs/scripts/compile_program_outputs.js @@ -0,0 +1,10 @@ +const getProgramOutputs = require('../plugins/program_output.js'); + + + +console.info('Computing program outputs'); +getProgramOutputs({ + docsDir: './docs', + include: ['**.mdx', '**.md'], + commandPrefix: 'poetry run', +}); diff --git a/docs/scripts/compile_variables.js b/docs/scripts/compile_variables.js new file mode 100644 index 000000000000..18c72f51ecec --- /dev/null +++ b/docs/scripts/compile_variables.js @@ -0,0 +1,32 @@ +const { readFileSync, writeFileSync } = require('fs'); +const { execSync } = require('child_process'); +const toml = require('toml'); + +const VARIABLES_FILE_PATH = './docs/variables.json'; +const PYPROJECT_FILE_PATH = '../pyproject.toml'; +const COMMAND_RASA_SDK_VERSION = + 'python -c "from rasa_sdk import __version__ as rasa_sdk_version; print(rasa_sdk_version)"'; +const DISCLAIMER = 'this file is automatically generated, please do not update it manually'; +const JSON_SPACE_INDENT = 4; + +const getRasaVersion = () => { + const pyproject = readFileSync(PYPROJECT_FILE_PATH).toString(); + return toml.parse(pyproject).tool.poetry.version; +}; + +const getRasaSdkVersion = () => execSync(COMMAND_RASA_SDK_VERSION).toString().trim(); + +const writeVariablesFile = () => { + const variables = JSON.stringify( + { + release: getRasaVersion(), + rasa_sdk_version: getRasaSdkVersion(), + }, + null, + JSON_SPACE_INDENT, + ); + writeFileSync(VARIABLES_FILE_PATH, variables); +}; + +console.info(`Computing docs variables and writing to ${VARIABLES_FILE_PATH}`); +writeVariablesFile(); diff --git 
a/docs/scripts/copy_md_files.js b/docs/scripts/copy_md_files.js new file mode 100644 index 000000000000..1cbfe269a3f3 --- /dev/null +++ b/docs/scripts/copy_md_files.js @@ -0,0 +1,35 @@ +const fs = require('fs'); + +const { copyFile } = fs.promises; + +const defaultOptions = { + files: {}, + docsDir: './docs' +}; + +/** + This function is used to copy markdown files from a source + outside the `docs/` folder to a destination inside the `docs/` folder. + + Options: + - files: a mapping of source: destination + - docsDir: the docs folder +*/ +async function copyMarkdownFiles(options) { + + options = { ...defaultOptions, ...options }; + const { docsDir, files } = options; + + for (const [source, destination] of Object.entries(files)) { + await copyFile(source, `${docsDir}/${destination}`); + } +}; + + +console.info('Copying markdown files'); +copyMarkdownFiles({ + docsDir: './docs', + files: { + '../CHANGELOG.mdx': 'changelog.mdx', + } +}); diff --git a/docs/sidebars.js b/docs/sidebars.js new file mode 100644 index 000000000000..9ed35a38315b --- /dev/null +++ b/docs/sidebars.js @@ -0,0 +1,191 @@ +module.exports = { + someSidebar: [ + { + type: 'category', + label: 'Building Assistants', + collapsed: false, + items: [ + { + type: 'category', + label: 'Getting Started', + collapsed: true, + items: [ + 'index', + 'prototype-an-assistant', + 'installation', + // 'cheatsheet', + 'migrate-from', + ], + }, + { + type: 'category', + label: 'Best Practices', + collapsed: true, + items: [ + 'conversation-driven-development', + 'generating-nlu-data', + 'writing-stories', + ], + }, + { + type: 'category', + label: 'Conversation Patterns', + collapsed: true, + items: [ + 'chitchat-faqs', + 'business-logic', + 'fallback-handoff', + 'unexpected-input', + 'contextual-conversations', + ], + }, + { + type: 'category', + label: 'Preparing For Production', + collapsed: true, + items: [ + 'messaging-and-voice-channels', + 'tuning-your-model', + 'testing-your-assistant', + 'setting-up-ci-cd', + 'how-to-deploy', + ], + }, + { + type: 'category', + label: 'Reference', + collapsed: true, + items: [ + 'glossary', + require("./docs/reference/sidebar.json"), + ], + }, + ] + }, + { + type: 'category', + label: 'Concepts', + collapsed: false, + items: [ + { + type: 'category', + label: 'Training Data', + items: [ + 'training-data-format', + 'nlu-training-data', + 'stories', + 'rules', + ], + }, + 'domain', + { + type: 'category', + label: 'Config', + items: [ + 'model-configuration', + { + type: 'category', + label: 'Pipeline Components', + items: [ + 'components/language-models', + 'components/tokenizers', + 'components/featurizers', + 'components/intent-classifiers', + 'components/entity-extractors', + 'components/selectors', + 'components/custom-nlu-components', + ], + }, + 'policies', + 'training-data-importers', + ], + }, + { + type: 'category', + label: 'Actions', + items: [ + // 'actions', + 'responses', + { + type: 'category', + label: 'Custom Actions', + items: [ + 'custom-actions', + 'knowledge-bases', + { + type: 'category', + label: 'Rasa SDK', + collapsed: true, + items: [ + 'running-action-server', + 'tracker-dispatcher', + // 'events', + // 'rasa-sdk-changelog' + ], + }, + ], + }, + 'retrieval-actions', + 'forms', + 'reminders-and-external-events', + 'default-actions', + ], + }, + { + type: 'category', + label: 'Channel Connectors', + items: [ + 'connectors/your-own-website', + 'connectors/facebook-messenger', + 'connectors/slack', + 'connectors/telegram', + 'connectors/twilio', + 'connectors/hangouts', + 
'connectors/microsoft-bot-framework', + 'connectors/cisco-webex-teams', + 'connectors/rocketchat', + 'connectors/mattermost', + ], + }, + { + type: 'category', + label: 'Architecture', // name still confusing with architecture page elsewhere + items: [ + 'tracker-stores', + 'event-brokers', + 'model-storage', + 'lock-stores', + 'nlg', + ], + }, + ] + }, + { + type: 'category', + label: 'APIs', + collapsed: true, + items: [ + 'command-line-interface', + { + type: 'category', + label: 'HTTP API', + collapsed: true, + items: [ + 'http-api', + 'http-api-spec', + ], + }, + 'jupyter-notebooks', + ], + }, + { + type: 'category', + label: 'Change Log', + collapsed: true, + items: [ + 'changelog', + 'migration-guide', + ], + }, + ], +}; diff --git a/docs/src/components/button.jsx b/docs/src/components/button.jsx new file mode 100644 index 000000000000..882892a29e28 --- /dev/null +++ b/docs/src/components/button.jsx @@ -0,0 +1,34 @@ +import React from 'react'; +import { FontAwesomeIcon } from '@fortawesome/react-fontawesome'; +import { faCircleNotch } from '@fortawesome/free-solid-svg-icons' + +// FIXME: surely not the right place for this +const COLOR_PURPLE_RASA = '#5a17ee'; +const COLOR_DISABLED_GREY = '#bbb'; +const COLOR_WHITE = '#fff'; + +const Button = ({loading, style, ...props}) => ( + <> + <button + {...props} + style={{ + backgroundColor: props.disabled ? COLOR_DISABLED_GREY : COLOR_PURPLE_RASA, + border: '1px solid transparent', + color: COLOR_WHITE, + borderRadius: 8, + padding: 12, + fontSize: 15, + fontWeight: '600', + cursor: props.disabled ? 'default' : 'pointer', + ...style, + }} + /> + {loading ? <FontAwesomeIcon icon={faCircleNotch} spin style={styles.spinner} color={COLOR_DISABLED_GREY} /> : undefined} + </> +); + +const styles = { + spinner: { marginLeft: 8 } +}; + +export default Button; diff --git a/docs/src/components/prototyper/context.js b/docs/src/components/prototyper/context.js new file mode 100644 index 000000000000..3c746fd0ead3 --- /dev/null +++ b/docs/src/components/prototyper/context.js @@ -0,0 +1,5 @@ +import React from 'react'; + +const PrototyperContext = React.createContext(); + +export default PrototyperContext; diff --git a/docs/src/components/prototyper/download-button.jsx b/docs/src/components/prototyper/download-button.jsx new file mode 100644 index 000000000000..12b72f94f25f --- /dev/null +++ b/docs/src/components/prototyper/download-button.jsx @@ -0,0 +1,20 @@ +import React from 'react'; +import Button from '@site/src/components/button'; +import PrototyperContext from './context'; + + +const DownloadButton = (props) => { + const prototyperContext = React.useContext(PrototyperContext); + + return ( + <Button + onClick={prototyperContext.downloadProject} + disabled={!prototyperContext.hasTrained || !!prototyperContext.isTraining} + {...props} + > + Download project + </Button> + ); +} + +export default DownloadButton; diff --git a/docs/src/components/prototyper/index.jsx b/docs/src/components/prototyper/index.jsx new file mode 100644 index 000000000000..e1ce9a07ee52 --- /dev/null +++ b/docs/src/components/prototyper/index.jsx @@ -0,0 +1,6 @@ +import DownloadButton from './download-button'; +import Prototyper from './prototyper'; +import TrainButton from './train-button'; + +export default Prototyper; +export { DownloadButton, TrainButton }; diff --git a/docs/src/components/prototyper/prototyper.jsx b/docs/src/components/prototyper/prototyper.jsx new file mode 100644 index 000000000000..9a22552fc9b4 --- /dev/null +++ 
b/docs/src/components/prototyper/prototyper.jsx @@ -0,0 +1,155 @@ +import React from 'react'; + +import ExecutionEnvironment from '@docusaurus/ExecutionEnvironment'; +import ThemeContext from '@theme/theme-context'; +import { isProductionBuild, uuidv4 } from '@site/src/utils'; +import PrototyperContext from './context'; + +const jsonHeaders = { + 'Accept': 'application/json', + 'Content-Type': 'application/json' +}; +const trackerPollingInterval = 2000; + + +const Prototyper = ({children, startPrototyperApi, trainModelApi, chatBlockSelector, chatBlockScriptUrl}) => { + const [trackingId, setTrackingId] = React.useState(null); + const [hasStarted, setHasStarted] = React.useState(false); + const [projectDownloadUrl, setProjectDownloadUrl] = React.useState(null); + const [trainingData, setTrainingData] = React.useState({}); + const [pollingIntervalId, setPollingIntervalId] = React.useState(null); + const [hasTrained, setHasTrained] = React.useState(false); + const [isTraining, setIsTraining] = React.useState(false); + + // FIXME: once we can use `rasa-ui` outside of `rasa-x`, we can remove this + const insertChatBlockScript = () => { + if (ExecutionEnvironment.canUseDOM) { + const scriptElement = document.createElement('script'); + scriptElement.src = chatBlockScriptUrl; + document.body.appendChild(scriptElement); + } + }; + + // update tracking id when component is mounting + React.useEffect(() => { + setTrackingId(isProductionBuild() ? uuidv4() : "the-hash"); + insertChatBlockScript(); + }, []); + // initialize the chatblock once we have a tracking id + React.useEffect(() => { + if (trackingId !== null) { + updateChatBlock(); + } + }, [trackingId]); + + const onLiveCodeStart = (name, value) => { + setTrainingData((prevTrainingData) => ({...prevTrainingData, [name]: value})); + }; + + const onLiveCodeChange = (name, value) => { + setTrainingData((prevTrainingData) => ({...prevTrainingData, [name]: value})); + if (!hasStarted) { + // track the start here + setHasStarted(true); + fetch(startPrototyperApi, { + method: 'POST', + headers: jsonHeaders, + body: JSON.stringify({ + tracking_id: trackingId, + editor: 'main', + }) + }); + } + }; + + const trainModel = () => { + setIsTraining(true); + // train the model, resetting the chatblock + if (pollingIntervalId) { + updateChatBlock(); + clearInterval(pollingIntervalId); + setPollingIntervalId(null); + } + + fetch(trainModelApi, { + method: 'POST', + headers: jsonHeaders, + body: JSON.stringify({ tracking_id: trackingId, ...trainingData }), + }) + .then(response => response.json()) + .then(data => { + setHasTrained(true); + setProjectDownloadUrl(data.project_download_url); + if (data.rasa_service_url) { + startFetchingTracker(data.rasa_service_url); + } + }). 
+ finally(() => setIsTraining(false)); + }; + + const downloadProject = () => { + if (projectDownloadUrl) { + location.href = projectDownloadUrl; + } + }; + + const updateChatBlock = (baseUrl = "", tracker = {}) => { + if (ExecutionEnvironment.canUseDOM) { + if (!window.ChatBlock) { + // FIXME: once we can use `rasa-ui` outside of `rasa-x`, we can remove this + setTimeout(() => updateChatBlock(baseUrl, tracker), 500); + } else { + window.ChatBlock.default.init({ + onSendMessage: (message) => { + sendMessage(baseUrl, message); + }, + username: trackingId, + tracker, + selector: chatBlockSelector, + }); + } + } + }; + + const fetchTracker = (baseUrl) => { + fetch(`${baseUrl}/conversations/${trackingId}/tracker`, { + method: "GET", + headers: jsonHeaders, + }) + .then(response => response.json()) + .then(tracker => updateChatBlock(baseUrl, tracker)); + }; + + const sendMessage = (baseUrl, message) => { + fetch(`${baseUrl}/webhooks/rest/webhook`, { + method: "POST", + headers: jsonHeaders, + body: JSON.stringify({ + sender: trackingId, + message: message + }), + }) + .then(() => fetchTracker(baseUrl)); + }; + + const startFetchingTracker = (baseUrl) => { + + fetchTracker(baseUrl); + + const updateIntervalId = setInterval(() => { + fetchTracker(baseUrl); + }, trackerPollingInterval); + + setPollingIntervalId(updateIntervalId); + }; + + return ( + <ThemeContext.Provider value={{onLiveCodeChange, onLiveCodeStart}}> + <PrototyperContext.Provider value={{ trainModel, downloadProject, hasTrained, isTraining }}> + {children} + </PrototyperContext.Provider> + </ThemeContext.Provider> + ); +}; + +export default Prototyper; diff --git a/docs/src/components/prototyper/train-button.jsx b/docs/src/components/prototyper/train-button.jsx new file mode 100644 index 000000000000..4788fa626f9b --- /dev/null +++ b/docs/src/components/prototyper/train-button.jsx @@ -0,0 +1,21 @@ +import React from 'react'; +import Button from '@site/src/components/button'; +import PrototyperContext from './context'; + + +const TrainButton = (props) => { + const prototyperContext = React.useContext(PrototyperContext); + + return ( + <Button + onClick={prototyperContext.trainModel} + disabled={!!prototyperContext.isTraining} + loading={!!prototyperContext.isTraining} + {...props} + > + Train + </Button> + ); +} + +export default TrainButton; diff --git a/docs/src/components/redoc.jsx b/docs/src/components/redoc.jsx new file mode 100644 index 000000000000..d1d14b912fa0 --- /dev/null +++ b/docs/src/components/redoc.jsx @@ -0,0 +1,26 @@ +import React from 'react'; +import usePromise from 'react-promise'; +import BrowserOnly from '@docusaurus/BrowserOnly'; + + +const Redoc = (props) => { + + const getRedocStandalone = React.useCallback(async () => { + // using import() instead of require() keeps the package tree-shakeable. + const redoc = await import('redoc'); + return redoc.RedocStandalone; + }, []); + + return ( + <BrowserOnly fallback={<div>Loading...</div>}> + {() => { + // we need to import this here instead of at the top-level + // because it causes issues in production builds + const {value: RedocStandalone, loading} = usePromise(getRedocStandalone); + return loading ?
<div>Loading...</div> : <RedocStandalone {...props} />; + }} + </BrowserOnly> + ); +} + +export default Redoc; diff --git a/docs/src/css/custom.css b/docs/src/css/custom.css new file mode 100644 index 000000000000..fb359c2bdf82 --- /dev/null +++ b/docs/src/css/custom.css @@ -0,0 +1,42 @@ +/* stylelint-disable docusaurus/copyright-header */ +/** + * Any CSS included here will be global. The classic template + * bundles Infima by default. Infima is a CSS framework designed to + * work well for content-centric websites. + */ + +/* You can override the default Infima variables here. */ +:root { + --ifm-color-primary: #5a17ee; + --ifm-color-primary-dark: rgb(33, 175, 144); + --ifm-color-primary-darker: rgb(31, 165, 136); + --ifm-color-primary-darkest: rgb(26, 136, 112); + --ifm-color-primary-light: rgb(70, 203, 174); + --ifm-color-primary-lighter: rgb(102, 212, 189); + --ifm-color-primary-lightest: rgb(146, 224, 208); + --ifm-code-font-size: 95%; +} + +.docusaurus-highlight-code-line { + background-color: rgb(72, 77, 91); + display: block; + margin: 0 calc(-1 * var(--ifm-pre-padding)); + padding: 0 var(--ifm-pre-padding); +} + +/** + FIXME: should be next to component, but at the moment it's loaded minified. + Once we can use `rasa-ui` outside of `rasa-x`, we can fix this. +*/ +#rasa-chat-block { + height: 600px; + border: 1px solid #e8e8e8; + border-radius: 4px; +} +/** + FIXME: fixes a font issue on the chat block initial screen, + far from perfect but not as ugly as before +*/ +h4.css-1m1icag { + font-family: var(--ifm-font-family-base); +} diff --git a/docs/src/utils.js b/docs/src/utils.js new file mode 100644 index 000000000000..c87c4bb16ff3 --- /dev/null +++ b/docs/src/utils.js @@ -0,0 +1,8 @@ +const uuidv4 = () => ([1e7]+-1e3+-4e3+-8e3+-1e11).replace( + /[018]/g, + c => (c ^ crypto.getRandomValues(new Uint8Array(1))[0] & 15 >> c / 4).toString(16) +); + +const isProductionBuild = () => typeof window.__REACT_ERROR_OVERLAY_GLOBAL_HOOK__ === 'undefined'; + +export { isProductionBuild, uuidv4 }; diff --git a/docs/static/_redirects b/docs/static/_redirects new file mode 100644 index 000000000000..13ce1ab80ed3 --- /dev/null +++ b/docs/static/_redirects @@ -0,0 +1 @@ +/docs/rasa/next/* /:splat 200! 
diff --git a/docs/_static/images/component_lifecycle.png b/docs/static/img/component_lifecycle.png similarity index 100% rename from docs/_static/images/component_lifecycle.png rename to docs/static/img/component_lifecycle.png diff --git a/docs/static/img/contextual_interjection.png b/docs/static/img/contextual_interjection.png new file mode 100644 index 000000000000..901782a1b63b Binary files /dev/null and b/docs/static/img/contextual_interjection.png differ diff --git a/docs/_static/images/dialogflow_export.png b/docs/static/img/dialogflow_export.png similarity index 100% rename from docs/_static/images/dialogflow_export.png rename to docs/static/img/dialogflow_export.png diff --git a/docs/_static/images/dialogflow_export_2.png b/docs/static/img/dialogflow_export_2.png similarity index 100% rename from docs/_static/images/dialogflow_export_2.png rename to docs/static/img/dialogflow_export_2.png diff --git a/docs/static/img/favicon.ico b/docs/static/img/favicon.ico new file mode 100644 index 000000000000..7cd385726678 Binary files /dev/null and b/docs/static/img/favicon.ico differ diff --git a/docs/static/img/generic_interjection.png b/docs/static/img/generic_interjection.png new file mode 100644 index 000000000000..7ef99728a685 Binary files /dev/null and b/docs/static/img/generic_interjection.png differ diff --git a/docs/static/img/generic_interjection_handled.png b/docs/static/img/generic_interjection_handled.png new file mode 100644 index 000000000000..a764f72616cd Binary files /dev/null and b/docs/static/img/generic_interjection_handled.png differ diff --git a/docs/static/img/greet_interjection.png b/docs/static/img/greet_interjection.png new file mode 100644 index 000000000000..3e0654c3fa45 Binary files /dev/null and b/docs/static/img/greet_interjection.png differ diff --git a/docs/static/img/intent_mappings.png b/docs/static/img/intent_mappings.png new file mode 100644 index 000000000000..06d8eb333f5a Binary files /dev/null and b/docs/static/img/intent_mappings.png differ diff --git a/docs/_static/images/knowledge-base-example.png b/docs/static/img/knowledge-base-example.png similarity index 100% rename from docs/_static/images/knowledge-base-example.png rename to docs/static/img/knowledge-base-example.png diff --git a/docs/static/img/logo.svg b/docs/static/img/logo.svg new file mode 100644 index 000000000000..01493a58a6bc --- /dev/null +++ b/docs/static/img/logo.svg @@ -0,0 +1 @@ +<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 290 141"><g fill="#5a17ee"><path d="M210.5 29v50.11h12V64h26v15h12V29zm38 23h-26V41h26zm-49 4.5V79h-49V67h37v-7h-37V29h49v12h-37v7h37v8.5zM89.5 29v50.11h12V64h26v15h12V29zm38 23h-26V41h26zm-65.09 7.94l15.36-5.59.73-.29V29h-49v50h11.84V67.59l9-3.26L63.89 79h14.82zm-21-5.12V41H66.5v4.68z"/><path d="M247.15 90.54h23.91v-72.1H18.94v72.1h180l48.19 23.82V90.54zM5.5 5h279v99h-23.91v32l-64.77-32H5.5z"/></g></svg> diff --git a/docs/_static/images/luis_export.png b/docs/static/img/luis_export.png similarity index 100% rename from docs/_static/images/luis_export.png rename to docs/static/img/luis_export.png diff --git a/docs/static/img/memoization_policy_convo.png b/docs/static/img/memoization_policy_convo.png new file mode 100644 index 000000000000..fe46270e2e77 Binary files /dev/null and b/docs/static/img/memoization_policy_convo.png differ diff --git a/docs/_static/images/rasa-message-processing.png b/docs/static/img/rasa-message-processing.png similarity index 100% rename from docs/_static/images/rasa-message-processing.png rename to 
docs/static/img/rasa-message-processing.png diff --git a/docs/static/js/rasa-chatblock.min.js b/docs/static/js/rasa-chatblock.min.js new file mode 100644 index 000000000000..72d049df05db --- /dev/null +++ b/docs/static/js/rasa-chatblock.min.js @@ -0,0 +1,43 @@ +!function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t():"function"==typeof define&&define.amd?define([],t):"object"==typeof exports?exports.ChatBlock=t():e.ChatBlock=t()}(window,(function(){return function(e){var t={};function n(r){if(t[r])return t[r].exports;var o=t[r]={i:r,l:!1,exports:{}};return e[r].call(o.exports,o,o.exports,n),o.l=!0,o.exports}return n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)n.d(r,o,function(t){return e[t]}.bind(null,o));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="",n(n.s=42)}([function(e,t,n){"use strict";e.exports=n(27)},function(e,t,n){"use strict";n.d(t,"a",(function(){return d})),n.d(t,"b",(function(){return k})),n.d(t,"c",(function(){return p})),n.d(t,"d",(function(){return y})),n.d(t,"e",(function(){return w})),n.d(t,"f",(function(){return h}));var r=n(21),o=n.n(r),i=n(0),a=n(18),l=n(9),u=n(8),c=n(16),s=n(6),f=Object(i.createContext)("undefined"!=typeof HTMLElement?Object(a.a)():null),p=Object(i.createContext)({}),d=f.Provider,h=function(e){return Object(i.forwardRef)((function(t,n){return Object(i.createElement)(f.Consumer,null,(function(r){return e(t,r,n)}))}))},m="__EMOTION_TYPE_PLEASE_DO_NOT_USE__",g=Object.prototype.hasOwnProperty,v=function(e,t,n,r){var o=null===n?t.css:t.css(n);"string"==typeof o&&void 0!==e.registered[o]&&(o=e.registered[o]);var a=t[m],c=[o],s="";"string"==typeof t.className?s=Object(l.a)(e.registered,c,t.className):null!=t.className&&(s=t.className+" ");var f=Object(u.a)(c);Object(l.b)(e,f,"string"==typeof a);s+=e.key+"-"+f.name;var p={};for(var d in t)g.call(t,d)&&"css"!==d&&d!==m&&(p[d]=t[d]);return p.ref=r,p.className=s,Object(i.createElement)(a,p)},b=h((function(e,t,n){return"function"==typeof e.css?Object(i.createElement)(p.Consumer,null,(function(r){return v(t,e,r,n)})):v(t,e,null,n)}));var y=function(e,t){var n=arguments;if(null==t||!g.call(t,"css"))return i.createElement.apply(void 0,n);var r=n.length,o=new Array(r);o[0]=b;var a={};for(var l in t)g.call(t,l)&&(a[l]=t[l]);a[m]=e,o[1]=a;for(var u=2;u<r;u++)o[u]=n[u];return i.createElement.apply(null,o)},w=(i.Component,function(){var e=s.a.apply(void 0,arguments),t="animation-"+e.name;return{name:t,styles:"@keyframes "+t+"{"+e.styles+"}",anim:1,toString:function(){return"_EMO_"+this.name+"_"+this.styles+"_EMO_"}}}),x=function e(t){for(var n=t.length,r=0,o="";r<n;r++){var i=t[r];if(null!=i){var a=void 0;switch(typeof i){case"boolean":break;case"object":if(Array.isArray(i))a=e(i);else for(var l in a="",i)i[l]&&l&&(a&&(a+=" "),a+=l);break;default:a=i}a&&(o&&(o+=" "),o+=a)}}return o};function E(e,t,n){var r=[],o=Object(l.a)(e,r,n);return r.length<2?n:o+t(r)}var k=h((function(e,t){return 
Object(i.createElement)(p.Consumer,null,(function(n){var r=function(){for(var e=arguments.length,n=new Array(e),r=0;r<e;r++)n[r]=arguments[r];var o=Object(u.a)(n,t.registered);return Object(l.b)(t,o,!1),t.key+"-"+o.name},o={css:r,cx:function(){for(var e=arguments.length,n=new Array(e),o=0;o<e;o++)n[o]=arguments[o];return E(t.registered,r,x(n))},theme:n},i=e.children(o);return!0,i}))}))},function(e,t,n){e.exports=n(33)()},function(e,t,n){"use strict";n.r(t),n.d(t,"get",(function(){return c})),n.d(t,"createParser",(function(){return s})),n.d(t,"createStyleFunction",(function(){return d})),n.d(t,"compose",(function(){return m})),n.d(t,"system",(function(){return h})),n.d(t,"margin",(function(){return H})),n.d(t,"padding",(function(){return B})),n.d(t,"space",(function(){return W})),n.d(t,"color",(function(){return y})),n.d(t,"layout",(function(){return g})),n.d(t,"typography",(function(){return x})),n.d(t,"flexbox",(function(){return k})),n.d(t,"border",(function(){return P})),n.d(t,"background",(function(){return z})),n.d(t,"position",(function(){return D})),n.d(t,"grid",(function(){return C})),n.d(t,"shadow",(function(){return U})),n.d(t,"boxShadow",(function(){return $})),n.d(t,"textShadow",(function(){return $})),n.d(t,"variant",(function(){return Q})),n.d(t,"buttonStyle",(function(){return X})),n.d(t,"textStyle",(function(){return G})),n.d(t,"colorStyle",(function(){return q})),n.d(t,"borders",(function(){return A})),n.d(t,"width",(function(){return K})),n.d(t,"height",(function(){return Z})),n.d(t,"minWidth",(function(){return J})),n.d(t,"minHeight",(function(){return ee})),n.d(t,"maxWidth",(function(){return te})),n.d(t,"maxHeight",(function(){return ne})),n.d(t,"size",(function(){return re})),n.d(t,"verticalAlign",(function(){return oe})),n.d(t,"display",(function(){return ie})),n.d(t,"overflow",(function(){return ae})),n.d(t,"overflowX",(function(){return le})),n.d(t,"overflowY",(function(){return ue})),n.d(t,"opacity",(function(){return ce})),n.d(t,"fontSize",(function(){return se})),n.d(t,"fontFamily",(function(){return fe})),n.d(t,"fontWeight",(function(){return pe})),n.d(t,"lineHeight",(function(){return de})),n.d(t,"textAlign",(function(){return he})),n.d(t,"fontStyle",(function(){return me})),n.d(t,"letterSpacing",(function(){return ge})),n.d(t,"alignItems",(function(){return ve})),n.d(t,"alignContent",(function(){return be})),n.d(t,"justifyItems",(function(){return ye})),n.d(t,"justifyContent",(function(){return we})),n.d(t,"flexWrap",(function(){return xe})),n.d(t,"flexDirection",(function(){return Ee})),n.d(t,"flex",(function(){return ke})),n.d(t,"flexGrow",(function(){return Se})),n.d(t,"flexShrink",(function(){return Oe})),n.d(t,"flexBasis",(function(){return Ce})),n.d(t,"justifySelf",(function(){return Te})),n.d(t,"alignSelf",(function(){return _e})),n.d(t,"order",(function(){return Pe})),n.d(t,"gridGap",(function(){return Ae})),n.d(t,"gridColumnGap",(function(){return Me})),n.d(t,"gridRowGap",(function(){return ze})),n.d(t,"gridColumn",(function(){return Fe})),n.d(t,"gridRow",(function(){return je})),n.d(t,"gridAutoFlow",(function(){return De})),n.d(t,"gridAutoColumns",(function(){return Re})),n.d(t,"gridAutoRows",(function(){return Ie})),n.d(t,"gridTemplateColumns",(function(){return Le})),n.d(t,"gridTemplateRows",(function(){return Ne})),n.d(t,"gridTemplateAreas",(function(){return Ve})),n.d(t,"gridArea",(function(){return He})),n.d(t,"borderWidth",(function(){return Be})),n.d(t,"borderStyle",(function(){return We})),n.d(t,"borderColor",(function(){return 
Ue})),n.d(t,"borderTop",(function(){return $e})),n.d(t,"borderRight",(function(){return Ye})),n.d(t,"borderBottom",(function(){return Qe})),n.d(t,"borderLeft",(function(){return Xe})),n.d(t,"borderRadius",(function(){return Ge})),n.d(t,"backgroundImage",(function(){return qe})),n.d(t,"backgroundSize",(function(){return Ke})),n.d(t,"backgroundPosition",(function(){return Ze})),n.d(t,"backgroundRepeat",(function(){return Je})),n.d(t,"zIndex",(function(){return et})),n.d(t,"top",(function(){return tt})),n.d(t,"right",(function(){return nt})),n.d(t,"bottom",(function(){return rt})),n.d(t,"left",(function(){return ot})),n.d(t,"style",(function(){return it}));var r=n(4),o=n.n(r),i=function(e,t){var n=o()({},e,t);for(var r in e){var i;e[r]&&"object"==typeof t[r]&&o()(n,((i={})[r]=o()(e[r],t[r]),i))}return n},a={breakpoints:[40,52,64].map((function(e){return e+"em"}))},l=function(e){return"@media screen and (min-width: "+e+")"},u=function(e,t){return c(t,e,e)},c=function(e,t,n,r,o){for(t=t&&t.split?t.split("."):[t],r=0;r<t.length;r++)e=e?e[t[r]]:o;return e===o?n:e},s=function e(t){var n={},r=function(e){var r,u,s={},d=!1,h=e.theme&&e.theme.disableStyledSystemCache;for(var m in e)if(t[m]){var g=t[m],v=e[m],b=c(e.theme,g.scale,g.defaults);if("object"!=typeof v)o()(s,g(v,b,e));else{if(n.breakpoints=!h&&n.breakpoints||c(e.theme,"breakpoints",a.breakpoints),Array.isArray(v)){n.media=!h&&n.media||[null].concat(n.breakpoints.map(l)),s=i(s,f(n.media,g,b,v,e));continue}null!==v&&(s=i(s,p(n.breakpoints,g,b,v,e)),d=!0)}}return d&&(r=s,u={},Object.keys(r).sort((function(e,t){return e.localeCompare(t,void 0,{numeric:!0,sensitivity:"base"})})).forEach((function(e){u[e]=r[e]})),s=u),s};r.config=t,r.propNames=Object.keys(t),r.cache=n;var u=Object.keys(t).filter((function(e){return"config"!==e}));return u.length>1&&u.forEach((function(n){var o;r[n]=e(((o={})[n]=t[n],o))})),r},f=function(e,t,n,r,i){var a={};return r.slice(0,e.length).forEach((function(r,l){var u,c=e[l],s=t(r,n,i);c?o()(a,((u={})[c]=o()({},a[c],s),u)):o()(a,s)})),a},p=function(e,t,n,r,i){var a={};for(var u in r){var c=e[u],s=t(r[u],n,i);if(c){var f,p=l(c);o()(a,((f={})[p]=o()({},a[p],s),f))}else o()(a,s)}return a},d=function(e){var t=e.properties,n=e.property,r=e.scale,o=e.transform,i=void 0===o?u:o,a=e.defaultScale;t=t||[n];var l=function(e,n,r){var o={},a=i(e,n,r);if(null!==a)return t.forEach((function(e){o[e]=a})),o};return l.scale=r,l.defaults=a,l},h=function(e){void 0===e&&(e={});var t={};return Object.keys(e).forEach((function(n){var r=e[n];t[n]=!0!==r?"function"!=typeof r?d(r):r:d({property:n,scale:n})})),s(t)},m=function(){for(var e={},t=arguments.length,n=new Array(t),r=0;r<t;r++)n[r]=arguments[r];n.forEach((function(t){t&&t.config&&o()(e,t.config)}));var i=s(e);return i},g=h({width:{property:"width",scale:"sizes",transform:function(e,t){return c(t,e,!function(e){return"number"==typeof e&&!isNaN(e)}(e)||e>1?e:100*e+"%")}},height:{property:"height",scale:"sizes"},minWidth:{property:"minWidth",scale:"sizes"},minHeight:{property:"minHeight",scale:"sizes"},maxWidth:{property:"maxWidth",scale:"sizes"},maxHeight:{property:"maxHeight",scale:"sizes"},size:{properties:["width","height"],scale:"sizes"},overflow:!0,overflowX:!0,overflowY:!0,display:!0,verticalAlign:!0}),v=g,b={color:{property:"color",scale:"colors"},backgroundColor:{property:"backgroundColor",scale:"colors"},opacity:!0};b.bg=b.backgroundColor;var 
y=h(b),w=y,x=h({fontFamily:{property:"fontFamily",scale:"fonts"},fontSize:{property:"fontSize",scale:"fontSizes",defaultScale:[12,14,16,20,24,32,48,64,72]},fontWeight:{property:"fontWeight",scale:"fontWeights"},lineHeight:{property:"lineHeight",scale:"lineHeights"},letterSpacing:{property:"letterSpacing",scale:"letterSpacings"},textAlign:!0,fontStyle:!0}),E=x,k=h({alignItems:!0,alignContent:!0,justifyItems:!0,justifyContent:!0,flexWrap:!0,flexDirection:!0,flex:!0,flexGrow:!0,flexShrink:!0,flexBasis:!0,justifySelf:!0,alignSelf:!0,order:!0}),S=k,O={space:[0,4,8,16,32,64,128,256,512]},C=h({gridGap:{property:"gridGap",scale:"space",defaultScale:O.space},gridColumnGap:{property:"gridColumnGap",scale:"space",defaultScale:O.space},gridRowGap:{property:"gridRowGap",scale:"space",defaultScale:O.space},gridColumn:!0,gridRow:!0,gridAutoFlow:!0,gridAutoColumns:!0,gridAutoRows:!0,gridTemplateColumns:!0,gridTemplateRows:!0,gridTemplateAreas:!0,gridArea:!0}),T=C,_={border:{property:"border",scale:"borders"},borderWidth:{property:"borderWidth",scale:"borderWidths"},borderStyle:{property:"borderStyle",scale:"borderStyles"},borderColor:{property:"borderColor",scale:"colors"},borderRadius:{property:"borderRadius",scale:"radii"},borderTop:{property:"borderTop",scale:"borders"},borderTopLeftRadius:{property:"borderTopLeftRadius",scale:"radii"},borderTopRightRadius:{property:"borderTopRightRadius",scale:"radii"},borderRight:{property:"borderRight",scale:"borders"},borderBottom:{property:"borderBottom",scale:"borders"},borderBottomLeftRadius:{property:"borderBottomLeftRadius",scale:"radii"},borderBottomRightRadius:{property:"borderBottomRightRadius",scale:"radii"},borderLeft:{property:"borderLeft",scale:"borders"},borderX:{properties:["borderLeft","borderRight"],scale:"borders"},borderY:{properties:["borderTop","borderBottom"],scale:"borders"},borderTopWidth:{property:"borderTopWidth",scale:"borderWidths"},borderTopColor:{property:"borderTopColor",scale:"colors"},borderTopStyle:{property:"borderTopStyle",scale:"borderStyles"}};_.borderTopLeftRadius={property:"borderTopLeftRadius",scale:"radii"},_.borderTopRightRadius={property:"borderTopRightRadius",scale:"radii"},_.borderBottomWidth={property:"borderBottomWidth",scale:"borderWidths"},_.borderBottomColor={property:"borderBottomColor",scale:"colors"},_.borderBottomStyle={property:"borderBottomStyle",scale:"borderStyles"},_.borderBottomLeftRadius={property:"borderBottomLeftRadius",scale:"radii"},_.borderBottomRightRadius={property:"borderBottomRightRadius",scale:"radii"},_.borderLeftWidth={property:"borderLeftWidth",scale:"borderWidths"},_.borderLeftColor={property:"borderLeftColor",scale:"colors"},_.borderLeftStyle={property:"borderLeftStyle",scale:"borderStyles"},_.borderRightWidth={property:"borderRightWidth",scale:"borderWidths"},_.borderRightColor={property:"borderRightColor",scale:"colors"},_.borderRightStyle={property:"borderRightStyle",scale:"borderStyles"};var P=h(_),A=P,M={background:!0,backgroundImage:!0,backgroundSize:!0,backgroundPosition:!0,backgroundRepeat:!0};M.bgImage=M.backgroundImage,M.bgSize=M.backgroundSize,M.bgPosition=M.backgroundPosition,M.bgRepeat=M.backgroundRepeat;var 
z=h(M),F=z,j={space:[0,4,8,16,32,64,128,256,512]},D=h({position:!0,zIndex:{property:"zIndex",scale:"zIndices"},top:{property:"top",scale:"space",defaultScale:j.space},right:{property:"right",scale:"space",defaultScale:j.space},bottom:{property:"bottom",scale:"space",defaultScale:j.space},left:{property:"left",scale:"space",defaultScale:j.space}}),R=D,I={space:[0,4,8,16,32,64,128,256,512]},L=function(e){return"number"==typeof e&&!isNaN(e)},N=function(e,t){if(!L(e))return c(t,e,e);var n=e<0,r=Math.abs(e),o=c(t,r,r);return L(o)?o*(n?-1:1):n?"-"+o:o},V={};V.margin={margin:{property:"margin",scale:"space",transform:N,defaultScale:I.space},marginTop:{property:"marginTop",scale:"space",transform:N,defaultScale:I.space},marginRight:{property:"marginRight",scale:"space",transform:N,defaultScale:I.space},marginBottom:{property:"marginBottom",scale:"space",transform:N,defaultScale:I.space},marginLeft:{property:"marginLeft",scale:"space",transform:N,defaultScale:I.space},marginX:{properties:["marginLeft","marginRight"],scale:"space",transform:N,defaultScale:I.space},marginY:{properties:["marginTop","marginBottom"],scale:"space",transform:N,defaultScale:I.space}},V.margin.m=V.margin.margin,V.margin.mt=V.margin.marginTop,V.margin.mr=V.margin.marginRight,V.margin.mb=V.margin.marginBottom,V.margin.ml=V.margin.marginLeft,V.margin.mx=V.margin.marginX,V.margin.my=V.margin.marginY,V.padding={padding:{property:"padding",scale:"space",defaultScale:I.space},paddingTop:{property:"paddingTop",scale:"space",defaultScale:I.space},paddingRight:{property:"paddingRight",scale:"space",defaultScale:I.space},paddingBottom:{property:"paddingBottom",scale:"space",defaultScale:I.space},paddingLeft:{property:"paddingLeft",scale:"space",defaultScale:I.space},paddingX:{properties:["paddingLeft","paddingRight"],scale:"space",defaultScale:I.space},paddingY:{properties:["paddingTop","paddingBottom"],scale:"space",defaultScale:I.space}},V.padding.p=V.padding.padding,V.padding.pt=V.padding.paddingTop,V.padding.pr=V.padding.paddingRight,V.padding.pb=V.padding.paddingBottom,V.padding.pl=V.padding.paddingLeft,V.padding.px=V.padding.paddingX,V.padding.py=V.padding.paddingY;var H=h(V.margin),B=h(V.padding),W=m(H,B),U=h({boxShadow:{property:"boxShadow",scale:"shadows"},textShadow:{property:"textShadow",scale:"shadows"}}),$=U,Y=n(5),Q=function(e){var t,n,r=e.scale,o=e.prop,i=void 0===o?"variant":o,a=e.variants,l=void 0===a?{}:a,u=e.key;(n=Object.keys(l).length?function(e,t,n){return Object(Y.default)(c(t,e,null))(n.theme)}:function(e,t){return c(t,e,null)}).scale=r||u,n.defaults=l;var f=((t={})[i]=n,t);return 
s(f)},X=Q({key:"buttons"}),G=Q({key:"textStyles",prop:"textStyle"}),q=Q({key:"colorStyles",prop:"colors"}),K=v.width,Z=v.height,J=v.minWidth,ee=v.minHeight,te=v.maxWidth,ne=v.maxHeight,re=v.size,oe=v.verticalAlign,ie=v.display,ae=v.overflow,le=v.overflowX,ue=v.overflowY,ce=w.opacity,se=E.fontSize,fe=E.fontFamily,pe=E.fontWeight,de=E.lineHeight,he=E.textAlign,me=E.fontStyle,ge=E.letterSpacing,ve=S.alignItems,be=S.alignContent,ye=S.justifyItems,we=S.justifyContent,xe=S.flexWrap,Ee=S.flexDirection,ke=S.flex,Se=S.flexGrow,Oe=S.flexShrink,Ce=S.flexBasis,Te=S.justifySelf,_e=S.alignSelf,Pe=S.order,Ae=T.gridGap,Me=T.gridColumnGap,ze=T.gridRowGap,Fe=T.gridColumn,je=T.gridRow,De=T.gridAutoFlow,Re=T.gridAutoColumns,Ie=T.gridAutoRows,Le=T.gridTemplateColumns,Ne=T.gridTemplateRows,Ve=T.gridTemplateAreas,He=T.gridArea,Be=A.borderWidth,We=A.borderStyle,Ue=A.borderColor,$e=A.borderTop,Ye=A.borderRight,Qe=A.borderBottom,Xe=A.borderLeft,Ge=A.borderRadius,qe=F.backgroundImage,Ke=F.backgroundSize,Ze=F.backgroundPosition,Je=F.backgroundRepeat,et=R.zIndex,tt=R.top,nt=R.right,rt=R.bottom,ot=R.left,it=function(e){var t=e.prop,n=e.cssProperty,r=e.alias,o=e.key,i=e.transformValue,a=e.scale,l=e.properties,u={};return u[t]=d({properties:l,property:n||t,scale:o,defaultScale:a,transform:i}),r&&(u[r]=u[t]),s(u)}},function(e,t,n){"use strict"; +/* +object-assign +(c) Sindre Sorhus +@license MIT +*/var r=Object.getOwnPropertySymbols,o=Object.prototype.hasOwnProperty,i=Object.prototype.propertyIsEnumerable;function a(e){if(null==e)throw new TypeError("Object.assign cannot be called with null or undefined");return Object(e)}e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map((function(e){return t[e]})).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach((function(e){r[e]=e})),"abcdefghijklmnopqrst"===Object.keys(Object.assign({},r)).join("")}catch(e){return!1}}()?Object.assign:function(e,t){for(var n,l,u=a(e),c=1;c<arguments.length;c++){for(var s in n=Object(arguments[c]))o.call(n,s)&&(u[s]=n[s]);if(r){l=r(n);for(var f=0;f<l.length;f++)i.call(n,l[f])&&(u[l[f]]=n[l[f]])}}return u}},function(e,t,n){"use strict";function r(){return(r=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}n.r(t),n.d(t,"get",(function(){return o})),n.d(t,"responsive",(function(){return p})),n.d(t,"css",(function(){return d}));var o=function(e,t,n,r,o){for(t=t&&t.split?t.split("."):[t],r=0;r<t.length;r++)e=e?e[t[r]]:o;return e===o?n:e},i=[40,52,64].map((function(e){return 
e+"em"})),a={space:[0,4,8,16,32,64,128,256,512],fontSizes:[12,14,16,20,24,32,48,64,72]},l={bg:"backgroundColor",m:"margin",mt:"marginTop",mr:"marginRight",mb:"marginBottom",ml:"marginLeft",mx:"marginX",my:"marginY",p:"padding",pt:"paddingTop",pr:"paddingRight",pb:"paddingBottom",pl:"paddingLeft",px:"paddingX",py:"paddingY"},u={marginX:["marginLeft","marginRight"],marginY:["marginTop","marginBottom"],paddingX:["paddingLeft","paddingRight"],paddingY:["paddingTop","paddingBottom"],size:["width","height"]},c={color:"colors",backgroundColor:"colors",borderColor:"colors",margin:"space",marginTop:"space",marginRight:"space",marginBottom:"space",marginLeft:"space",marginX:"space",marginY:"space",padding:"space",paddingTop:"space",paddingRight:"space",paddingBottom:"space",paddingLeft:"space",paddingX:"space",paddingY:"space",top:"space",right:"space",bottom:"space",left:"space",gridGap:"space",gridColumnGap:"space",gridRowGap:"space",gap:"space",columnGap:"space",rowGap:"space",fontFamily:"fonts",fontSize:"fontSizes",fontWeight:"fontWeights",lineHeight:"lineHeights",letterSpacing:"letterSpacings",border:"borders",borderTop:"borders",borderRight:"borders",borderBottom:"borders",borderLeft:"borders",borderWidth:"borderWidths",borderStyle:"borderStyles",borderRadius:"radii",borderTopRightRadius:"radii",borderTopLeftRadius:"radii",borderBottomRightRadius:"radii",borderBottomLeftRadius:"radii",borderTopWidth:"borderWidths",borderTopColor:"colors",borderTopStyle:"borderStyles",borderBottomWidth:"borderWidths",borderBottomColor:"colors",borderBottomStyle:"borderStyles",borderLeftWidth:"borderWidths",borderLeftColor:"colors",borderLeftStyle:"borderStyles",borderRightWidth:"borderWidths",borderRightColor:"colors",borderRightStyle:"borderStyles",outlineColor:"colors",boxShadow:"shadows",textShadow:"shadows",zIndex:"zIndices",width:"sizes",minWidth:"sizes",maxWidth:"sizes",height:"sizes",minHeight:"sizes",maxHeight:"sizes",flexBasis:"sizes",size:"sizes",fill:"colors",stroke:"colors"},s=function(e,t){if("number"!=typeof t||t>=0)return o(e,t,t);var n=Math.abs(t),r=o(e,n,n);return"string"==typeof r?"-"+r:-1*r},f=["margin","marginTop","marginRight","marginBottom","marginLeft","marginX","marginY","top","bottom","left","right"].reduce((function(e,t){var n;return r({},e,((n={})[t]=s,n))}),{}),p=function(e){return function(t){var n={},r=o(t,"breakpoints",i),a=[null].concat(r.map((function(e){return"@media screen and (min-width: "+e+")"})));for(var l in e){var u="function"==typeof e[l]?e[l](t):e[l];if(null!=u)if(Array.isArray(u))for(var c=0;c<u.slice(0,a.length).length;c++){var s=a[c];s?(n[s]=n[s]||{},null!=u[c]&&(n[s][l]=u[c])):n[l]=u[c]}else n[l]=u}return n}},d=function e(t){return function(n){void 0===n&&(n={});var i=r({},a,{},n.theme||n),s={},d="function"==typeof t?t(i):t,h=p(d)(i);for(var m in h){var g=h[m],v="function"==typeof g?g(i):g;if("variant"!==m)if(v&&"object"==typeof v)s[m]=e(v)(i);else{var b=o(l,m,m),y=o(c,b),w=o(i,y,o(i,b,{})),x=o(f,b,o)(w,v,v);if(u[b])for(var E=u[b],k=0;k<E.length;k++)s[E[k]]=x;else s[b]=x}else s=r({},s,{},e(o(i,v))(i))}return s}};t.default=d},function(e,t,n){"use strict";var r=n(8);t.a=function(){for(var e=arguments.length,t=new Array(e),n=0;n<e;n++)t[n]=arguments[n];return Object(r.a)(t)}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.Flex=t.Box=void 0;u(n(0));var r=u(n(11)),o=n(3),i=function(e){if(e&&e.__esModule)return e;var t=l();if(t&&t.has(e))return t.get(e);var n={};if(null!=e){var 
r=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if(Object.prototype.hasOwnProperty.call(e,o)){var i=r?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}}n.default=e,t&&t.set(e,n);return n}(n(5)),a=u(n(31));function l(){if("function"!=typeof WeakMap)return null;var e=new WeakMap;return l=function(){return e},e}function u(e){return e&&e.__esModule?e:{default:e}}var c=(0,r.default)("div",{shouldForwardProp:a.default})({boxSizing:"border-box",margin:0,minWidth:0},(function(e){return(0,i.default)(e.__css)(e.theme)}),(function(e){var t=e.theme,n=e.variant,r=e.tx,o=void 0===r?"variants":r;return(0,i.default)((0,i.get)(t,o+"."+n,(0,i.get)(t,n)))(t)}),(function(e){return(0,i.default)(e.sx)(e.theme)}),(function(e){return e.css}),(0,o.compose)(o.space,o.layout,o.typography,o.color,o.flexbox));t.Box=c;var s=(0,r.default)(c)({display:"flex"});t.Flex=s},function(e,t,n){"use strict";n.d(t,"a",(function(){return m}));var r=function(e){for(var t,n=0,r=0,o=e.length;o>=4;++r,o-=4)t=1540483477*(65535&(t=255&e.charCodeAt(r)|(255&e.charCodeAt(++r))<<8|(255&e.charCodeAt(++r))<<16|(255&e.charCodeAt(++r))<<24))+(59797*(t>>>16)<<16),n=1540483477*(65535&(t^=t>>>24))+(59797*(t>>>16)<<16)^1540483477*(65535&n)+(59797*(n>>>16)<<16);switch(o){case 3:n^=(255&e.charCodeAt(r+2))<<16;case 2:n^=(255&e.charCodeAt(r+1))<<8;case 1:n=1540483477*(65535&(n^=255&e.charCodeAt(r)))+(59797*(n>>>16)<<16)}return(((n=1540483477*(65535&(n^=n>>>13))+(59797*(n>>>16)<<16))^n>>>15)>>>0).toString(36)},o={animationIterationCount:1,borderImageOutset:1,borderImageSlice:1,borderImageWidth:1,boxFlex:1,boxFlexGroup:1,boxOrdinalGroup:1,columnCount:1,columns:1,flex:1,flexGrow:1,flexPositive:1,flexShrink:1,flexNegative:1,flexOrder:1,gridRow:1,gridRowEnd:1,gridRowSpan:1,gridRowStart:1,gridColumn:1,gridColumnEnd:1,gridColumnSpan:1,gridColumnStart:1,msGridRow:1,msGridRowSpan:1,msGridColumn:1,msGridColumnSpan:1,fontWeight:1,lineHeight:1,opacity:1,order:1,orphans:1,tabSize:1,widows:1,zIndex:1,zoom:1,WebkitLineClamp:1,fillOpacity:1,floodOpacity:1,stopOpacity:1,strokeDasharray:1,strokeDashoffset:1,strokeMiterlimit:1,strokeOpacity:1,strokeWidth:1},i=n(12),a=/[A-Z]|^ms/g,l=/_EMO_([^_]+?)_([^]*?)_EMO_/g,u=function(e){return 45===e.charCodeAt(1)},c=function(e){return null!=e&&"boolean"!=typeof e},s=Object(i.a)((function(e){return u(e)?e:e.replace(a,"-$&").toLowerCase()})),f=function(e,t){switch(e){case"animation":case"animationName":if("string"==typeof t)return t.replace(l,(function(e,t,n){return d={name:t,styles:n,next:d},t}))}return 1===o[e]||u(e)||"number"!=typeof t||0===t?t:t+"px"};function p(e,t,n,r){if(null==n)return"";if(void 0!==n.__emotion_styles)return n;switch(typeof n){case"boolean":return"";case"object":if(1===n.anim)return d={name:n.name,styles:n.styles,next:d},n.name;if(void 0!==n.styles){var o=n.next;if(void 0!==o)for(;void 0!==o;)d={name:o.name,styles:o.styles,next:d},o=o.next;return n.styles+";"}return function(e,t,n){var r="";if(Array.isArray(n))for(var o=0;o<n.length;o++)r+=p(e,t,n[o],!1);else for(var i in n){var a=n[i];if("object"!=typeof a)null!=t&&void 0!==t[a]?r+=i+"{"+t[a]+"}":c(a)&&(r+=s(i)+":"+f(i,a)+";");else if(!Array.isArray(a)||"string"!=typeof a[0]||null!=t&&void 0!==t[a[0]]){var l=p(e,t,a,!1);switch(i){case"animation":case"animationName":r+=s(i)+":"+l+";";break;default:r+=i+"{"+l+"}"}}else for(var u=0;u<a.length;u++)c(a[u])&&(r+=s(i)+":"+f(i,a[u])+";")}return r}(e,t,n);case"function":if(void 0!==e){var i=d,a=n(e);return 
d=i,p(e,t,a,r)}break;case"string":}if(null==t)return n;var l=t[n];return void 0===l||r?n:l}var d,h=/label:\s*([^\s;\n{]+)\s*;/g;var m=function(e,t,n){if(1===e.length&&"object"==typeof e[0]&&null!==e[0]&&void 0!==e[0].styles)return e[0];var o=!0,i="";d=void 0;var a=e[0];null==a||void 0===a.raw?(o=!1,i+=p(n,t,a,!1)):i+=a[0];for(var l=1;l<e.length;l++)i+=p(n,t,e[l],46===i.charCodeAt(i.length-1)),o&&(i+=a[l]);h.lastIndex=0;for(var u,c="";null!==(u=h.exec(i));)c+="-"+u[1];return{name:r(i)+c,styles:i,next:d}}},function(e,t,n){"use strict";n.d(t,"a",(function(){return r})),n.d(t,"b",(function(){return o}));function r(e,t,n){var r="";return n.split(" ").forEach((function(n){void 0!==e[n]?t.push(e[n]):r+=n+" "})),r}var o=function(e,t,n){var r=e.key+"-"+t.name;if(!1===n&&void 0===e.registered[r]&&(e.registered[r]=t.styles),void 0===e.inserted[t.name]){var o=t;do{e.insert("."+r,o,e.sheet,!0);o=o.next}while(void 0!==o)}}},function(e,t,n){"use strict";!function e(){if("undefined"!=typeof __REACT_DEVTOOLS_GLOBAL_HOOK__&&"function"==typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE){0;try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(e)}catch(e){console.error(e)}}}(),e.exports=n(28)},function(e,t,n){"use strict";n.r(t);var r=n(14),o=n.n(r),i=n(0),a=n(15),l=n(1),u=n(9),c=n(8),s=a.a,f=function(e){return"theme"!==e&&"innerRef"!==e},p=function(e){return"string"==typeof e&&e.charCodeAt(0)>96?s:f};function d(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function h(e){for(var t=1;t<arguments.length;t++){var n=null!=arguments[t]?arguments[t]:{};t%2?d(n,!0).forEach((function(t){o()(e,t,n[t])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(n)):d(n).forEach((function(t){Object.defineProperty(e,t,Object.getOwnPropertyDescriptor(n,t))}))}return e}var m=function e(t,n){var r,o,a;void 0!==n&&(r=n.label,a=n.target,o=t.__emotion_forwardProp&&n.shouldForwardProp?function(e){return t.__emotion_forwardProp(e)&&n.shouldForwardProp(e)}:n.shouldForwardProp);var s=t.__emotion_real===t,f=s&&t.__emotion_base||t;"function"!=typeof o&&s&&(o=t.__emotion_forwardProp);var d=o||p(f),m=!d("as");return function(){var g=arguments,v=s&&void 0!==t.__emotion_styles?t.__emotion_styles.slice(0):[];if(void 0!==r&&v.push("label:"+r+";"),null==g[0]||void 0===g[0].raw)v.push.apply(v,g);else{0,v.push(g[0][0]);for(var b=g.length,y=1;y<b;y++)v.push(g[y],g[0][y])}var w=Object(l.f)((function(e,t,n){return Object(i.createElement)(l.c.Consumer,null,(function(r){var l=m&&e.as||f,s="",h=[],g=e;if(null==e.theme){for(var b in g={},e)g[b]=e[b];g.theme=r}"string"==typeof e.className?s=Object(u.a)(t.registered,h,e.className):null!=e.className&&(s=e.className+" ");var y=Object(c.a)(v.concat(h),t.registered,g);Object(u.b)(t,y,"string"==typeof l);s+=t.key+"-"+y.name,void 0!==a&&(s+=" "+a);var w=m&&void 0===o?p(l):d,x={};for(var E in e)m&&"as"===E||w(E)&&(x[E]=e[E]);return x.className=s,x.ref=n||e.innerRef,Object(i.createElement)(l,x)}))}));return w.displayName=void 0!==r?r:"Styled("+("string"==typeof f?f:f.displayName||f.name||"Component")+")",w.defaultProps=t.defaultProps,w.__emotion_real=w,w.__emotion_base=f,w.__emotion_styles=v,w.__emotion_forwardProp=o,Object.defineProperty(w,"toString",{value:function(){return"."+a}}),w.withComponent=function(t,r){return e(t,void 0!==r?h({},n||{},{},r):n).apply(void 
0,v)},w}}.bind();["a","abbr","address","area","article","aside","audio","b","base","bdi","bdo","big","blockquote","body","br","button","canvas","caption","cite","code","col","colgroup","data","datalist","dd","del","details","dfn","dialog","div","dl","dt","em","embed","fieldset","figcaption","figure","footer","form","h1","h2","h3","h4","h5","h6","head","header","hgroup","hr","html","i","iframe","img","input","ins","kbd","keygen","label","legend","li","link","main","map","mark","marquee","menu","menuitem","meta","meter","nav","noscript","object","ol","optgroup","option","output","p","param","picture","pre","progress","q","rp","rt","ruby","s","samp","script","section","select","small","source","span","strong","style","sub","summary","sup","table","tbody","td","textarea","tfoot","th","thead","time","title","tr","track","u","ul","var","video","wbr","circle","clipPath","defs","ellipse","foreignObject","g","image","line","linearGradient","mask","path","pattern","polygon","polyline","radialGradient","rect","stop","svg","text","tspan"].forEach((function(e){m[e]=m(e)}));t.default=m},function(e,t,n){"use strict";t.a=function(e){var t={};return function(n){return void 0===t[n]&&(t[n]=e(n)),t[n]}}},function(e,t,n){"use strict";(function(e,r){function o(e){return(o="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function i(e,t){for(var n=0;n<t.length;n++){var r=t[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(e,r.key,r)}}function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function l(e){for(var t=1;t<arguments.length;t++){var n=null!=arguments[t]?arguments[t]:{},r=Object.keys(n);"function"==typeof Object.getOwnPropertySymbols&&(r=r.concat(Object.getOwnPropertySymbols(n).filter((function(e){return Object.getOwnPropertyDescriptor(n,e).enumerable})))),r.forEach((function(t){a(e,t,n[t])}))}return e}function u(e,t){return function(e){if(Array.isArray(e))return e}(e)||function(e,t){var n=[],r=!0,o=!1,i=void 0;try{for(var a,l=e[Symbol.iterator]();!(r=(a=l.next()).done)&&(n.push(a.value),!t||n.length!==t);r=!0);}catch(e){o=!0,i=e}finally{try{r||null==l.return||l.return()}finally{if(o)throw i}}return n}(e,t)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance")}()}n.d(t,"a",(function(){return Se})),n.d(t,"b",(function(){return xe})),n.d(t,"c",(function(){return ke}));var c=function(){},s={},f={},p={mark:c,measure:c};try{"undefined"!=typeof window&&(s=window),"undefined"!=typeof document&&(f=document),"undefined"!=typeof MutationObserver&&MutationObserver,"undefined"!=typeof performance&&(p=performance)}catch(e){}var d=(s.navigator||{}).userAgent,h=void 0===d?"":d,m=s,g=f,v=p,b=(m.document,!!g.documentElement&&!!g.head&&"function"==typeof g.addEventListener&&"function"==typeof 
g.createElement),y=(~h.indexOf("MSIE")||h.indexOf("Trident/"),function(){try{}catch(e){return!1}}(),[1,2,3,4,5,6,7,8,9,10]),w=y.concat([11,12,13,14,15,16,17,18,19,20]),x={GROUP:"group",SWAP_OPACITY:"swap-opacity",PRIMARY:"primary",SECONDARY:"secondary"},E=(["xs","sm","lg","fw","ul","li","border","pull-left","pull-right","spin","pulse","rotate-90","rotate-180","rotate-270","flip-horizontal","flip-vertical","flip-both","stack","stack-1x","stack-2x","inverse","layers","layers-text","layers-counter",x.GROUP,x.SWAP_OPACITY,x.PRIMARY,x.SECONDARY].concat(y.map((function(e){return"".concat(e,"x")}))).concat(w.map((function(e){return"w-".concat(e)}))),m.FontAwesomeConfig||{});if(g&&"function"==typeof g.querySelector){[["data-family-prefix","familyPrefix"],["data-replacement-class","replacementClass"],["data-auto-replace-svg","autoReplaceSvg"],["data-auto-add-css","autoAddCss"],["data-auto-a11y","autoA11y"],["data-search-pseudo-elements","searchPseudoElements"],["data-observe-mutations","observeMutations"],["data-mutate-approach","mutateApproach"],["data-keep-original-source","keepOriginalSource"],["data-measure-performance","measurePerformance"],["data-show-missing-icons","showMissingIcons"]].forEach((function(e){var t=u(e,2),n=t[0],r=t[1],o=function(e){return""===e||"false"!==e&&("true"===e||e)}(function(e){var t=g.querySelector("script["+e+"]");if(t)return t.getAttribute(e)}(n));null!=o&&(E[r]=o)}))}var k=l({},{familyPrefix:"fa",replacementClass:"svg-inline--fa",autoReplaceSvg:!0,autoAddCss:!0,autoA11y:!0,searchPseudoElements:!1,observeMutations:!0,mutateApproach:"async",keepOriginalSource:!0,measurePerformance:!1,showMissingIcons:!0},E);k.autoReplaceSvg||(k.observeMutations=!1);var S=l({},k);m.FontAwesomeConfig=S;var O=m||{};O.___FONT_AWESOME___||(O.___FONT_AWESOME___={}),O.___FONT_AWESOME___.styles||(O.___FONT_AWESOME___.styles={}),O.___FONT_AWESOME___.hooks||(O.___FONT_AWESOME___.hooks={}),O.___FONT_AWESOME___.shims||(O.___FONT_AWESOME___.shims=[]);var C=O.___FONT_AWESOME___,T=[];b&&((g.documentElement.doScroll?/^loaded|^c/:/^loaded|^i|^c/).test(g.readyState)||g.addEventListener("DOMContentLoaded",(function e(){g.removeEventListener("DOMContentLoaded",e),1,T.map((function(e){return e()}))})));var _,P=function(){},A=void 0!==e&&void 0!==e.process&&"function"==typeof e.process.emit,M=void 0===r?setTimeout:r,z=[];function F(){for(var e=0;e<z.length;e++)z[e][0](z[e][1]);z=[],_=!1}function j(e,t){z.push([e,t]),_||(_=!0,M(F,0))}function D(e){var t=e.owner,n=t._state,r=t._data,o=e[n],i=e.then;if("function"==typeof o){n="fulfilled";try{r=o(r)}catch(e){N(i,e)}}R(i,r)||("fulfilled"===n&&I(i,r),"rejected"===n&&N(i,r))}function R(e,t){var n;try{if(e===t)throw new TypeError("A promises callback cannot return that same promise.");if(t&&("function"==typeof t||"object"===o(t))){var r=t.then;if("function"==typeof r)return r.call(t,(function(r){n||(n=!0,t===r?L(e,r):I(e,r))}),(function(t){n||(n=!0,N(e,t))})),!0}}catch(t){return n||N(e,t),!0}return!1}function I(e,t){e!==t&&R(e,t)||L(e,t)}function L(e,t){"pending"===e._state&&(e._state="settled",e._data=t,j(H,e))}function N(e,t){"pending"===e._state&&(e._state="settled",e._data=t,j(B,e))}function V(e){e._then=e._then.forEach(D)}function H(e){e._state="fulfilled",V(e)}function B(t){t._state="rejected",V(t),!t._handled&&A&&e.process.emit("unhandledRejection",t._data,t)}function W(t){e.process.emit("rejectionHandled",t)}function U(e){if("function"!=typeof e)throw new TypeError("Promise resolver "+e+" is not a function");if(this instanceof U==!1)throw new 
TypeError("Failed to construct 'Promise': Please use the 'new' operator, this object constructor cannot be called as a function.");this._then=[],function(e,t){function n(e){N(t,e)}try{e((function(e){I(t,e)}),n)}catch(e){n(e)}}(e,this)}U.prototype={constructor:U,_state:"pending",_then:null,_data:void 0,_handled:!1,then:function(e,t){var n={owner:this,then:new this.constructor(P),fulfilled:e,rejected:t};return!t&&!e||this._handled||(this._handled=!0,"rejected"===this._state&&A&&j(W,this)),"fulfilled"===this._state||"rejected"===this._state?j(D,n):this._then.push(n),n.then},catch:function(e){return this.then(null,e)}},U.all=function(e){if(!Array.isArray(e))throw new TypeError("You must pass an array to Promise.all().");return new U((function(t,n){var r=[],o=0;function i(e){return o++,function(n){r[e]=n,--o||t(r)}}for(var a,l=0;l<e.length;l++)(a=e[l])&&"function"==typeof a.then?a.then(i(l),n):r[l]=a;o||t(r)}))},U.race=function(e){if(!Array.isArray(e))throw new TypeError("You must pass an array to Promise.race().");return new U((function(t,n){for(var r,o=0;o<e.length;o++)(r=e[o])&&"function"==typeof r.then?r.then(t,n):t(r)}))},U.resolve=function(e){return e&&"object"===o(e)&&e.constructor===U?e:new U((function(t){t(e)}))},U.reject=function(e){return new U((function(t,n){n(e)}))};var $={size:16,x:0,y:0,rotate:0,flipX:!1,flipY:!1};function Y(e){if(e&&b){var t=g.createElement("style");t.setAttribute("type","text/css"),t.innerHTML=e;for(var n=g.head.childNodes,r=null,o=n.length-1;o>-1;o--){var i=n[o],a=(i.tagName||"").toUpperCase();["STYLE","LINK"].indexOf(a)>-1&&(r=i)}return g.head.insertBefore(t,r),e}}function Q(){for(var e=12,t="";e-- >0;)t+="0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"[62*Math.random()|0];return t}function X(e){return"".concat(e).replace(/&/g,"&").replace(/"/g,""").replace(/'/g,"'").replace(/</g,"<").replace(/>/g,">")}function G(e){return Object.keys(e||{}).reduce((function(t,n){return t+"".concat(n,": ").concat(e[n],";")}),"")}function q(e){return e.size!==$.size||e.x!==$.x||e.y!==$.y||e.rotate!==$.rotate||e.flipX||e.flipY}function K(e){var t=e.transform,n=e.containerWidth,r=e.iconWidth,o={transform:"translate(".concat(n/2," 256)")},i="translate(".concat(32*t.x,", ").concat(32*t.y,") "),a="scale(".concat(t.size/16*(t.flipX?-1:1),", ").concat(t.size/16*(t.flipY?-1:1),") "),l="rotate(".concat(t.rotate," 0 0)");return{outer:o,inner:{transform:"".concat(i," ").concat(a," ").concat(l)},path:{transform:"translate(".concat(r/2*-1," -256)")}}}var Z={x:0,y:0,width:"100%",height:"100%"};function J(e){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];return e.attributes&&(e.attributes.fill||t)&&(e.attributes.fill="black"),e}function ee(e){var t=e.icons,n=t.main,r=t.mask,o=e.prefix,i=e.iconName,a=e.transform,u=e.symbol,c=e.title,s=e.maskId,f=e.titleId,p=e.extra,d=e.watchable,h=void 0!==d&&d,m=r.found?r:n,g=m.width,v=m.height,b="fa-w-".concat(Math.ceil(g/v*16)),y=[S.replacementClass,i?"".concat(S.familyPrefix,"-").concat(i):"",b].filter((function(e){return-1===p.classes.indexOf(e)})).concat(p.classes).join(" "),w={children:[],attributes:l({},p.attributes,{"data-prefix":o,"data-icon":i,class:y,role:p.attributes.role||"img",xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 ".concat(g," ").concat(v)})};h&&(w.attributes["data-fa-i2svg"]=""),c&&w.children.push({tag:"title",attributes:{id:w.attributes["aria-labelledby"]||"title-".concat(f||Q())},children:[c]});var 
x=l({},w,{prefix:o,iconName:i,main:n,mask:r,maskId:s,transform:a,symbol:u,styles:p.styles}),E=r.found&&n.found?function(e){var t,n=e.children,r=e.attributes,o=e.main,i=e.mask,a=e.maskId,u=e.transform,c=o.width,s=o.icon,f=i.width,p=i.icon,d=K({transform:u,containerWidth:f,iconWidth:c}),h={tag:"rect",attributes:l({},Z,{fill:"white"})},m=s.children?{children:s.children.map(J)}:{},g={tag:"g",attributes:l({},d.inner),children:[J(l({tag:s.tag,attributes:l({},s.attributes,d.path)},m))]},v={tag:"g",attributes:l({},d.outer),children:[g]},b="mask-".concat(a||Q()),y="clip-".concat(a||Q()),w={tag:"mask",attributes:l({},Z,{id:b,maskUnits:"userSpaceOnUse",maskContentUnits:"userSpaceOnUse"}),children:[h,v]},x={tag:"defs",children:[{tag:"clipPath",attributes:{id:y},children:(t=p,"g"===t.tag?t.children:[t])},w]};return n.push(x,{tag:"rect",attributes:l({fill:"currentColor","clip-path":"url(#".concat(y,")"),mask:"url(#".concat(b,")")},Z)}),{children:n,attributes:r}}(x):function(e){var t=e.children,n=e.attributes,r=e.main,o=e.transform,i=G(e.styles);if(i.length>0&&(n.style=i),q(o)){var a=K({transform:o,containerWidth:r.width,iconWidth:r.width});t.push({tag:"g",attributes:l({},a.outer),children:[{tag:"g",attributes:l({},a.inner),children:[{tag:r.icon.tag,children:r.icon.children,attributes:l({},r.icon.attributes,a.path)}]}]})}else t.push(r.icon);return{children:t,attributes:n}}(x),k=E.children,O=E.attributes;return x.children=k,x.attributes=O,u?function(e){var t=e.prefix,n=e.iconName,r=e.children,o=e.attributes,i=e.symbol;return[{tag:"svg",attributes:{style:"display: none;"},children:[{tag:"symbol",attributes:l({},o,{id:!0===i?"".concat(t,"-").concat(S.familyPrefix,"-").concat(n):i}),children:r}]}]}(x):function(e){var t=e.children,n=e.main,r=e.mask,o=e.attributes,i=e.styles,a=e.transform;if(q(a)&&n.found&&!r.found){var u={x:n.width/n.height/2,y:.5};o.style=G(l({},i,{"transform-origin":"".concat(u.x+a.x/16,"em ").concat(u.y+a.y/16,"em")}))}return[{tag:"svg",attributes:o,children:t}]}(x)}var te=function(){},ne=(S.measurePerformance&&v&&v.mark&&v.measure,function(e,t,n,r){var o,i,a,l=Object.keys(e),u=l.length,c=void 0!==r?function(e,t){return function(n,r,o,i){return e.call(t,n,r,o,i)}}(t,r):t;for(void 0===n?(o=1,a=e[l[0]]):(o=0,a=n);o<u;o++)a=c(a,e[i=l[o]],i,e);return a});function re(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=n.skipHooks,o=void 0!==r&&r,i=Object.keys(t).reduce((function(e,n){var r=t[n];return!!r.icon?e[r.iconName]=r.icon:e[n]=r,e}),{});"function"!=typeof C.hooks.addPack||o?C.styles[e]=l({},C.styles[e]||{},i):C.hooks.addPack(e,i),"fas"===e&&re("fa",t)}var oe=C.styles,ie=C.shims,ae=function(){var e=function(e){return ne(oe,(function(t,n,r){return t[r]=ne(n,e,{}),t}),{})};e((function(e,t,n){return t[3]&&(e[t[3]]=n),e})),e((function(e,t,n){var r=t[2];return e[n]=n,r.forEach((function(t){e[t]=n})),e}));var t="far"in oe;ne(ie,(function(e,n){var r=n[0],o=n[1],i=n[2];return"far"!==o||t||(o="fas"),e[r]={prefix:o,iconName:i},e}),{})};ae();C.styles;function le(e,t,n){if(e&&e[t]&&e[t][n])return{prefix:t,iconName:n,icon:e[t][n]}}function ue(e){var t=e.tag,n=e.attributes,r=void 0===n?{}:n,o=e.children,i=void 0===o?[]:o;return"string"==typeof e?X(e):"<".concat(t," ").concat(function(e){return Object.keys(e||{}).reduce((function(t,n){return t+"".concat(n,'="').concat(X(e[n]),'" ')}),"").trim()}(r),">").concat(i.map(ue).join(""),"</").concat(t,">")}var ce=function(e){var t={size:16,x:0,y:0,flipX:!1,flipY:!1,rotate:0};return e?e.toLowerCase().split(" ").reduce((function(e,t){var 
n=t.toLowerCase().split("-"),r=n[0],o=n.slice(1).join("-");if(r&&"h"===o)return e.flipX=!0,e;if(r&&"v"===o)return e.flipY=!0,e;if(o=parseFloat(o),isNaN(o))return e;switch(r){case"grow":e.size=e.size+o;break;case"shrink":e.size=e.size-o;break;case"left":e.x=e.x-o;break;case"right":e.x=e.x+o;break;case"up":e.y=e.y-o;break;case"down":e.y=e.y+o;break;case"rotate":e.rotate=e.rotate+o}return e}),t):t};function se(e){this.name="MissingIcon",this.message=e||"Icon unavailable",this.stack=(new Error).stack}se.prototype=Object.create(Error.prototype),se.prototype.constructor=se;var fe={fill:"currentColor"},pe={attributeType:"XML",repeatCount:"indefinite",dur:"2s"},de={tag:"path",attributes:l({},fe,{d:"M156.5,447.7l-12.6,29.5c-18.7-9.5-35.9-21.2-51.5-34.9l22.7-22.7C127.6,430.5,141.5,440,156.5,447.7z M40.6,272H8.5 c1.4,21.2,5.4,41.7,11.7,61.1L50,321.2C45.1,305.5,41.8,289,40.6,272z M40.6,240c1.4-18.8,5.2-37,11.1-54.1l-29.5-12.6 C14.7,194.3,10,216.7,8.5,240H40.6z M64.3,156.5c7.8-14.9,17.2-28.8,28.1-41.5L69.7,92.3c-13.7,15.6-25.5,32.8-34.9,51.5 L64.3,156.5z M397,419.6c-13.9,12-29.4,22.3-46.1,30.4l11.9,29.8c20.7-9.9,39.8-22.6,56.9-37.6L397,419.6z M115,92.4 c13.9-12,29.4-22.3,46.1-30.4l-11.9-29.8c-20.7,9.9-39.8,22.6-56.8,37.6L115,92.4z M447.7,355.5c-7.8,14.9-17.2,28.8-28.1,41.5 l22.7,22.7c13.7-15.6,25.5-32.9,34.9-51.5L447.7,355.5z M471.4,272c-1.4,18.8-5.2,37-11.1,54.1l29.5,12.6 c7.5-21.1,12.2-43.5,13.6-66.8H471.4z M321.2,462c-15.7,5-32.2,8.2-49.2,9.4v32.1c21.2-1.4,41.7-5.4,61.1-11.7L321.2,462z M240,471.4c-18.8-1.4-37-5.2-54.1-11.1l-12.6,29.5c21.1,7.5,43.5,12.2,66.8,13.6V471.4z M462,190.8c5,15.7,8.2,32.2,9.4,49.2h32.1 c-1.4-21.2-5.4-41.7-11.7-61.1L462,190.8z M92.4,397c-12-13.9-22.3-29.4-30.4-46.1l-29.8,11.9c9.9,20.7,22.6,39.8,37.6,56.9 L92.4,397z M272,40.6c18.8,1.4,36.9,5.2,54.1,11.1l12.6-29.5C317.7,14.7,295.3,10,272,8.5V40.6z M190.8,50 c15.7-5,32.2-8.2,49.2-9.4V8.5c-21.2,1.4-41.7,5.4-61.1,11.7L190.8,50z M442.3,92.3L419.6,115c12,13.9,22.3,29.4,30.5,46.1 l29.8-11.9C470,128.5,457.3,109.4,442.3,92.3z M397,92.4l22.7-22.7c-15.6-13.7-32.8-25.5-51.5-34.9l-12.6,29.5 C370.4,72.1,384.4,81.5,397,92.4z"})},he=l({},pe,{attributeName:"opacity"});l({},fe,{cx:"256",cy:"364",r:"28"}),l({},pe,{attributeName:"r",values:"28;14;28;28;14;28;"}),l({},he,{values:"1;0;1;1;0;1;"}),l({},fe,{opacity:"1",d:"M263.7,312h-16c-6.6,0-12-5.4-12-12c0-71,77.4-63.9,77.4-107.8c0-20-17.8-40.2-57.4-40.2c-29.1,0-44.3,9.6-59.2,28.7 c-3.9,5-11.1,6-16.2,2.4l-13.1-9.2c-5.6-3.9-6.9-11.8-2.6-17.2c21.2-27.2,46.4-44.7,91.2-44.7c52.3,0,97.4,29.8,97.4,80.2 c0,67.6-77.4,63.5-77.4,107.8C275.7,306.6,270.3,312,263.7,312z"}),l({},he,{values:"1;0;0;0;0;1;"}),l({},fe,{opacity:"0",d:"M232.5,134.5l7,168c0.3,6.4,5.6,11.5,12,11.5h9c6.4,0,11.7-5.1,12-11.5l7-168c0.3-6.8-5.2-12.5-12-12.5h-23 C237.7,122,232.2,127.7,232.5,134.5z"}),l({},he,{values:"0;0;1;1;0;0;"}),C.styles;function me(e){var t=e[0],n=e[1],r=u(e.slice(4),1)[0];return{found:!0,width:t,height:n,icon:Array.isArray(r)?{tag:"g",attributes:{class:"".concat(S.familyPrefix,"-").concat(x.GROUP)},children:[{tag:"path",attributes:{class:"".concat(S.familyPrefix,"-").concat(x.SECONDARY),fill:"currentColor",d:r[0]}},{tag:"path",attributes:{class:"".concat(S.familyPrefix,"-").concat(x.PRIMARY),fill:"currentColor",d:r[1]}}]}:{tag:"path",attributes:{fill:"currentColor",d:r}}}}C.styles;function ge(){var e="svg-inline--fa",t=S.familyPrefix,n=S.replacementClass,r='svg:not(:root).svg-inline--fa {\n overflow: visible;\n}\n\n.svg-inline--fa {\n display: inline-block;\n font-size: inherit;\n height: 1em;\n overflow: visible;\n 
vertical-align: -0.125em;\n}\n.svg-inline--fa.fa-lg {\n vertical-align: -0.225em;\n}\n.svg-inline--fa.fa-w-1 {\n width: 0.0625em;\n}\n.svg-inline--fa.fa-w-2 {\n width: 0.125em;\n}\n.svg-inline--fa.fa-w-3 {\n width: 0.1875em;\n}\n.svg-inline--fa.fa-w-4 {\n width: 0.25em;\n}\n.svg-inline--fa.fa-w-5 {\n width: 0.3125em;\n}\n.svg-inline--fa.fa-w-6 {\n width: 0.375em;\n}\n.svg-inline--fa.fa-w-7 {\n width: 0.4375em;\n}\n.svg-inline--fa.fa-w-8 {\n width: 0.5em;\n}\n.svg-inline--fa.fa-w-9 {\n width: 0.5625em;\n}\n.svg-inline--fa.fa-w-10 {\n width: 0.625em;\n}\n.svg-inline--fa.fa-w-11 {\n width: 0.6875em;\n}\n.svg-inline--fa.fa-w-12 {\n width: 0.75em;\n}\n.svg-inline--fa.fa-w-13 {\n width: 0.8125em;\n}\n.svg-inline--fa.fa-w-14 {\n width: 0.875em;\n}\n.svg-inline--fa.fa-w-15 {\n width: 0.9375em;\n}\n.svg-inline--fa.fa-w-16 {\n width: 1em;\n}\n.svg-inline--fa.fa-w-17 {\n width: 1.0625em;\n}\n.svg-inline--fa.fa-w-18 {\n width: 1.125em;\n}\n.svg-inline--fa.fa-w-19 {\n width: 1.1875em;\n}\n.svg-inline--fa.fa-w-20 {\n width: 1.25em;\n}\n.svg-inline--fa.fa-pull-left {\n margin-right: 0.3em;\n width: auto;\n}\n.svg-inline--fa.fa-pull-right {\n margin-left: 0.3em;\n width: auto;\n}\n.svg-inline--fa.fa-border {\n height: 1.5em;\n}\n.svg-inline--fa.fa-li {\n width: 2em;\n}\n.svg-inline--fa.fa-fw {\n width: 1.25em;\n}\n\n.fa-layers svg.svg-inline--fa {\n bottom: 0;\n left: 0;\n margin: auto;\n position: absolute;\n right: 0;\n top: 0;\n}\n\n.fa-layers {\n display: inline-block;\n height: 1em;\n position: relative;\n text-align: center;\n vertical-align: -0.125em;\n width: 1em;\n}\n.fa-layers svg.svg-inline--fa {\n -webkit-transform-origin: center center;\n transform-origin: center center;\n}\n\n.fa-layers-counter, .fa-layers-text {\n display: inline-block;\n position: absolute;\n text-align: center;\n}\n\n.fa-layers-text {\n left: 50%;\n top: 50%;\n -webkit-transform: translate(-50%, -50%);\n transform: translate(-50%, -50%);\n -webkit-transform-origin: center center;\n transform-origin: center center;\n}\n\n.fa-layers-counter {\n background-color: #ff253a;\n border-radius: 1em;\n -webkit-box-sizing: border-box;\n box-sizing: border-box;\n color: #fff;\n height: 1.5em;\n line-height: 1;\n max-width: 5em;\n min-width: 1.5em;\n overflow: hidden;\n padding: 0.25em;\n right: 0;\n text-overflow: ellipsis;\n top: 0;\n -webkit-transform: scale(0.25);\n transform: scale(0.25);\n -webkit-transform-origin: top right;\n transform-origin: top right;\n}\n\n.fa-layers-bottom-right {\n bottom: 0;\n right: 0;\n top: auto;\n -webkit-transform: scale(0.25);\n transform: scale(0.25);\n -webkit-transform-origin: bottom right;\n transform-origin: bottom right;\n}\n\n.fa-layers-bottom-left {\n bottom: 0;\n left: 0;\n right: auto;\n top: auto;\n -webkit-transform: scale(0.25);\n transform: scale(0.25);\n -webkit-transform-origin: bottom left;\n transform-origin: bottom left;\n}\n\n.fa-layers-top-right {\n right: 0;\n top: 0;\n -webkit-transform: scale(0.25);\n transform: scale(0.25);\n -webkit-transform-origin: top right;\n transform-origin: top right;\n}\n\n.fa-layers-top-left {\n left: 0;\n right: auto;\n top: 0;\n -webkit-transform: scale(0.25);\n transform: scale(0.25);\n -webkit-transform-origin: top left;\n transform-origin: top left;\n}\n\n.fa-lg {\n font-size: 1.3333333333em;\n line-height: 0.75em;\n vertical-align: -0.0667em;\n}\n\n.fa-xs {\n font-size: 0.75em;\n}\n\n.fa-sm {\n font-size: 0.875em;\n}\n\n.fa-1x {\n font-size: 1em;\n}\n\n.fa-2x {\n font-size: 2em;\n}\n\n.fa-3x {\n font-size: 3em;\n}\n\n.fa-4x {\n font-size: 
4em;\n}\n\n.fa-5x {\n font-size: 5em;\n}\n\n.fa-6x {\n font-size: 6em;\n}\n\n.fa-7x {\n font-size: 7em;\n}\n\n.fa-8x {\n font-size: 8em;\n}\n\n.fa-9x {\n font-size: 9em;\n}\n\n.fa-10x {\n font-size: 10em;\n}\n\n.fa-fw {\n text-align: center;\n width: 1.25em;\n}\n\n.fa-ul {\n list-style-type: none;\n margin-left: 2.5em;\n padding-left: 0;\n}\n.fa-ul > li {\n position: relative;\n}\n\n.fa-li {\n left: -2em;\n position: absolute;\n text-align: center;\n width: 2em;\n line-height: inherit;\n}\n\n.fa-border {\n border: solid 0.08em #eee;\n border-radius: 0.1em;\n padding: 0.2em 0.25em 0.15em;\n}\n\n.fa-pull-left {\n float: left;\n}\n\n.fa-pull-right {\n float: right;\n}\n\n.fa.fa-pull-left,\n.fas.fa-pull-left,\n.far.fa-pull-left,\n.fal.fa-pull-left,\n.fab.fa-pull-left {\n margin-right: 0.3em;\n}\n.fa.fa-pull-right,\n.fas.fa-pull-right,\n.far.fa-pull-right,\n.fal.fa-pull-right,\n.fab.fa-pull-right {\n margin-left: 0.3em;\n}\n\n.fa-spin {\n -webkit-animation: fa-spin 2s infinite linear;\n animation: fa-spin 2s infinite linear;\n}\n\n.fa-pulse {\n -webkit-animation: fa-spin 1s infinite steps(8);\n animation: fa-spin 1s infinite steps(8);\n}\n\n@-webkit-keyframes fa-spin {\n 0% {\n -webkit-transform: rotate(0deg);\n transform: rotate(0deg);\n }\n 100% {\n -webkit-transform: rotate(360deg);\n transform: rotate(360deg);\n }\n}\n\n@keyframes fa-spin {\n 0% {\n -webkit-transform: rotate(0deg);\n transform: rotate(0deg);\n }\n 100% {\n -webkit-transform: rotate(360deg);\n transform: rotate(360deg);\n }\n}\n.fa-rotate-90 {\n -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";\n -webkit-transform: rotate(90deg);\n transform: rotate(90deg);\n}\n\n.fa-rotate-180 {\n -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";\n -webkit-transform: rotate(180deg);\n transform: rotate(180deg);\n}\n\n.fa-rotate-270 {\n -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";\n -webkit-transform: rotate(270deg);\n transform: rotate(270deg);\n}\n\n.fa-flip-horizontal {\n -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";\n -webkit-transform: scale(-1, 1);\n transform: scale(-1, 1);\n}\n\n.fa-flip-vertical {\n -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";\n -webkit-transform: scale(1, -1);\n transform: scale(1, -1);\n}\n\n.fa-flip-both, .fa-flip-horizontal.fa-flip-vertical {\n -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";\n -webkit-transform: scale(-1, -1);\n transform: scale(-1, -1);\n}\n\n:root .fa-rotate-90,\n:root .fa-rotate-180,\n:root .fa-rotate-270,\n:root .fa-flip-horizontal,\n:root .fa-flip-vertical,\n:root .fa-flip-both {\n -webkit-filter: none;\n filter: none;\n}\n\n.fa-stack {\n display: inline-block;\n height: 2em;\n position: relative;\n width: 2.5em;\n}\n\n.fa-stack-1x,\n.fa-stack-2x {\n bottom: 0;\n left: 0;\n margin: auto;\n position: absolute;\n right: 0;\n top: 0;\n}\n\n.svg-inline--fa.fa-stack-1x {\n height: 1em;\n width: 1.25em;\n}\n.svg-inline--fa.fa-stack-2x {\n height: 2em;\n width: 2.5em;\n}\n\n.fa-inverse {\n color: #fff;\n}\n\n.sr-only {\n border: 0;\n clip: rect(0, 0, 0, 0);\n height: 1px;\n margin: -1px;\n overflow: hidden;\n padding: 0;\n position: absolute;\n width: 1px;\n}\n\n.sr-only-focusable:active, .sr-only-focusable:focus {\n clip: auto;\n height: auto;\n margin: 0;\n overflow: visible;\n position: static;\n width: auto;\n}\n\n.svg-inline--fa .fa-primary {\n fill: var(--fa-primary-color, currentColor);\n opacity: 1;\n opacity: 
var(--fa-primary-opacity, 1);\n}\n\n.svg-inline--fa .fa-secondary {\n fill: var(--fa-secondary-color, currentColor);\n opacity: 0.4;\n opacity: var(--fa-secondary-opacity, 0.4);\n}\n\n.svg-inline--fa.fa-swap-opacity .fa-primary {\n opacity: 0.4;\n opacity: var(--fa-secondary-opacity, 0.4);\n}\n\n.svg-inline--fa.fa-swap-opacity .fa-secondary {\n opacity: 1;\n opacity: var(--fa-primary-opacity, 1);\n}\n\n.svg-inline--fa mask .fa-primary,\n.svg-inline--fa mask .fa-secondary {\n fill: black;\n}\n\n.fad.fa-inverse {\n color: #fff;\n}';if("fa"!==t||n!==e){var o=new RegExp("\\.".concat("fa","\\-"),"g"),i=new RegExp("\\--".concat("fa","\\-"),"g"),a=new RegExp("\\.".concat(e),"g");r=r.replace(o,".".concat(t,"-")).replace(i,"--".concat(t,"-")).replace(a,".".concat(n))}return r}function ve(){S.autoAddCss&&!Ee&&(Y(ge()),Ee=!0)}function be(e,t){return Object.defineProperty(e,"abstract",{get:t}),Object.defineProperty(e,"html",{get:function(){return e.abstract.map((function(e){return ue(e)}))}}),Object.defineProperty(e,"node",{get:function(){if(b){var t=g.createElement("div");return t.innerHTML=e.html,t.children}}}),e}function ye(e){var t=e.prefix,n=void 0===t?"fa":t,r=e.iconName;if(r)return le(xe.definitions,n,r)||le(C.styles,n,r)}var we,xe=new(function(){function e(){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,e),this.definitions={}}var t,n,r;return t=e,(n=[{key:"add",value:function(){for(var e=this,t=arguments.length,n=new Array(t),r=0;r<t;r++)n[r]=arguments[r];var o=n.reduce(this._pullDefinitions,{});Object.keys(o).forEach((function(t){e.definitions[t]=l({},e.definitions[t]||{},o[t]),re(t,o[t]),ae()}))}},{key:"reset",value:function(){this.definitions={}}},{key:"_pullDefinitions",value:function(e,t){var n=t.prefix&&t.iconName&&t.icon?{0:t}:t;return Object.keys(n).map((function(t){var r=n[t],o=r.prefix,i=r.iconName,a=r.icon;e[o]||(e[o]={}),e[o][i]=a})),e}}])&&i(t.prototype,n),r&&i(t,r),e}()),Ee=!1,ke={transform:function(e){return ce(e)}},Se=(we=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.transform,r=void 0===n?$:n,o=t.symbol,i=void 0!==o&&o,a=t.mask,u=void 0===a?null:a,c=t.maskId,s=void 0===c?null:c,f=t.title,p=void 0===f?null:f,d=t.titleId,h=void 0===d?null:d,m=t.classes,g=void 0===m?[]:m,v=t.attributes,b=void 0===v?{}:v,y=t.styles,w=void 0===y?{}:y;if(e){var x=e.prefix,E=e.iconName,k=e.icon;return be(l({type:"icon"},e),(function(){return ve(),S.autoA11y&&(p?b["aria-labelledby"]="".concat(S.replacementClass,"-title-").concat(h||Q()):(b["aria-hidden"]="true",b.focusable="false")),ee({icons:{main:me(k),mask:u?me(u.icon):{found:!1,width:null,height:null,icon:{}}},prefix:x,iconName:E,transform:l({},$,r),symbol:i,title:p,maskId:s,titleId:h,extra:{attributes:b,styles:w,classes:g}})}))}},function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=(e||{}).icon?e:ye(e||{}),r=t.mask;return r&&(r=(r||{}).icon?r:ye(r||{})),we(n,l({},t,{mask:r}))})}).call(this,n(19),n(35).setImmediate)},function(e,t){e.exports=function(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}},function(e,t,n){"use strict";var 
r=n(12),o=/^((children|dangerouslySetInnerHTML|key|ref|autoFocus|defaultValue|defaultChecked|innerHTML|suppressContentEditableWarning|suppressHydrationWarning|valueLink|accept|acceptCharset|accessKey|action|allow|allowUserMedia|allowPaymentRequest|allowFullScreen|allowTransparency|alt|async|autoComplete|autoPlay|capture|cellPadding|cellSpacing|challenge|charSet|checked|cite|classID|className|cols|colSpan|content|contentEditable|contextMenu|controls|controlsList|coords|crossOrigin|data|dateTime|decoding|default|defer|dir|disabled|disablePictureInPicture|download|draggable|encType|form|formAction|formEncType|formMethod|formNoValidate|formTarget|frameBorder|headers|height|hidden|high|href|hrefLang|htmlFor|httpEquiv|id|inputMode|integrity|is|keyParams|keyType|kind|label|lang|list|loading|loop|low|marginHeight|marginWidth|max|maxLength|media|mediaGroup|method|min|minLength|multiple|muted|name|nonce|noValidate|open|optimum|pattern|placeholder|playsInline|poster|preload|profile|radioGroup|readOnly|referrerPolicy|rel|required|reversed|role|rows|rowSpan|sandbox|scope|scoped|scrolling|seamless|selected|shape|size|sizes|slot|span|spellCheck|src|srcDoc|srcLang|srcSet|start|step|style|summary|tabIndex|target|title|type|useMap|value|width|wmode|wrap|about|datatype|inlist|prefix|property|resource|typeof|vocab|autoCapitalize|autoCorrect|autoSave|color|inert|itemProp|itemScope|itemType|itemID|itemRef|on|results|security|unselectable|accentHeight|accumulate|additive|alignmentBaseline|allowReorder|alphabetic|amplitude|arabicForm|ascent|attributeName|attributeType|autoReverse|azimuth|baseFrequency|baselineShift|baseProfile|bbox|begin|bias|by|calcMode|capHeight|clip|clipPathUnits|clipPath|clipRule|colorInterpolation|colorInterpolationFilters|colorProfile|colorRendering|contentScriptType|contentStyleType|cursor|cx|cy|d|decelerate|descent|diffuseConstant|direction|display|divisor|dominantBaseline|dur|dx|dy|edgeMode|elevation|enableBackground|end|exponent|externalResourcesRequired|fill|fillOpacity|fillRule|filter|filterRes|filterUnits|floodColor|floodOpacity|focusable|fontFamily|fontSize|fontSizeAdjust|fontStretch|fontStyle|fontVariant|fontWeight|format|from|fr|fx|fy|g1|g2|glyphName|glyphOrientationHorizontal|glyphOrientationVertical|glyphRef|gradientTransform|gradientUnits|hanging|horizAdvX|horizOriginX|ideographic|imageRendering|in|in2|intercept|k|k1|k2|k3|k4|kernelMatrix|kernelUnitLength|kerning|keyPoints|keySplines|keyTimes|lengthAdjust|letterSpacing|lightingColor|limitingConeAngle|local|markerEnd|markerMid|markerStart|markerHeight|markerUnits|markerWidth|mask|maskContentUnits|maskUnits|mathematical|mode|numOctaves|offset|opacity|operator|order|orient|orientation|origin|overflow|overlinePosition|overlineThickness|panose1|paintOrder|pathLength|patternContentUnits|patternTransform|patternUnits|pointerEvents|points|pointsAtX|pointsAtY|pointsAtZ|preserveAlpha|preserveAspectRatio|primitiveUnits|r|radius|refX|refY|renderingIntent|repeatCount|repeatDur|requiredExtensions|requiredFeatures|restart|result|rotate|rx|ry|scale|seed|shapeRendering|slope|spacing|specularConstant|specularExponent|speed|spreadMethod|startOffset|stdDeviation|stemh|stemv|stitchTiles|stopColor|stopOpacity|strikethroughPosition|strikethroughThickness|string|stroke|strokeDasharray|strokeDashoffset|strokeLinecap|strokeLinejoin|strokeMiterlimit|strokeOpacity|strokeWidth|surfaceScale|systemLanguage|tableValues|targetX|targetY|textAnchor|textDecoration|textRendering|textLength|to|transform|u1|u2|underlinePosition|underlineThickness|unicode|unicodeBidi|
unicodeRange|unitsPerEm|vAlphabetic|vHanging|vIdeographic|vMathematical|values|vectorEffect|version|vertAdvY|vertOriginX|vertOriginY|viewBox|viewTarget|visibility|widths|wordSpacing|writingMode|x|xHeight|x1|x2|xChannelSelector|xlinkActuate|xlinkArcrole|xlinkHref|xlinkRole|xlinkShow|xlinkTitle|xlinkType|xmlBase|xmlns|xmlnsXlink|xmlLang|xmlSpace|y|y1|y2|yChannelSelector|z|zoomAndPan|for|class|autofocus)|(([Dd][Aa][Tt][Aa]|[Aa][Rr][Ii][Aa]|x)-.*))$/,i=Object(r.a)((function(e){return o.test(e)||111===e.charCodeAt(0)&&110===e.charCodeAt(1)&&e.charCodeAt(2)<91}));t.a=i},function(e,t,n){"use strict";n.d(t,"a",(function(){return r}));var r=function(){function e(e){this.isSpeedy=void 0===e.speedy||e.speedy,this.tags=[],this.ctr=0,this.nonce=e.nonce,this.key=e.key,this.container=e.container,this.before=null}var t=e.prototype;return t.insert=function(e){if(this.ctr%(this.isSpeedy?65e3:1)==0){var t,n=function(e){var t=document.createElement("style");return t.setAttribute("data-emotion",e.key),void 0!==e.nonce&&t.setAttribute("nonce",e.nonce),t.appendChild(document.createTextNode("")),t}(this);t=0===this.tags.length?this.before:this.tags[this.tags.length-1].nextSibling,this.container.insertBefore(n,t),this.tags.push(n)}var r=this.tags[this.tags.length-1];if(this.isSpeedy){var o=function(e){if(e.sheet)return e.sheet;for(var t=0;t<document.styleSheets.length;t++)if(document.styleSheets[t].ownerNode===e)return document.styleSheets[t]}(r);try{var i=105===e.charCodeAt(1)&&64===e.charCodeAt(0);o.insertRule(e,i?0:o.cssRules.length)}catch(e){0}}else r.appendChild(document.createTextNode(e));this.ctr++},t.flush=function(){this.tags.forEach((function(e){return e.parentNode.removeChild(e)})),this.tags=[],this.ctr=0},e}()},function(e,t,n){"use strict";t.a=function(e){var t=new WeakMap;return function(n){if(t.has(n))return t.get(n);var r=e(n);return t.set(n,r),r}}},function(e,t,n){"use strict";var r=n(16);var o=function(e){function t(e,t,r){var o=t.trim().split(h);t=o;var i=o.length,a=e.length;switch(a){case 0:case 1:var l=0;for(e=0===a?"":e[0]+" ";l<i;++l)t[l]=n(e,t[l],r).trim();break;default:var u=l=0;for(t=[];l<i;++l)for(var c=0;c<a;++c)t[u++]=n(e[c]+" ",o[l],r).trim()}return t}function n(e,t,n){var r=t.charCodeAt(0);switch(33>r&&(r=(t=t.trim()).charCodeAt(0)),r){case 38:return t.replace(m,"$1"+e.trim());case 58:return e.trim()+t.replace(m,"$1"+e.trim());default:if(0<1*n&&0<t.indexOf("\f"))return t.replace(m,(58===e.charCodeAt(0)?"":"$1")+e.trim())}return e+t}function r(e,t,n,i){var a=e+";",l=2*t+3*n+4*i;if(944===l){e=a.indexOf(":",9)+1;var u=a.substring(e,a.length-1).trim();return u=a.substring(0,e).trim()+u+";",1===P||2===P&&o(u,1)?"-webkit-"+u+u:u}if(0===P||2===P&&!o(a,1))return a;switch(l){case 1015:return 97===a.charCodeAt(10)?"-webkit-"+a+a:a;case 951:return 116===a.charCodeAt(3)?"-webkit-"+a+a:a;case 963:return 110===a.charCodeAt(5)?"-webkit-"+a+a:a;case 1009:if(100!==a.charCodeAt(4))break;case 969:case 942:return"-webkit-"+a+a;case 978:return"-webkit-"+a+"-moz-"+a+a;case 1019:case 983:return"-webkit-"+a+"-moz-"+a+"-ms-"+a+a;case 883:if(45===a.charCodeAt(8))return"-webkit-"+a+a;if(0<a.indexOf("image-set(",11))return a.replace(O,"$1-webkit-$2")+a;break;case 932:if(45===a.charCodeAt(4))switch(a.charCodeAt(5)){case 103:return"-webkit-box-"+a.replace("-grow","")+"-webkit-"+a+"-ms-"+a.replace("grow","positive")+a;case 115:return"-webkit-"+a+"-ms-"+a.replace("shrink","negative")+a;case 98:return"-webkit-"+a+"-ms-"+a.replace("basis","preferred-size")+a}return"-webkit-"+a+"-ms-"+a+a;case 
964:return"-webkit-"+a+"-ms-flex-"+a+a;case 1023:if(99!==a.charCodeAt(8))break;return"-webkit-box-pack"+(u=a.substring(a.indexOf(":",15)).replace("flex-","").replace("space-between","justify"))+"-webkit-"+a+"-ms-flex-pack"+u+a;case 1005:return p.test(a)?a.replace(f,":-webkit-")+a.replace(f,":-moz-")+a:a;case 1e3:switch(t=(u=a.substring(13).trim()).indexOf("-")+1,u.charCodeAt(0)+u.charCodeAt(t)){case 226:u=a.replace(y,"tb");break;case 232:u=a.replace(y,"tb-rl");break;case 220:u=a.replace(y,"lr");break;default:return a}return"-webkit-"+a+"-ms-"+u+a;case 1017:if(-1===a.indexOf("sticky",9))break;case 975:switch(t=(a=e).length-10,l=(u=(33===a.charCodeAt(t)?a.substring(0,t):a).substring(e.indexOf(":",7)+1).trim()).charCodeAt(0)+(0|u.charCodeAt(7))){case 203:if(111>u.charCodeAt(8))break;case 115:a=a.replace(u,"-webkit-"+u)+";"+a;break;case 207:case 102:a=a.replace(u,"-webkit-"+(102<l?"inline-":"")+"box")+";"+a.replace(u,"-webkit-"+u)+";"+a.replace(u,"-ms-"+u+"box")+";"+a}return a+";";case 938:if(45===a.charCodeAt(5))switch(a.charCodeAt(6)){case 105:return u=a.replace("-items",""),"-webkit-"+a+"-webkit-box-"+u+"-ms-flex-"+u+a;case 115:return"-webkit-"+a+"-ms-flex-item-"+a.replace(E,"")+a;default:return"-webkit-"+a+"-ms-flex-line-pack"+a.replace("align-content","").replace(E,"")+a}break;case 973:case 989:if(45!==a.charCodeAt(3)||122===a.charCodeAt(4))break;case 931:case 953:if(!0===S.test(e))return 115===(u=e.substring(e.indexOf(":")+1)).charCodeAt(0)?r(e.replace("stretch","fill-available"),t,n,i).replace(":fill-available",":stretch"):a.replace(u,"-webkit-"+u)+a.replace(u,"-moz-"+u.replace("fill-",""))+a;break;case 962:if(a="-webkit-"+a+(102===a.charCodeAt(5)?"-ms-"+a:"")+a,211===n+i&&105===a.charCodeAt(13)&&0<a.indexOf("transform",10))return a.substring(0,a.indexOf(";",27)+1).replace(d,"$1-webkit-$2")+a}return a}function o(e,t){var n=e.indexOf(1===t?":":"{"),r=e.substring(0,3!==t?n:10);return n=e.substring(n+1,e.length-1),F(2!==t?r:r.replace(k,"$1"),n,t)}function i(e,t){var n=r(t,t.charCodeAt(0),t.charCodeAt(1),t.charCodeAt(2));return n!==t+";"?n.replace(x," or ($1)").substring(4):"("+t+")"}function a(e,t,n,r,o,i,a,l,c,s){for(var f,p=0,d=t;p<z;++p)switch(f=M[p].call(u,e,d,n,r,o,i,a,l,c,s)){case void 0:case!1:case!0:case null:break;default:d=f}if(d!==t)return d}function l(e){return void 0!==(e=e.prefix)&&(F=null,e?"function"!=typeof e?P=1:(P=2,F=e):P=0),l}function u(e,n){var l=e;if(33>l.charCodeAt(0)&&(l=l.trim()),l=[l],0<z){var u=a(-1,n,l,l,T,C,0,0,0,0);void 0!==u&&"string"==typeof u&&(n=u)}var f=function e(n,l,u,f,p){for(var d,h,m,y,x,E=0,k=0,S=0,O=0,M=0,F=0,D=m=d=0,R=0,I=0,L=0,N=0,V=u.length,H=V-1,B="",W="",U="",$="";R<V;){if(h=u.charCodeAt(R),R===H&&0!==k+O+S+E&&(0!==k&&(h=47===k?10:47),O=S=E=0,V++,H++),0===k+O+S+E){if(R===H&&(0<I&&(B=B.replace(s,"")),0<B.trim().length)){switch(h){case 32:case 9:case 59:case 13:case 10:break;default:B+=u.charAt(R)}h=59}switch(h){case 123:for(d=(B=B.trim()).charCodeAt(0),m=1,N=++R;R<V;){switch(h=u.charCodeAt(R)){case 123:m++;break;case 125:m--;break;case 47:switch(h=u.charCodeAt(R+1)){case 42:case 47:e:{for(D=R+1;D<H;++D)switch(u.charCodeAt(D)){case 47:if(42===h&&42===u.charCodeAt(D-1)&&R+2!==D){R=D+1;break e}break;case 10:if(47===h){R=D+1;break e}}R=D}}break;case 91:h++;case 40:h++;case 34:case 39:for(;R++<H&&u.charCodeAt(R)!==h;);}if(0===m)break;R++}switch(m=u.substring(N,R),0===d&&(d=(B=B.replace(c,"").trim()).charCodeAt(0)),d){case 64:switch(0<I&&(B=B.replace(s,"")),h=B.charCodeAt(1)){case 100:case 109:case 115:case 
45:I=l;break;default:I=A}if(N=(m=e(l,I,m,h,p+1)).length,0<z&&(x=a(3,m,I=t(A,B,L),l,T,C,N,h,p,f),B=I.join(""),void 0!==x&&0===(N=(m=x.trim()).length)&&(h=0,m="")),0<N)switch(h){case 115:B=B.replace(w,i);case 100:case 109:case 45:m=B+"{"+m+"}";break;case 107:m=(B=B.replace(g,"$1 $2"))+"{"+m+"}",m=1===P||2===P&&o("@"+m,3)?"@-webkit-"+m+"@"+m:"@"+m;break;default:m=B+m,112===f&&(W+=m,m="")}else m="";break;default:m=e(l,t(l,B,L),m,f,p+1)}U+=m,m=L=I=D=d=0,B="",h=u.charCodeAt(++R);break;case 125:case 59:if(1<(N=(B=(0<I?B.replace(s,""):B).trim()).length))switch(0===D&&(d=B.charCodeAt(0),45===d||96<d&&123>d)&&(N=(B=B.replace(" ",":")).length),0<z&&void 0!==(x=a(1,B,l,n,T,C,W.length,f,p,f))&&0===(N=(B=x.trim()).length)&&(B="\0\0"),d=B.charCodeAt(0),h=B.charCodeAt(1),d){case 0:break;case 64:if(105===h||99===h){$+=B+u.charAt(R);break}default:58!==B.charCodeAt(N-1)&&(W+=r(B,d,h,B.charCodeAt(2)))}L=I=D=d=0,B="",h=u.charCodeAt(++R)}}switch(h){case 13:case 10:47===k?k=0:0===1+d&&107!==f&&0<B.length&&(I=1,B+="\0"),0<z*j&&a(0,B,l,n,T,C,W.length,f,p,f),C=1,T++;break;case 59:case 125:if(0===k+O+S+E){C++;break}default:switch(C++,y=u.charAt(R),h){case 9:case 32:if(0===O+E+k)switch(M){case 44:case 58:case 9:case 32:y="";break;default:32!==h&&(y=" ")}break;case 0:y="\\0";break;case 12:y="\\f";break;case 11:y="\\v";break;case 38:0===O+k+E&&(I=L=1,y="\f"+y);break;case 108:if(0===O+k+E+_&&0<D)switch(R-D){case 2:112===M&&58===u.charCodeAt(R-3)&&(_=M);case 8:111===F&&(_=F)}break;case 58:0===O+k+E&&(D=R);break;case 44:0===k+S+O+E&&(I=1,y+="\r");break;case 34:case 39:0===k&&(O=O===h?0:0===O?h:O);break;case 91:0===O+k+S&&E++;break;case 93:0===O+k+S&&E--;break;case 41:0===O+k+E&&S--;break;case 40:if(0===O+k+E){if(0===d)switch(2*M+3*F){case 533:break;default:d=1}S++}break;case 64:0===k+S+O+E+D+m&&(m=1);break;case 42:case 47:if(!(0<O+E+S))switch(k){case 0:switch(2*h+3*u.charCodeAt(R+1)){case 235:k=47;break;case 220:N=R,k=42}break;case 42:47===h&&42===M&&N+2!==R&&(33===u.charCodeAt(N+2)&&(W+=u.substring(N,R+1)),y="",k=0)}}0===k&&(B+=y)}F=M,M=h,R++}if(0<(N=W.length)){if(I=l,0<z&&(void 0!==(x=a(2,W,I,n,T,C,N,f,p,f))&&0===(W=x).length))return $+W+U;if(W=I.join(",")+"{"+W+"}",0!=P*_){switch(2!==P||o(W,2)||(_=0),_){case 111:W=W.replace(b,":-moz-$1")+W;break;case 112:W=W.replace(v,"::-webkit-input-$1")+W.replace(v,"::-moz-$1")+W.replace(v,":-ms-input-$1")+W}_=0}}return $+W+U}(A,l,n,0,0);return 0<z&&(void 0!==(u=a(-2,f,l,l,T,C,f.length,0,0,0))&&(f=u)),"",_=0,C=T=1,f}var c=/^\0+/g,s=/[\0\r\f]/g,f=/: */g,p=/zoo|gra/,d=/([,: ])(transform)/g,h=/,\r+?/g,m=/([\t\r\n ])*\f?&/g,g=/@(k\w+)\s*(\S*)\s*/,v=/::(place)/g,b=/:(read-only)/g,y=/[svh]\w+-[tblr]{2}/,w=/\(\s*(.*)\s*\)/g,x=/([\s\S]*?);/g,E=/-self|flex-/g,k=/[^]*?(:[rp][el]a[\w-]+)[^]*/,S=/stretch|:\s*\w+\-(?:conte|avail)/,O=/([^-])(image-set\()/,C=1,T=1,_=0,P=1,A=[],M=[],z=0,F=null,j=0;return u.use=function e(t){switch(t){case void 0:case null:z=M.length=0;break;default:if("function"==typeof t)M[z++]=t;else if("object"==typeof t)for(var n=0,r=t.length;n<r;++n)e(t[n]);else j=0|!!t}return e},u.set=l,void 0!==e&&l(e),u};n(17);function i(e){e&&a.current.insert(e+"}")}var a={current:null},l=function(e,t,n,r,o,l,u,c,s,f){switch(e){case 1:switch(t.charCodeAt(0)){case 64:return a.current.insert(t+";"),"";case 108:if(98===t.charCodeAt(2))return""}break;case 2:if(0===c)return t+"/*|*/";break;case 3:switch(c){case 102:case 112:return a.current.insert(n[0]+t),"";default:return t+(0===f?"/*|*/":"")}case-2:t.split("/*|*/}").forEach(i)}};t.a=function(e){void 0===e&&(e={});var t,n=e.key||"css";void 
0!==e.prefix&&(t={prefix:e.prefix});var i=new o(t);var u,c={};u=e.container||document.head;var s,f=document.querySelectorAll("style[data-emotion-"+n+"]");Array.prototype.forEach.call(f,(function(e){e.getAttribute("data-emotion-"+n).split(" ").forEach((function(e){c[e]=!0})),e.parentNode!==u&&u.appendChild(e)})),i.use(e.stylisPlugins)(l),s=function(e,t,n,r){var o=t.name;a.current=n,i(e,t.styles),r&&(p.inserted[o]=!0)};var p={key:n,sheet:new r.a({key:n,container:u,nonce:e.nonce,speedy:e.speedy}),nonce:e.nonce,inserted:c,registered:{},insert:s};return p}},function(e,t){var n;n=function(){return this}();try{n=n||new Function("return this")()}catch(e){"object"==typeof window&&(n=window)}e.exports=n},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},o=function(){function e(e,t){for(var n=0;n<t.length;n++){var r=t[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(e,r.key,r)}}return function(t,n,r){return n&&e(t.prototype,n),r&&e(t,r),t}}(),i=n(0),a=u(i),l=u(n(2));function u(e){return e&&e.__esModule?e:{default:e}}var c={position:"absolute",top:0,left:0,visibility:"hidden",height:0,overflow:"scroll",whiteSpace:"pre"},s=["extraWidth","injectStyles","inputClassName","inputRef","inputStyle","minWidth","onAutosize","placeholderIsMinWidth"],f=function(e,t){t.style.fontSize=e.fontSize,t.style.fontFamily=e.fontFamily,t.style.fontWeight=e.fontWeight,t.style.fontStyle=e.fontStyle,t.style.letterSpacing=e.letterSpacing,t.style.textTransform=e.textTransform},p=!("undefined"==typeof window||!window.navigator)&&/MSIE |Trident\/|Edge\//.test(window.navigator.userAgent),d=function(){return p?"_"+Math.random().toString(36).substr(2,12):void 0},h=function(e){function t(e){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t);var n=function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,(t.__proto__||Object.getPrototypeOf(t)).call(this,e));return n.inputRef=function(e){n.input=e,"function"==typeof n.props.inputRef&&n.props.inputRef(e)},n.placeHolderSizerRef=function(e){n.placeHolderSizer=e},n.sizerRef=function(e){n.sizer=e},n.state={inputWidth:e.minWidth,inputId:e.id||d()},n}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),o(t,[{key:"componentDidMount",value:function(){this.mounted=!0,this.copyInputStyles(),this.updateInputWidth()}},{key:"UNSAFE_componentWillReceiveProps",value:function(e){var t=e.id;t!==this.props.id&&this.setState({inputId:t||d()})}},{key:"componentDidUpdate",value:function(e,t){t.inputWidth!==this.state.inputWidth&&"function"==typeof this.props.onAutosize&&this.props.onAutosize(this.state.inputWidth),this.updateInputWidth()}},{key:"componentWillUnmount",value:function(){this.mounted=!1}},{key:"copyInputStyles",value:function(){if(this.mounted&&window.getComputedStyle){var 
e=this.input&&window.getComputedStyle(this.input);e&&(f(e,this.sizer),this.placeHolderSizer&&f(e,this.placeHolderSizer))}}},{key:"updateInputWidth",value:function(){if(this.mounted&&this.sizer&&void 0!==this.sizer.scrollWidth){var e=void 0;e=this.props.placeholder&&(!this.props.value||this.props.value&&this.props.placeholderIsMinWidth)?Math.max(this.sizer.scrollWidth,this.placeHolderSizer.scrollWidth)+2:this.sizer.scrollWidth+2,(e+="number"===this.props.type&&void 0===this.props.extraWidth?16:parseInt(this.props.extraWidth)||0)<this.props.minWidth&&(e=this.props.minWidth),e!==this.state.inputWidth&&this.setState({inputWidth:e})}}},{key:"getInput",value:function(){return this.input}},{key:"focus",value:function(){this.input.focus()}},{key:"blur",value:function(){this.input.blur()}},{key:"select",value:function(){this.input.select()}},{key:"renderStyles",value:function(){var e=this.props.injectStyles;return p&&e?a.default.createElement("style",{dangerouslySetInnerHTML:{__html:"input#"+this.state.inputId+"::-ms-clear {display: none;}"}}):null}},{key:"render",value:function(){var e=[this.props.defaultValue,this.props.value,""].reduce((function(e,t){return null!=e?e:t})),t=r({},this.props.style);t.display||(t.display="inline-block");var n=r({boxSizing:"content-box",width:this.state.inputWidth+"px"},this.props.inputStyle),o=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(this.props,[]);return function(e){s.forEach((function(t){return delete e[t]}))}(o),o.className=this.props.inputClassName,o.id=this.state.inputId,o.style=n,a.default.createElement("div",{className:this.props.className,style:t},this.renderStyles(),a.default.createElement("input",r({},o,{ref:this.inputRef})),a.default.createElement("div",{ref:this.sizerRef,style:c},e),this.props.placeholder?a.default.createElement("div",{ref:this.placeHolderSizerRef,style:c},this.props.placeholder):null)}}]),t}(i.Component);h.propTypes={className:l.default.string,defaultValue:l.default.any,extraWidth:l.default.oneOfType([l.default.number,l.default.string]),id:l.default.string,injectStyles:l.default.bool,inputClassName:l.default.string,inputRef:l.default.func,inputStyle:l.default.object,minWidth:l.default.oneOfType([l.default.number,l.default.string]),onAutosize:l.default.func,onChange:l.default.func,placeholder:l.default.string,placeholderIsMinWidth:l.default.bool,style:l.default.object,value:l.default.any},h.defaultProps={minWidth:1,injectStyles:!0},t.default=h},function(e,t){e.exports=function(e,t){e.prototype=Object.create(t.prototype),e.prototype.constructor=e,e.__proto__=t}},function(e,t,n){var r; +/*! + Copyright (c) 2017 Jed Watson. 
+ Licensed under the MIT License (MIT), see + http://jedwatson.github.io/classnames +*/!function(){"use strict";var n={}.hasOwnProperty;function o(){for(var e=[],t=0;t<arguments.length;t++){var r=arguments[t];if(r){var i=typeof r;if("string"===i||"number"===i)e.push(r);else if(Array.isArray(r)&&r.length){var a=o.apply(null,r);a&&e.push(a)}else if("object"===i)for(var l in r)n.call(r,l)&&r[l]&&e.push(l)}}return e.join(" ")}e.exports?(o.default=o,e.exports=o):void 0===(r=function(){return o}.apply(t,[]))||(e.exports=r)}()},function(e,t,n){"use strict";var r,o=this&&this.__extends||(r=function(e,t){return(r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(e,t){e.__proto__=t}||function(e,t){for(var n in t)t.hasOwnProperty(n)&&(e[n]=t[n])})(e,t)},function(e,t){function n(){this.constructor=e}r(e,t),e.prototype=null===t?Object.create(t):(n.prototype=t.prototype,new n)}),i=this&&this.__assign||function(){return(i=Object.assign||function(e){for(var t,n=1,r=arguments.length;n<r;n++)for(var o in t=arguments[n])Object.prototype.hasOwnProperty.call(t,o)&&(e[o]=t[o]);return e}).apply(this,arguments)},a=this&&this.__createBinding||(Object.create?function(e,t,n,r){void 0===r&&(r=n),Object.defineProperty(e,r,{enumerable:!0,get:function(){return t[n]}})}:function(e,t,n,r){void 0===r&&(r=n),e[r]=t[n]}),l=this&&this.__setModuleDefault||(Object.create?function(e,t){Object.defineProperty(e,"default",{enumerable:!0,value:t})}:function(e,t){e.default=t}),u=this&&this.__importStar||function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)"default"!==n&&Object.hasOwnProperty.call(e,n)&&a(t,e,n);return l(t,e),t},c=this&&this.__rest||function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&t.indexOf(r)<0&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var o=0;for(r=Object.getOwnPropertySymbols(e);o<r.length;o++)t.indexOf(r[o])<0&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]])}return n},s=this&&this.__importDefault||function(e){return e&&e.__esModule?e:{default:e}};Object.defineProperty(t,"__esModule",{value:!0});var f=u(n(0)),p=s(n(32)),d=u(n(2));function h(e){return e&&e.replace(/ |\u202F|\u00A0/g," ")}var m=function(e){function t(){var t=null!==e&&e.apply(this,arguments)||this;return t.lastHtml=t.props.html,t.el="function"==typeof t.props.innerRef?{current:null}:f.createRef(),t.getEl=function(){return(t.props.innerRef&&"function"!=typeof t.props.innerRef?t.props.innerRef:t.el).current},t.emitChange=function(e){var n=t.getEl();if(n){var r=n.innerHTML;if(t.props.onChange&&r!==t.lastHtml){var o=Object.assign({},e,{target:{value:r}});t.props.onChange(o)}t.lastHtml=r}},t}return o(t,e),t.prototype.render=function(){var e=this,t=this.props,n=t.tagName,r=t.html,o=t.innerRef,a=c(t,["tagName","html","innerRef"]);return f.createElement(n||"div",i(i({},a),{ref:"function"==typeof o?function(t){o(t),e.el.current=t}:o||this.el,onInput:this.emitChange,onBlur:this.props.onBlur||this.emitChange,onKeyUp:this.props.onKeyUp||this.emitChange,onKeyDown:this.props.onKeyDown||this.emitChange,contentEditable:!this.props.disabled,dangerouslySetInnerHTML:{__html:r}}),this.props.children)},t.prototype.shouldComponentUpdate=function(e){var t=this.props,n=this.getEl();return!n||(h(e.html)!==h(n.innerHTML)||(t.disabled!==e.disabled||t.tagName!==e.tagName||t.className!==e.className||t.innerRef!==e.innerRef||!p.default(t.style,e.style)))},t.prototype.componentDidUpdate=function(){var 
e=this.getEl();e&&(this.props.html!==e.innerHTML&&(e.innerHTML=this.props.html),this.lastHtml=this.props.html,function(e){var t=document.createTextNode("");e.appendChild(t);var n=document.activeElement===e;if(null!==t&&null!==t.nodeValue&&n){var r=window.getSelection();if(null!==r){var o=document.createRange();o.setStart(t,t.nodeValue.length),o.collapse(!0),r.removeAllRanges(),r.addRange(o)}e instanceof HTMLElement&&e.focus()}}(e))},t.propTypes={html:d.string.isRequired,onChange:d.func,disabled:d.bool,tagName:d.string,className:d.string,style:d.object,innerRef:d.oneOfType([d.object,d.func])},t}(f.Component);t.default=m},function(e,t){function n(){return e.exports=n=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},n.apply(this,arguments)}e.exports=n},function(e,t,n){"use strict";var r=n(38),o={childContextTypes:!0,contextType:!0,contextTypes:!0,defaultProps:!0,displayName:!0,getDefaultProps:!0,getDerivedStateFromError:!0,getDerivedStateFromProps:!0,mixins:!0,propTypes:!0,type:!0},i={name:!0,length:!0,prototype:!0,caller:!0,callee:!0,arguments:!0,arity:!0},a={$$typeof:!0,compare:!0,defaultProps:!0,displayName:!0,propTypes:!0,type:!0},l={};function u(e){return r.isMemo(e)?a:l[e.$$typeof]||o}l[r.ForwardRef]={$$typeof:!0,render:!0,defaultProps:!0,displayName:!0,propTypes:!0},l[r.Memo]=a;var c=Object.defineProperty,s=Object.getOwnPropertyNames,f=Object.getOwnPropertySymbols,p=Object.getOwnPropertyDescriptor,d=Object.getPrototypeOf,h=Object.prototype;e.exports=function e(t,n,r){if("string"!=typeof n){if(h){var o=d(n);o&&o!==h&&e(t,o,r)}var a=s(n);f&&(a=a.concat(f(n)));for(var l=u(t),m=u(n),g=0;g<a.length;++g){var v=a[g];if(!(i[v]||r&&r[v]||m&&m[v]||l&&l[v])){var b=p(n,v);try{c(t,v,b)}catch(e){}}}}return t}},function(e,t,n){var r,o,i=n(40),a=n(41),l=0,u=0;e.exports=function(e,t,n){var c=t&&n||0,s=t||[],f=(e=e||{}).node||r,p=void 0!==e.clockseq?e.clockseq:o;if(null==f||null==p){var d=i();null==f&&(f=r=[1|d[0],d[1],d[2],d[3],d[4],d[5]]),null==p&&(p=o=16383&(d[6]<<8|d[7]))}var h=void 0!==e.msecs?e.msecs:(new Date).getTime(),m=void 0!==e.nsecs?e.nsecs:u+1,g=h-l+(m-u)/1e4;if(g<0&&void 0===e.clockseq&&(p=p+1&16383),(g<0||h>l)&&void 0===e.nsecs&&(m=0),m>=1e4)throw new Error("uuid.v1(): Can't create more than 10M uuids/sec");l=h,u=m,o=p;var v=(1e4*(268435455&(h+=122192928e5))+m)%4294967296;s[c++]=v>>>24&255,s[c++]=v>>>16&255,s[c++]=v>>>8&255,s[c++]=255&v;var b=h/4294967296*1e4&268435455;s[c++]=b>>>8&255,s[c++]=255&b,s[c++]=b>>>24&15|16,s[c++]=b>>>16&255,s[c++]=p>>>8|128,s[c++]=255&p;for(var y=0;y<6;++y)s[c+y]=f[y];return t||a(s)}},function(e,t,n){"use strict"; +/** @license React v16.13.1 + * react.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. 
+ */var r=n(4),o="function"==typeof Symbol&&Symbol.for,i=o?Symbol.for("react.element"):60103,a=o?Symbol.for("react.portal"):60106,l=o?Symbol.for("react.fragment"):60107,u=o?Symbol.for("react.strict_mode"):60108,c=o?Symbol.for("react.profiler"):60114,s=o?Symbol.for("react.provider"):60109,f=o?Symbol.for("react.context"):60110,p=o?Symbol.for("react.forward_ref"):60112,d=o?Symbol.for("react.suspense"):60113,h=o?Symbol.for("react.memo"):60115,m=o?Symbol.for("react.lazy"):60116,g="function"==typeof Symbol&&Symbol.iterator;function v(e){for(var t="https://reactjs.org/docs/error-decoder.html?invariant="+e,n=1;n<arguments.length;n++)t+="&args[]="+encodeURIComponent(arguments[n]);return"Minified React error #"+e+"; visit "+t+" for the full message or use the non-minified dev environment for full errors and additional helpful warnings."}var b={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},y={};function w(e,t,n){this.props=e,this.context=t,this.refs=y,this.updater=n||b}function x(){}function E(e,t,n){this.props=e,this.context=t,this.refs=y,this.updater=n||b}w.prototype.isReactComponent={},w.prototype.setState=function(e,t){if("object"!=typeof e&&"function"!=typeof e&&null!=e)throw Error(v(85));this.updater.enqueueSetState(this,e,t,"setState")},w.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")},x.prototype=w.prototype;var k=E.prototype=new x;k.constructor=E,r(k,w.prototype),k.isPureReactComponent=!0;var S={current:null},O=Object.prototype.hasOwnProperty,C={key:!0,ref:!0,__self:!0,__source:!0};function T(e,t,n){var r,o={},a=null,l=null;if(null!=t)for(r in void 0!==t.ref&&(l=t.ref),void 0!==t.key&&(a=""+t.key),t)O.call(t,r)&&!C.hasOwnProperty(r)&&(o[r]=t[r]);var u=arguments.length-2;if(1===u)o.children=n;else if(1<u){for(var c=Array(u),s=0;s<u;s++)c[s]=arguments[s+2];o.children=c}if(e&&e.defaultProps)for(r in u=e.defaultProps)void 0===o[r]&&(o[r]=u[r]);return{$$typeof:i,type:e,key:a,ref:l,props:o,_owner:S.current}}function _(e){return"object"==typeof e&&null!==e&&e.$$typeof===i}var P=/\/+/g,A=[];function M(e,t,n,r){if(A.length){var o=A.pop();return o.result=e,o.keyPrefix=t,o.func=n,o.context=r,o.count=0,o}return{result:e,keyPrefix:t,func:n,context:r,count:0}}function z(e){e.result=null,e.keyPrefix=null,e.func=null,e.context=null,e.count=0,10>A.length&&A.push(e)}function F(e,t,n){return null==e?0:function e(t,n,r,o){var l=typeof t;"undefined"!==l&&"boolean"!==l||(t=null);var u=!1;if(null===t)u=!0;else switch(l){case"string":case"number":u=!0;break;case"object":switch(t.$$typeof){case i:case a:u=!0}}if(u)return r(o,t,""===n?"."+j(t,0):n),1;if(u=0,n=""===n?".":n+":",Array.isArray(t))for(var c=0;c<t.length;c++){var s=n+j(l=t[c],c);u+=e(l,s,r,o)}else if(null===t||"object"!=typeof t?s=null:s="function"==typeof(s=g&&t[g]||t["@@iterator"])?s:null,"function"==typeof s)for(t=s.call(t),c=0;!(l=t.next()).done;)u+=e(l=l.value,s=n+j(l,c++),r,o);else if("object"===l)throw r=""+t,Error(v(31,"[object Object]"===r?"object with keys {"+Object.keys(t).join(", ")+"}":r,""));return u}(e,"",t,n)}function j(e,t){return"object"==typeof e&&null!==e&&null!=e.key?function(e){var t={"=":"=0",":":"=2"};return"$"+(""+e).replace(/[=:]/g,(function(e){return t[e]}))}(e.key):t.toString(36)}function D(e,t){e.func.call(e.context,t,e.count++)}function R(e,t,n){var r=e.result,o=e.keyPrefix;e=e.func.call(e.context,t,e.count++),Array.isArray(e)?I(e,r,n,(function(e){return 
e})):null!=e&&(_(e)&&(e=function(e,t){return{$$typeof:i,type:e.type,key:t,ref:e.ref,props:e.props,_owner:e._owner}}(e,o+(!e.key||t&&t.key===e.key?"":(""+e.key).replace(P,"$&/")+"/")+n)),r.push(e))}function I(e,t,n,r,o){var i="";null!=n&&(i=(""+n).replace(P,"$&/")+"/"),F(e,R,t=M(t,i,r,o)),z(t)}var L={current:null};function N(){var e=L.current;if(null===e)throw Error(v(321));return e}var V={ReactCurrentDispatcher:L,ReactCurrentBatchConfig:{suspense:null},ReactCurrentOwner:S,IsSomeRendererActing:{current:!1},assign:r};t.Children={map:function(e,t,n){if(null==e)return e;var r=[];return I(e,r,null,t,n),r},forEach:function(e,t,n){if(null==e)return e;F(e,D,t=M(null,null,t,n)),z(t)},count:function(e){return F(e,(function(){return null}),null)},toArray:function(e){var t=[];return I(e,t,null,(function(e){return e})),t},only:function(e){if(!_(e))throw Error(v(143));return e}},t.Component=w,t.Fragment=l,t.Profiler=c,t.PureComponent=E,t.StrictMode=u,t.Suspense=d,t.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED=V,t.cloneElement=function(e,t,n){if(null==e)throw Error(v(267,e));var o=r({},e.props),a=e.key,l=e.ref,u=e._owner;if(null!=t){if(void 0!==t.ref&&(l=t.ref,u=S.current),void 0!==t.key&&(a=""+t.key),e.type&&e.type.defaultProps)var c=e.type.defaultProps;for(s in t)O.call(t,s)&&!C.hasOwnProperty(s)&&(o[s]=void 0===t[s]&&void 0!==c?c[s]:t[s])}var s=arguments.length-2;if(1===s)o.children=n;else if(1<s){c=Array(s);for(var f=0;f<s;f++)c[f]=arguments[f+2];o.children=c}return{$$typeof:i,type:e.type,key:a,ref:l,props:o,_owner:u}},t.createContext=function(e,t){return void 0===t&&(t=null),(e={$$typeof:f,_calculateChangedBits:t,_currentValue:e,_currentValue2:e,_threadCount:0,Provider:null,Consumer:null}).Provider={$$typeof:s,_context:e},e.Consumer=e},t.createElement=T,t.createFactory=function(e){var t=T.bind(null,e);return t.type=e,t},t.createRef=function(){return{current:null}},t.forwardRef=function(e){return{$$typeof:p,render:e}},t.isValidElement=_,t.lazy=function(e){return{$$typeof:m,_ctor:e,_status:-1,_result:null}},t.memo=function(e,t){return{$$typeof:h,type:e,compare:void 0===t?null:t}},t.useCallback=function(e,t){return N().useCallback(e,t)},t.useContext=function(e,t){return N().useContext(e,t)},t.useDebugValue=function(){},t.useEffect=function(e,t){return N().useEffect(e,t)},t.useImperativeHandle=function(e,t,n){return N().useImperativeHandle(e,t,n)},t.useLayoutEffect=function(e,t){return N().useLayoutEffect(e,t)},t.useMemo=function(e,t){return N().useMemo(e,t)},t.useReducer=function(e,t,n){return N().useReducer(e,t,n)},t.useRef=function(e){return N().useRef(e)},t.useState=function(e){return N().useState(e)},t.version="16.13.1"},function(e,t,n){"use strict"; +/** @license React v16.13.1 + * react-dom.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. 
+ */var r=n(0),o=n(4),i=n(29);function a(e){for(var t="https://reactjs.org/docs/error-decoder.html?invariant="+e,n=1;n<arguments.length;n++)t+="&args[]="+encodeURIComponent(arguments[n]);return"Minified React error #"+e+"; visit "+t+" for the full message or use the non-minified dev environment for full errors and additional helpful warnings."}if(!r)throw Error(a(227));function l(e,t,n,r,o,i,a,l,u){var c=Array.prototype.slice.call(arguments,3);try{t.apply(n,c)}catch(e){this.onError(e)}}var u=!1,c=null,s=!1,f=null,p={onError:function(e){u=!0,c=e}};function d(e,t,n,r,o,i,a,s,f){u=!1,c=null,l.apply(p,arguments)}var h=null,m=null,g=null;function v(e,t,n){var r=e.type||"unknown-event";e.currentTarget=g(n),function(e,t,n,r,o,i,l,p,h){if(d.apply(this,arguments),u){if(!u)throw Error(a(198));var m=c;u=!1,c=null,s||(s=!0,f=m)}}(r,t,void 0,e),e.currentTarget=null}var b=null,y={};function w(){if(b)for(var e in y){var t=y[e],n=b.indexOf(e);if(!(-1<n))throw Error(a(96,e));if(!E[n]){if(!t.extractEvents)throw Error(a(97,e));for(var r in E[n]=t,n=t.eventTypes){var o=void 0,i=n[r],l=t,u=r;if(k.hasOwnProperty(u))throw Error(a(99,u));k[u]=i;var c=i.phasedRegistrationNames;if(c){for(o in c)c.hasOwnProperty(o)&&x(c[o],l,u);o=!0}else i.registrationName?(x(i.registrationName,l,u),o=!0):o=!1;if(!o)throw Error(a(98,r,e))}}}}function x(e,t,n){if(S[e])throw Error(a(100,e));S[e]=t,O[e]=t.eventTypes[n].dependencies}var E=[],k={},S={},O={};function C(e){var t,n=!1;for(t in e)if(e.hasOwnProperty(t)){var r=e[t];if(!y.hasOwnProperty(t)||y[t]!==r){if(y[t])throw Error(a(102,t));y[t]=r,n=!0}}n&&w()}var T=!("undefined"==typeof window||void 0===window.document||void 0===window.document.createElement),_=null,P=null,A=null;function M(e){if(e=m(e)){if("function"!=typeof _)throw Error(a(280));var t=e.stateNode;t&&(t=h(t),_(e.stateNode,e.type,t))}}function z(e){P?A?A.push(e):A=[e]:P=e}function F(){if(P){var e=P,t=A;if(A=P=null,M(e),t)for(e=0;e<t.length;e++)M(t[e])}}function j(e,t){return e(t)}function D(e,t,n,r,o){return e(t,n,r,o)}function R(){}var I=j,L=!1,N=!1;function V(){null===P&&null===A||(R(),F())}function H(e,t,n){if(N)return e(t,n);N=!0;try{return I(e,t,n)}finally{N=!1,V()}}var B=/^[:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD][:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\-.0-9\u00B7\u0300-\u036F\u203F-\u2040]*$/,W=Object.prototype.hasOwnProperty,U={},$={};function Y(e,t,n,r,o,i){this.acceptsBooleans=2===t||3===t||4===t,this.attributeName=r,this.attributeNamespace=o,this.mustUseProperty=n,this.propertyName=e,this.type=t,this.sanitizeURL=i}var Q={};"children dangerouslySetInnerHTML defaultValue defaultChecked innerHTML suppressContentEditableWarning suppressHydrationWarning style".split(" ").forEach((function(e){Q[e]=new Y(e,0,!1,e,null,!1)})),[["acceptCharset","accept-charset"],["className","class"],["htmlFor","for"],["httpEquiv","http-equiv"]].forEach((function(e){var t=e[0];Q[t]=new Y(t,1,!1,e[1],null,!1)})),["contentEditable","draggable","spellCheck","value"].forEach((function(e){Q[e]=new Y(e,2,!1,e.toLowerCase(),null,!1)})),["autoReverse","externalResourcesRequired","focusable","preserveAlpha"].forEach((function(e){Q[e]=new Y(e,2,!1,e,null,!1)})),"allowFullScreen async autoFocus autoPlay controls default defer disabled disablePictureInPicture formNoValidate hidden loop noModule noValidate open playsInline 
readOnly required reversed scoped seamless itemScope".split(" ").forEach((function(e){Q[e]=new Y(e,3,!1,e.toLowerCase(),null,!1)})),["checked","multiple","muted","selected"].forEach((function(e){Q[e]=new Y(e,3,!0,e,null,!1)})),["capture","download"].forEach((function(e){Q[e]=new Y(e,4,!1,e,null,!1)})),["cols","rows","size","span"].forEach((function(e){Q[e]=new Y(e,6,!1,e,null,!1)})),["rowSpan","start"].forEach((function(e){Q[e]=new Y(e,5,!1,e.toLowerCase(),null,!1)}));var X=/[\-:]([a-z])/g;function G(e){return e[1].toUpperCase()}"accent-height alignment-baseline arabic-form baseline-shift cap-height clip-path clip-rule color-interpolation color-interpolation-filters color-profile color-rendering dominant-baseline enable-background fill-opacity fill-rule flood-color flood-opacity font-family font-size font-size-adjust font-stretch font-style font-variant font-weight glyph-name glyph-orientation-horizontal glyph-orientation-vertical horiz-adv-x horiz-origin-x image-rendering letter-spacing lighting-color marker-end marker-mid marker-start overline-position overline-thickness paint-order panose-1 pointer-events rendering-intent shape-rendering stop-color stop-opacity strikethrough-position strikethrough-thickness stroke-dasharray stroke-dashoffset stroke-linecap stroke-linejoin stroke-miterlimit stroke-opacity stroke-width text-anchor text-decoration text-rendering underline-position underline-thickness unicode-bidi unicode-range units-per-em v-alphabetic v-hanging v-ideographic v-mathematical vector-effect vert-adv-y vert-origin-x vert-origin-y word-spacing writing-mode xmlns:xlink x-height".split(" ").forEach((function(e){var t=e.replace(X,G);Q[t]=new Y(t,1,!1,e,null,!1)})),"xlink:actuate xlink:arcrole xlink:role xlink:show xlink:title xlink:type".split(" ").forEach((function(e){var t=e.replace(X,G);Q[t]=new Y(t,1,!1,e,"http://www.w3.org/1999/xlink",!1)})),["xml:base","xml:lang","xml:space"].forEach((function(e){var t=e.replace(X,G);Q[t]=new Y(t,1,!1,e,"http://www.w3.org/XML/1998/namespace",!1)})),["tabIndex","crossOrigin"].forEach((function(e){Q[e]=new Y(e,1,!1,e.toLowerCase(),null,!1)})),Q.xlinkHref=new Y("xlinkHref",1,!1,"xlink:href","http://www.w3.org/1999/xlink",!0),["src","href","action","formAction"].forEach((function(e){Q[e]=new Y(e,1,!1,e.toLowerCase(),null,!0)}));var q=r.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED;function K(e,t,n,r){var o=Q.hasOwnProperty(t)?Q[t]:null;(null!==o?0===o.type:!r&&(2<t.length&&("o"===t[0]||"O"===t[0])&&("n"===t[1]||"N"===t[1])))||(function(e,t,n,r){if(null==t||function(e,t,n,r){if(null!==n&&0===n.type)return!1;switch(typeof t){case"function":case"symbol":return!0;case"boolean":return!r&&(null!==n?!n.acceptsBooleans:"data-"!==(e=e.toLowerCase().slice(0,5))&&"aria-"!==e);default:return!1}}(e,t,n,r))return!0;if(r)return!1;if(null!==n)switch(n.type){case 3:return!t;case 4:return!1===t;case 5:return isNaN(t);case 6:return isNaN(t)||1>t}return!1}(t,n,o,r)&&(n=null),r||null===o?function(e){return!!W.call($,e)||!W.call(U,e)&&(B.test(e)?$[e]=!0:(U[e]=!0,!1))}(t)&&(null===n?e.removeAttribute(t):e.setAttribute(t,""+n)):o.mustUseProperty?e[o.propertyName]=null===n?3!==o.type&&"":n:(t=o.attributeName,r=o.attributeNamespace,null===n?e.removeAttribute(t):(n=3===(o=o.type)||4===o&&!0===n?"":""+n,r?e.setAttributeNS(r,t,n):e.setAttribute(t,n))))}q.hasOwnProperty("ReactCurrentDispatcher")||(q.ReactCurrentDispatcher={current:null}),q.hasOwnProperty("ReactCurrentBatchConfig")||(q.ReactCurrentBatchConfig={suspense:null});var Z=/^(.*)[\\\/]/,J="function"==typeof 
Symbol&&Symbol.for,ee=J?Symbol.for("react.element"):60103,te=J?Symbol.for("react.portal"):60106,ne=J?Symbol.for("react.fragment"):60107,re=J?Symbol.for("react.strict_mode"):60108,oe=J?Symbol.for("react.profiler"):60114,ie=J?Symbol.for("react.provider"):60109,ae=J?Symbol.for("react.context"):60110,le=J?Symbol.for("react.concurrent_mode"):60111,ue=J?Symbol.for("react.forward_ref"):60112,ce=J?Symbol.for("react.suspense"):60113,se=J?Symbol.for("react.suspense_list"):60120,fe=J?Symbol.for("react.memo"):60115,pe=J?Symbol.for("react.lazy"):60116,de=J?Symbol.for("react.block"):60121,he="function"==typeof Symbol&&Symbol.iterator;function me(e){return null===e||"object"!=typeof e?null:"function"==typeof(e=he&&e[he]||e["@@iterator"])?e:null}function ge(e){if(null==e)return null;if("function"==typeof e)return e.displayName||e.name||null;if("string"==typeof e)return e;switch(e){case ne:return"Fragment";case te:return"Portal";case oe:return"Profiler";case re:return"StrictMode";case ce:return"Suspense";case se:return"SuspenseList"}if("object"==typeof e)switch(e.$$typeof){case ae:return"Context.Consumer";case ie:return"Context.Provider";case ue:var t=e.render;return t=t.displayName||t.name||"",e.displayName||(""!==t?"ForwardRef("+t+")":"ForwardRef");case fe:return ge(e.type);case de:return ge(e.render);case pe:if(e=1===e._status?e._result:null)return ge(e)}return null}function ve(e){var t="";do{e:switch(e.tag){case 3:case 4:case 6:case 7:case 10:case 9:var n="";break e;default:var r=e._debugOwner,o=e._debugSource,i=ge(e.type);n=null,r&&(n=ge(r.type)),r=i,i="",o?i=" (at "+o.fileName.replace(Z,"")+":"+o.lineNumber+")":n&&(i=" (created by "+n+")"),n="\n in "+(r||"Unknown")+i}t+=n,e=e.return}while(e);return t}function be(e){switch(typeof e){case"boolean":case"number":case"object":case"string":case"undefined":return e;default:return""}}function ye(e){var t=e.type;return(e=e.nodeName)&&"input"===e.toLowerCase()&&("checkbox"===t||"radio"===t)}function we(e){e._valueTracker||(e._valueTracker=function(e){var t=ye(e)?"checked":"value",n=Object.getOwnPropertyDescriptor(e.constructor.prototype,t),r=""+e[t];if(!e.hasOwnProperty(t)&&void 0!==n&&"function"==typeof n.get&&"function"==typeof n.set){var o=n.get,i=n.set;return Object.defineProperty(e,t,{configurable:!0,get:function(){return o.call(this)},set:function(e){r=""+e,i.call(this,e)}}),Object.defineProperty(e,t,{enumerable:n.enumerable}),{getValue:function(){return r},setValue:function(e){r=""+e},stopTracking:function(){e._valueTracker=null,delete e[t]}}}}(e))}function xe(e){if(!e)return!1;var t=e._valueTracker;if(!t)return!0;var n=t.getValue(),r="";return e&&(r=ye(e)?e.checked?"true":"false":e.value),(e=r)!==n&&(t.setValue(e),!0)}function Ee(e,t){var n=t.checked;return o({},t,{defaultChecked:void 0,defaultValue:void 0,value:void 0,checked:null!=n?n:e._wrapperState.initialChecked})}function ke(e,t){var n=null==t.defaultValue?"":t.defaultValue,r=null!=t.checked?t.checked:t.defaultChecked;n=be(null!=t.value?t.value:n),e._wrapperState={initialChecked:r,initialValue:n,controlled:"checkbox"===t.type||"radio"===t.type?null!=t.checked:null!=t.value}}function Se(e,t){null!=(t=t.checked)&&K(e,"checked",t,!1)}function Oe(e,t){Se(e,t);var n=be(t.value),r=t.type;if(null!=n)"number"===r?(0===n&&""===e.value||e.value!=n)&&(e.value=""+n):e.value!==""+n&&(e.value=""+n);else if("submit"===r||"reset"===r)return void 
e.removeAttribute("value");t.hasOwnProperty("value")?Te(e,t.type,n):t.hasOwnProperty("defaultValue")&&Te(e,t.type,be(t.defaultValue)),null==t.checked&&null!=t.defaultChecked&&(e.defaultChecked=!!t.defaultChecked)}function Ce(e,t,n){if(t.hasOwnProperty("value")||t.hasOwnProperty("defaultValue")){var r=t.type;if(!("submit"!==r&&"reset"!==r||void 0!==t.value&&null!==t.value))return;t=""+e._wrapperState.initialValue,n||t===e.value||(e.value=t),e.defaultValue=t}""!==(n=e.name)&&(e.name=""),e.defaultChecked=!!e._wrapperState.initialChecked,""!==n&&(e.name=n)}function Te(e,t,n){"number"===t&&e.ownerDocument.activeElement===e||(null==n?e.defaultValue=""+e._wrapperState.initialValue:e.defaultValue!==""+n&&(e.defaultValue=""+n))}function _e(e,t){return e=o({children:void 0},t),(t=function(e){var t="";return r.Children.forEach(e,(function(e){null!=e&&(t+=e)})),t}(t.children))&&(e.children=t),e}function Pe(e,t,n,r){if(e=e.options,t){t={};for(var o=0;o<n.length;o++)t["$"+n[o]]=!0;for(n=0;n<e.length;n++)o=t.hasOwnProperty("$"+e[n].value),e[n].selected!==o&&(e[n].selected=o),o&&r&&(e[n].defaultSelected=!0)}else{for(n=""+be(n),t=null,o=0;o<e.length;o++){if(e[o].value===n)return e[o].selected=!0,void(r&&(e[o].defaultSelected=!0));null!==t||e[o].disabled||(t=e[o])}null!==t&&(t.selected=!0)}}function Ae(e,t){if(null!=t.dangerouslySetInnerHTML)throw Error(a(91));return o({},t,{value:void 0,defaultValue:void 0,children:""+e._wrapperState.initialValue})}function Me(e,t){var n=t.value;if(null==n){if(n=t.children,t=t.defaultValue,null!=n){if(null!=t)throw Error(a(92));if(Array.isArray(n)){if(!(1>=n.length))throw Error(a(93));n=n[0]}t=n}null==t&&(t=""),n=t}e._wrapperState={initialValue:be(n)}}function ze(e,t){var n=be(t.value),r=be(t.defaultValue);null!=n&&((n=""+n)!==e.value&&(e.value=n),null==t.defaultValue&&e.defaultValue!==n&&(e.defaultValue=n)),null!=r&&(e.defaultValue=""+r)}function Fe(e){var t=e.textContent;t===e._wrapperState.initialValue&&""!==t&&null!==t&&(e.value=t)}var je="http://www.w3.org/1999/xhtml",De="http://www.w3.org/2000/svg";function Re(e){switch(e){case"svg":return"http://www.w3.org/2000/svg";case"math":return"http://www.w3.org/1998/Math/MathML";default:return"http://www.w3.org/1999/xhtml"}}function Ie(e,t){return null==e||"http://www.w3.org/1999/xhtml"===e?Re(t):"http://www.w3.org/2000/svg"===e&&"foreignObject"===t?"http://www.w3.org/1999/xhtml":e}var Le,Ne=function(e){return"undefined"!=typeof MSApp&&MSApp.execUnsafeLocalFunction?function(t,n,r,o){MSApp.execUnsafeLocalFunction((function(){return e(t,n)}))}:e}((function(e,t){if(e.namespaceURI!==De||"innerHTML"in e)e.innerHTML=t;else{for((Le=Le||document.createElement("div")).innerHTML="<svg>"+t.valueOf().toString()+"</svg>",t=Le.firstChild;e.firstChild;)e.removeChild(e.firstChild);for(;t.firstChild;)e.appendChild(t.firstChild)}}));function Ve(e,t){if(t){var n=e.firstChild;if(n&&n===e.lastChild&&3===n.nodeType)return void(n.nodeValue=t)}e.textContent=t}function He(e,t){var n={};return n[e.toLowerCase()]=t.toLowerCase(),n["Webkit"+e]="webkit"+t,n["Moz"+e]="moz"+t,n}var Be={animationend:He("Animation","AnimationEnd"),animationiteration:He("Animation","AnimationIteration"),animationstart:He("Animation","AnimationStart"),transitionend:He("Transition","TransitionEnd")},We={},Ue={};function $e(e){if(We[e])return We[e];if(!Be[e])return e;var t,n=Be[e];for(t in n)if(n.hasOwnProperty(t)&&t in Ue)return We[e]=n[t];return e}T&&(Ue=document.createElement("div").style,"AnimationEvent"in window||(delete Be.animationend.animation,delete 
Be.animationiteration.animation,delete Be.animationstart.animation),"TransitionEvent"in window||delete Be.transitionend.transition);var Ye=$e("animationend"),Qe=$e("animationiteration"),Xe=$e("animationstart"),Ge=$e("transitionend"),qe="abort canplay canplaythrough durationchange emptied encrypted ended error loadeddata loadedmetadata loadstart pause play playing progress ratechange seeked seeking stalled suspend timeupdate volumechange waiting".split(" "),Ke=new("function"==typeof WeakMap?WeakMap:Map);function Ze(e){var t=Ke.get(e);return void 0===t&&(t=new Map,Ke.set(e,t)),t}function Je(e){var t=e,n=e;if(e.alternate)for(;t.return;)t=t.return;else{e=t;do{0!=(1026&(t=e).effectTag)&&(n=t.return),e=t.return}while(e)}return 3===t.tag?n:null}function et(e){if(13===e.tag){var t=e.memoizedState;if(null===t&&(null!==(e=e.alternate)&&(t=e.memoizedState)),null!==t)return t.dehydrated}return null}function tt(e){if(Je(e)!==e)throw Error(a(188))}function nt(e){if(!(e=function(e){var t=e.alternate;if(!t){if(null===(t=Je(e)))throw Error(a(188));return t!==e?null:e}for(var n=e,r=t;;){var o=n.return;if(null===o)break;var i=o.alternate;if(null===i){if(null!==(r=o.return)){n=r;continue}break}if(o.child===i.child){for(i=o.child;i;){if(i===n)return tt(o),e;if(i===r)return tt(o),t;i=i.sibling}throw Error(a(188))}if(n.return!==r.return)n=o,r=i;else{for(var l=!1,u=o.child;u;){if(u===n){l=!0,n=o,r=i;break}if(u===r){l=!0,r=o,n=i;break}u=u.sibling}if(!l){for(u=i.child;u;){if(u===n){l=!0,n=i,r=o;break}if(u===r){l=!0,r=i,n=o;break}u=u.sibling}if(!l)throw Error(a(189))}}if(n.alternate!==r)throw Error(a(190))}if(3!==n.tag)throw Error(a(188));return n.stateNode.current===n?e:t}(e)))return null;for(var t=e;;){if(5===t.tag||6===t.tag)return t;if(t.child)t.child.return=t,t=t.child;else{if(t===e)break;for(;!t.sibling;){if(!t.return||t.return===e)return null;t=t.return}t.sibling.return=t.return,t=t.sibling}}return null}function rt(e,t){if(null==t)throw Error(a(30));return null==e?t:Array.isArray(e)?Array.isArray(t)?(e.push.apply(e,t),e):(e.push(t),e):Array.isArray(t)?[e].concat(t):[e,t]}function ot(e,t,n){Array.isArray(e)?e.forEach(t,n):e&&t.call(n,e)}var it=null;function at(e){if(e){var t=e._dispatchListeners,n=e._dispatchInstances;if(Array.isArray(t))for(var r=0;r<t.length&&!e.isPropagationStopped();r++)v(e,t[r],n[r]);else t&&v(e,t,n);e._dispatchListeners=null,e._dispatchInstances=null,e.isPersistent()||e.constructor.release(e)}}function lt(e){if(null!==e&&(it=rt(it,e)),e=it,it=null,e){if(ot(e,at),it)throw Error(a(95));if(s)throw e=f,s=!1,f=null,e}}function ut(e){return(e=e.target||e.srcElement||window).correspondingUseElement&&(e=e.correspondingUseElement),3===e.nodeType?e.parentNode:e}function ct(e){if(!T)return!1;var t=(e="on"+e)in document;return t||((t=document.createElement("div")).setAttribute(e,"return;"),t="function"==typeof t[e]),t}var st=[];function ft(e){e.topLevelType=null,e.nativeEvent=null,e.targetInst=null,e.ancestors.length=0,10>st.length&&st.push(e)}function pt(e,t,n,r){if(st.length){var o=st.pop();return o.topLevelType=e,o.eventSystemFlags=r,o.nativeEvent=t,o.targetInst=n,o}return{topLevelType:e,eventSystemFlags:r,nativeEvent:t,targetInst:n,ancestors:[]}}function dt(e){var t=e.targetInst,n=t;do{if(!n){e.ancestors.push(n);break}var r=n;if(3===r.tag)r=r.stateNode.containerInfo;else{for(;r.return;)r=r.return;r=3!==r.tag?null:r.stateNode.containerInfo}if(!r)break;5!==(t=n.tag)&&6!==t||e.ancestors.push(n),n=Tn(r)}while(n);for(n=0;n<e.ancestors.length;n++){t=e.ancestors[n];var 
o=ut(e.nativeEvent);r=e.topLevelType;var i=e.nativeEvent,a=e.eventSystemFlags;0===n&&(a|=64);for(var l=null,u=0;u<E.length;u++){var c=E[u];c&&(c=c.extractEvents(r,t,i,o,a))&&(l=rt(l,c))}lt(l)}}function ht(e,t,n){if(!n.has(e)){switch(e){case"scroll":Xt(t,"scroll",!0);break;case"focus":case"blur":Xt(t,"focus",!0),Xt(t,"blur",!0),n.set("blur",null),n.set("focus",null);break;case"cancel":case"close":ct(e)&&Xt(t,e,!0);break;case"invalid":case"submit":case"reset":break;default:-1===qe.indexOf(e)&&Qt(e,t)}n.set(e,null)}}var mt,gt,vt,bt=!1,yt=[],wt=null,xt=null,Et=null,kt=new Map,St=new Map,Ot=[],Ct="mousedown mouseup touchcancel touchend touchstart auxclick dblclick pointercancel pointerdown pointerup dragend dragstart drop compositionend compositionstart keydown keypress keyup input textInput close cancel copy cut paste click change contextmenu reset submit".split(" "),Tt="focus blur dragenter dragleave mouseover mouseout pointerover pointerout gotpointercapture lostpointercapture".split(" ");function _t(e,t,n,r,o){return{blockedOn:e,topLevelType:t,eventSystemFlags:32|n,nativeEvent:o,container:r}}function Pt(e,t){switch(e){case"focus":case"blur":wt=null;break;case"dragenter":case"dragleave":xt=null;break;case"mouseover":case"mouseout":Et=null;break;case"pointerover":case"pointerout":kt.delete(t.pointerId);break;case"gotpointercapture":case"lostpointercapture":St.delete(t.pointerId)}}function At(e,t,n,r,o,i){return null===e||e.nativeEvent!==i?(e=_t(t,n,r,o,i),null!==t&&(null!==(t=_n(t))&&gt(t)),e):(e.eventSystemFlags|=r,e)}function Mt(e){var t=Tn(e.target);if(null!==t){var n=Je(t);if(null!==n)if(13===(t=n.tag)){if(null!==(t=et(n)))return e.blockedOn=t,void i.unstable_runWithPriority(e.priority,(function(){vt(n)}))}else if(3===t&&n.stateNode.hydrate)return void(e.blockedOn=3===n.tag?n.stateNode.containerInfo:null)}e.blockedOn=null}function zt(e){if(null!==e.blockedOn)return!1;var t=Zt(e.topLevelType,e.eventSystemFlags,e.container,e.nativeEvent);if(null!==t){var n=_n(t);return null!==n&&gt(n),e.blockedOn=t,!1}return!0}function Ft(e,t,n){zt(e)&&n.delete(t)}function jt(){for(bt=!1;0<yt.length;){var e=yt[0];if(null!==e.blockedOn){null!==(e=_n(e.blockedOn))&&mt(e);break}var t=Zt(e.topLevelType,e.eventSystemFlags,e.container,e.nativeEvent);null!==t?e.blockedOn=t:yt.shift()}null!==wt&&zt(wt)&&(wt=null),null!==xt&&zt(xt)&&(xt=null),null!==Et&&zt(Et)&&(Et=null),kt.forEach(Ft),St.forEach(Ft)}function Dt(e,t){e.blockedOn===t&&(e.blockedOn=null,bt||(bt=!0,i.unstable_scheduleCallback(i.unstable_NormalPriority,jt)))}function Rt(e){function t(t){return Dt(t,e)}if(0<yt.length){Dt(yt[0],e);for(var n=1;n<yt.length;n++){var r=yt[n];r.blockedOn===e&&(r.blockedOn=null)}}for(null!==wt&&Dt(wt,e),null!==xt&&Dt(xt,e),null!==Et&&Dt(Et,e),kt.forEach(t),St.forEach(t),n=0;n<Ot.length;n++)(r=Ot[n]).blockedOn===e&&(r.blockedOn=null);for(;0<Ot.length&&null===(n=Ot[0]).blockedOn;)Mt(n),null===n.blockedOn&&Ot.shift()}var It={},Lt=new Map,Nt=new 
Map,Vt=["abort","abort",Ye,"animationEnd",Qe,"animationIteration",Xe,"animationStart","canplay","canPlay","canplaythrough","canPlayThrough","durationchange","durationChange","emptied","emptied","encrypted","encrypted","ended","ended","error","error","gotpointercapture","gotPointerCapture","load","load","loadeddata","loadedData","loadedmetadata","loadedMetadata","loadstart","loadStart","lostpointercapture","lostPointerCapture","playing","playing","progress","progress","seeking","seeking","stalled","stalled","suspend","suspend","timeupdate","timeUpdate",Ge,"transitionEnd","waiting","waiting"];function Ht(e,t){for(var n=0;n<e.length;n+=2){var r=e[n],o=e[n+1],i="on"+(o[0].toUpperCase()+o.slice(1));i={phasedRegistrationNames:{bubbled:i,captured:i+"Capture"},dependencies:[r],eventPriority:t},Nt.set(r,t),Lt.set(r,i),It[o]=i}}Ht("blur blur cancel cancel click click close close contextmenu contextMenu copy copy cut cut auxclick auxClick dblclick doubleClick dragend dragEnd dragstart dragStart drop drop focus focus input input invalid invalid keydown keyDown keypress keyPress keyup keyUp mousedown mouseDown mouseup mouseUp paste paste pause pause play play pointercancel pointerCancel pointerdown pointerDown pointerup pointerUp ratechange rateChange reset reset seeked seeked submit submit touchcancel touchCancel touchend touchEnd touchstart touchStart volumechange volumeChange".split(" "),0),Ht("drag drag dragenter dragEnter dragexit dragExit dragleave dragLeave dragover dragOver mousemove mouseMove mouseout mouseOut mouseover mouseOver pointermove pointerMove pointerout pointerOut pointerover pointerOver scroll scroll toggle toggle touchmove touchMove wheel wheel".split(" "),1),Ht(Vt,2);for(var Bt="change selectionchange textInput compositionstart compositionend compositionupdate".split(" "),Wt=0;Wt<Bt.length;Wt++)Nt.set(Bt[Wt],0);var Ut=i.unstable_UserBlockingPriority,$t=i.unstable_runWithPriority,Yt=!0;function Qt(e,t){Xt(t,e,!1)}function Xt(e,t,n){var r=Nt.get(t);switch(void 0===r?2:r){case 0:r=Gt.bind(null,t,1,e);break;case 1:r=qt.bind(null,t,1,e);break;default:r=Kt.bind(null,t,1,e)}n?e.addEventListener(t,r,!0):e.addEventListener(t,r,!1)}function Gt(e,t,n,r){L||R();var o=Kt,i=L;L=!0;try{D(o,e,t,n,r)}finally{(L=i)||V()}}function qt(e,t,n,r){$t(Ut,Kt.bind(null,e,t,n,r))}function Kt(e,t,n,r){if(Yt)if(0<yt.length&&-1<Ct.indexOf(e))e=_t(null,e,t,n,r),yt.push(e);else{var o=Zt(e,t,n,r);if(null===o)Pt(e,r);else if(-1<Ct.indexOf(e))e=_t(o,e,t,n,r),yt.push(e);else if(!function(e,t,n,r,o){switch(t){case"focus":return wt=At(wt,e,t,n,r,o),!0;case"dragenter":return xt=At(xt,e,t,n,r,o),!0;case"mouseover":return Et=At(Et,e,t,n,r,o),!0;case"pointerover":var i=o.pointerId;return kt.set(i,At(kt.get(i)||null,e,t,n,r,o)),!0;case"gotpointercapture":return i=o.pointerId,St.set(i,At(St.get(i)||null,e,t,n,r,o)),!0}return!1}(o,e,t,n,r)){Pt(e,r),e=pt(e,r,null,t);try{H(dt,e)}finally{ft(e)}}}}function Zt(e,t,n,r){if(null!==(n=Tn(n=ut(r)))){var o=Je(n);if(null===o)n=null;else{var i=o.tag;if(13===i){if(null!==(n=et(o)))return n;n=null}else if(3===i){if(o.stateNode.hydrate)return 3===o.tag?o.stateNode.containerInfo:null;n=null}else o!==n&&(n=null)}}e=pt(e,r,n,t);try{H(dt,e)}finally{ft(e)}return null}var 
Jt={animationIterationCount:!0,borderImageOutset:!0,borderImageSlice:!0,borderImageWidth:!0,boxFlex:!0,boxFlexGroup:!0,boxOrdinalGroup:!0,columnCount:!0,columns:!0,flex:!0,flexGrow:!0,flexPositive:!0,flexShrink:!0,flexNegative:!0,flexOrder:!0,gridArea:!0,gridRow:!0,gridRowEnd:!0,gridRowSpan:!0,gridRowStart:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnSpan:!0,gridColumnStart:!0,fontWeight:!0,lineClamp:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,tabSize:!0,widows:!0,zIndex:!0,zoom:!0,fillOpacity:!0,floodOpacity:!0,stopOpacity:!0,strokeDasharray:!0,strokeDashoffset:!0,strokeMiterlimit:!0,strokeOpacity:!0,strokeWidth:!0},en=["Webkit","ms","Moz","O"];function tn(e,t,n){return null==t||"boolean"==typeof t||""===t?"":n||"number"!=typeof t||0===t||Jt.hasOwnProperty(e)&&Jt[e]?(""+t).trim():t+"px"}function nn(e,t){for(var n in e=e.style,t)if(t.hasOwnProperty(n)){var r=0===n.indexOf("--"),o=tn(n,t[n],r);"float"===n&&(n="cssFloat"),r?e.setProperty(n,o):e[n]=o}}Object.keys(Jt).forEach((function(e){en.forEach((function(t){t=t+e.charAt(0).toUpperCase()+e.substring(1),Jt[t]=Jt[e]}))}));var rn=o({menuitem:!0},{area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,keygen:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0});function on(e,t){if(t){if(rn[e]&&(null!=t.children||null!=t.dangerouslySetInnerHTML))throw Error(a(137,e,""));if(null!=t.dangerouslySetInnerHTML){if(null!=t.children)throw Error(a(60));if("object"!=typeof t.dangerouslySetInnerHTML||!("__html"in t.dangerouslySetInnerHTML))throw Error(a(61))}if(null!=t.style&&"object"!=typeof t.style)throw Error(a(62,""))}}function an(e,t){if(-1===e.indexOf("-"))return"string"==typeof t.is;switch(e){case"annotation-xml":case"color-profile":case"font-face":case"font-face-src":case"font-face-uri":case"font-face-format":case"font-face-name":case"missing-glyph":return!1;default:return!0}}var ln=je;function un(e,t){var n=Ze(e=9===e.nodeType||11===e.nodeType?e:e.ownerDocument);t=O[t];for(var r=0;r<t.length;r++)ht(t[r],e,n)}function cn(){}function sn(e){if(void 0===(e=e||("undefined"!=typeof document?document:void 0)))return null;try{return e.activeElement||e.body}catch(t){return e.body}}function fn(e){for(;e&&e.firstChild;)e=e.firstChild;return e}function pn(e,t){var n,r=fn(e);for(e=0;r;){if(3===r.nodeType){if(n=e+r.textContent.length,e<=t&&n>=t)return{node:r,offset:t-e};e=n}e:{for(;r;){if(r.nextSibling){r=r.nextSibling;break e}r=r.parentNode}r=void 0}r=fn(r)}}function dn(){for(var e=window,t=sn();t instanceof e.HTMLIFrameElement;){try{var n="string"==typeof t.contentWindow.location.href}catch(e){n=!1}if(!n)break;t=sn((e=t.contentWindow).document)}return t}function hn(e){var t=e&&e.nodeName&&e.nodeName.toLowerCase();return t&&("input"===t&&("text"===e.type||"search"===e.type||"tel"===e.type||"url"===e.type||"password"===e.type)||"textarea"===t||"true"===e.contentEditable)}var mn=null,gn=null;function vn(e,t){switch(e){case"button":case"input":case"select":case"textarea":return!!t.autoFocus}return!1}function bn(e,t){return"textarea"===e||"option"===e||"noscript"===e||"string"==typeof t.children||"number"==typeof t.children||"object"==typeof t.dangerouslySetInnerHTML&&null!==t.dangerouslySetInnerHTML&&null!=t.dangerouslySetInnerHTML.__html}var yn="function"==typeof setTimeout?setTimeout:void 0,wn="function"==typeof clearTimeout?clearTimeout:void 0;function xn(e){for(;null!=e;e=e.nextSibling){var t=e.nodeType;if(1===t||3===t)break}return e}function En(e){e=e.previousSibling;for(var t=0;e;){if(8===e.nodeType){var 
n=e.data;if("$"===n||"$!"===n||"$?"===n){if(0===t)return e;t--}else"/$"===n&&t++}e=e.previousSibling}return null}var kn=Math.random().toString(36).slice(2),Sn="__reactInternalInstance$"+kn,On="__reactEventHandlers$"+kn,Cn="__reactContainere$"+kn;function Tn(e){var t=e[Sn];if(t)return t;for(var n=e.parentNode;n;){if(t=n[Cn]||n[Sn]){if(n=t.alternate,null!==t.child||null!==n&&null!==n.child)for(e=En(e);null!==e;){if(n=e[Sn])return n;e=En(e)}return t}n=(e=n).parentNode}return null}function _n(e){return!(e=e[Sn]||e[Cn])||5!==e.tag&&6!==e.tag&&13!==e.tag&&3!==e.tag?null:e}function Pn(e){if(5===e.tag||6===e.tag)return e.stateNode;throw Error(a(33))}function An(e){return e[On]||null}function Mn(e){do{e=e.return}while(e&&5!==e.tag);return e||null}function zn(e,t){var n=e.stateNode;if(!n)return null;var r=h(n);if(!r)return null;n=r[t];e:switch(t){case"onClick":case"onClickCapture":case"onDoubleClick":case"onDoubleClickCapture":case"onMouseDown":case"onMouseDownCapture":case"onMouseMove":case"onMouseMoveCapture":case"onMouseUp":case"onMouseUpCapture":case"onMouseEnter":(r=!r.disabled)||(r=!("button"===(e=e.type)||"input"===e||"select"===e||"textarea"===e)),e=!r;break e;default:e=!1}if(e)return null;if(n&&"function"!=typeof n)throw Error(a(231,t,typeof n));return n}function Fn(e,t,n){(t=zn(e,n.dispatchConfig.phasedRegistrationNames[t]))&&(n._dispatchListeners=rt(n._dispatchListeners,t),n._dispatchInstances=rt(n._dispatchInstances,e))}function jn(e){if(e&&e.dispatchConfig.phasedRegistrationNames){for(var t=e._targetInst,n=[];t;)n.push(t),t=Mn(t);for(t=n.length;0<t--;)Fn(n[t],"captured",e);for(t=0;t<n.length;t++)Fn(n[t],"bubbled",e)}}function Dn(e,t,n){e&&n&&n.dispatchConfig.registrationName&&(t=zn(e,n.dispatchConfig.registrationName))&&(n._dispatchListeners=rt(n._dispatchListeners,t),n._dispatchInstances=rt(n._dispatchInstances,e))}function Rn(e){e&&e.dispatchConfig.registrationName&&Dn(e._targetInst,null,e)}function In(e){ot(e,jn)}var Ln=null,Nn=null,Vn=null;function Hn(){if(Vn)return Vn;var e,t,n=Nn,r=n.length,o="value"in Ln?Ln.value:Ln.textContent,i=o.length;for(e=0;e<r&&n[e]===o[e];e++);var a=r-e;for(t=1;t<=a&&n[r-t]===o[i-t];t++);return Vn=o.slice(e,1<t?1-t:void 0)}function Bn(){return!0}function Wn(){return!1}function Un(e,t,n,r){for(var o in this.dispatchConfig=e,this._targetInst=t,this.nativeEvent=n,e=this.constructor.Interface)e.hasOwnProperty(o)&&((t=e[o])?this[o]=t(n):"target"===o?this.target=r:this[o]=n[o]);return this.isDefaultPrevented=(null!=n.defaultPrevented?n.defaultPrevented:!1===n.returnValue)?Bn:Wn,this.isPropagationStopped=Wn,this}function $n(e,t,n,r){if(this.eventPool.length){var o=this.eventPool.pop();return this.call(o,e,t,n,r),o}return new this(e,t,n,r)}function Yn(e){if(!(e instanceof this))throw Error(a(279));e.destructor(),10>this.eventPool.length&&this.eventPool.push(e)}function Qn(e){e.eventPool=[],e.getPooled=$n,e.release=Yn}o(Un.prototype,{preventDefault:function(){this.defaultPrevented=!0;var e=this.nativeEvent;e&&(e.preventDefault?e.preventDefault():"unknown"!=typeof e.returnValue&&(e.returnValue=!1),this.isDefaultPrevented=Bn)},stopPropagation:function(){var e=this.nativeEvent;e&&(e.stopPropagation?e.stopPropagation():"unknown"!=typeof e.cancelBubble&&(e.cancelBubble=!0),this.isPropagationStopped=Bn)},persist:function(){this.isPersistent=Bn},isPersistent:Wn,destructor:function(){var e,t=this.constructor.Interface;for(e in 
t)this[e]=null;this.nativeEvent=this._targetInst=this.dispatchConfig=null,this.isPropagationStopped=this.isDefaultPrevented=Wn,this._dispatchInstances=this._dispatchListeners=null}}),Un.Interface={type:null,target:null,currentTarget:function(){return null},eventPhase:null,bubbles:null,cancelable:null,timeStamp:function(e){return e.timeStamp||Date.now()},defaultPrevented:null,isTrusted:null},Un.extend=function(e){function t(){}function n(){return r.apply(this,arguments)}var r=this;t.prototype=r.prototype;var i=new t;return o(i,n.prototype),n.prototype=i,n.prototype.constructor=n,n.Interface=o({},r.Interface,e),n.extend=r.extend,Qn(n),n},Qn(Un);var Xn=Un.extend({data:null}),Gn=Un.extend({data:null}),qn=[9,13,27,32],Kn=T&&"CompositionEvent"in window,Zn=null;T&&"documentMode"in document&&(Zn=document.documentMode);var Jn=T&&"TextEvent"in window&&!Zn,er=T&&(!Kn||Zn&&8<Zn&&11>=Zn),tr=String.fromCharCode(32),nr={beforeInput:{phasedRegistrationNames:{bubbled:"onBeforeInput",captured:"onBeforeInputCapture"},dependencies:["compositionend","keypress","textInput","paste"]},compositionEnd:{phasedRegistrationNames:{bubbled:"onCompositionEnd",captured:"onCompositionEndCapture"},dependencies:"blur compositionend keydown keypress keyup mousedown".split(" ")},compositionStart:{phasedRegistrationNames:{bubbled:"onCompositionStart",captured:"onCompositionStartCapture"},dependencies:"blur compositionstart keydown keypress keyup mousedown".split(" ")},compositionUpdate:{phasedRegistrationNames:{bubbled:"onCompositionUpdate",captured:"onCompositionUpdateCapture"},dependencies:"blur compositionupdate keydown keypress keyup mousedown".split(" ")}},rr=!1;function or(e,t){switch(e){case"keyup":return-1!==qn.indexOf(t.keyCode);case"keydown":return 229!==t.keyCode;case"keypress":case"mousedown":case"blur":return!0;default:return!1}}function ir(e){return"object"==typeof(e=e.detail)&&"data"in e?e.data:null}var ar=!1;var lr={eventTypes:nr,extractEvents:function(e,t,n,r){var o;if(Kn)e:{switch(e){case"compositionstart":var i=nr.compositionStart;break e;case"compositionend":i=nr.compositionEnd;break e;case"compositionupdate":i=nr.compositionUpdate;break e}i=void 0}else ar?or(e,n)&&(i=nr.compositionEnd):"keydown"===e&&229===n.keyCode&&(i=nr.compositionStart);return i?(er&&"ko"!==n.locale&&(ar||i!==nr.compositionStart?i===nr.compositionEnd&&ar&&(o=Hn()):(Nn="value"in(Ln=r)?Ln.value:Ln.textContent,ar=!0)),i=Xn.getPooled(i,t,n,r),o?i.data=o:null!==(o=ir(n))&&(i.data=o),In(i),o=i):o=null,(e=Jn?function(e,t){switch(e){case"compositionend":return ir(t);case"keypress":return 32!==t.which?null:(rr=!0,tr);case"textInput":return(e=t.data)===tr&&rr?null:e;default:return null}}(e,n):function(e,t){if(ar)return"compositionend"===e||!Kn&&or(e,t)?(e=Hn(),Vn=Nn=Ln=null,ar=!1,e):null;switch(e){case"paste":return null;case"keypress":if(!(t.ctrlKey||t.altKey||t.metaKey)||t.ctrlKey&&t.altKey){if(t.char&&1<t.char.length)return t.char;if(t.which)return String.fromCharCode(t.which)}return null;case"compositionend":return er&&"ko"!==t.locale?null:t.data;default:return null}}(e,n))?((t=Gn.getPooled(nr.beforeInput,t,n,r)).data=e,In(t)):t=null,null===o?t:null===t?o:[o,t]}},ur={color:!0,date:!0,datetime:!0,"datetime-local":!0,email:!0,month:!0,number:!0,password:!0,range:!0,search:!0,tel:!0,text:!0,time:!0,url:!0,week:!0};function cr(e){var t=e&&e.nodeName&&e.nodeName.toLowerCase();return"input"===t?!!ur[e.type]:"textarea"===t}var sr={change:{phasedRegistrationNames:{bubbled:"onChange",captured:"onChangeCapture"},dependencies:"blur change click focus 
input keydown keyup selectionchange".split(" ")}};function fr(e,t,n){return(e=Un.getPooled(sr.change,e,t,n)).type="change",z(n),In(e),e}var pr=null,dr=null;function hr(e){lt(e)}function mr(e){if(xe(Pn(e)))return e}function gr(e,t){if("change"===e)return t}var vr=!1;function br(){pr&&(pr.detachEvent("onpropertychange",yr),dr=pr=null)}function yr(e){if("value"===e.propertyName&&mr(dr))if(e=fr(dr,e,ut(e)),L)lt(e);else{L=!0;try{j(hr,e)}finally{L=!1,V()}}}function wr(e,t,n){"focus"===e?(br(),dr=n,(pr=t).attachEvent("onpropertychange",yr)):"blur"===e&&br()}function xr(e){if("selectionchange"===e||"keyup"===e||"keydown"===e)return mr(dr)}function Er(e,t){if("click"===e)return mr(t)}function kr(e,t){if("input"===e||"change"===e)return mr(t)}T&&(vr=ct("input")&&(!document.documentMode||9<document.documentMode));var Sr={eventTypes:sr,_isInputEventSupported:vr,extractEvents:function(e,t,n,r){var o=t?Pn(t):window,i=o.nodeName&&o.nodeName.toLowerCase();if("select"===i||"input"===i&&"file"===o.type)var a=gr;else if(cr(o))if(vr)a=kr;else{a=xr;var l=wr}else(i=o.nodeName)&&"input"===i.toLowerCase()&&("checkbox"===o.type||"radio"===o.type)&&(a=Er);if(a&&(a=a(e,t)))return fr(a,n,r);l&&l(e,o,t),"blur"===e&&(e=o._wrapperState)&&e.controlled&&"number"===o.type&&Te(o,"number",o.value)}},Or=Un.extend({view:null,detail:null}),Cr={Alt:"altKey",Control:"ctrlKey",Meta:"metaKey",Shift:"shiftKey"};function Tr(e){var t=this.nativeEvent;return t.getModifierState?t.getModifierState(e):!!(e=Cr[e])&&!!t[e]}function _r(){return Tr}var Pr=0,Ar=0,Mr=!1,zr=!1,Fr=Or.extend({screenX:null,screenY:null,clientX:null,clientY:null,pageX:null,pageY:null,ctrlKey:null,shiftKey:null,altKey:null,metaKey:null,getModifierState:_r,button:null,buttons:null,relatedTarget:function(e){return e.relatedTarget||(e.fromElement===e.srcElement?e.toElement:e.fromElement)},movementX:function(e){if("movementX"in e)return e.movementX;var t=Pr;return Pr=e.screenX,Mr?"mousemove"===e.type?e.screenX-t:0:(Mr=!0,0)},movementY:function(e){if("movementY"in e)return e.movementY;var t=Ar;return Ar=e.screenY,zr?"mousemove"===e.type?e.screenY-t:0:(zr=!0,0)}}),jr=Fr.extend({pointerId:null,width:null,height:null,pressure:null,tangentialPressure:null,tiltX:null,tiltY:null,twist:null,pointerType:null,isPrimary:null}),Dr={mouseEnter:{registrationName:"onMouseEnter",dependencies:["mouseout","mouseover"]},mouseLeave:{registrationName:"onMouseLeave",dependencies:["mouseout","mouseover"]},pointerEnter:{registrationName:"onPointerEnter",dependencies:["pointerout","pointerover"]},pointerLeave:{registrationName:"onPointerLeave",dependencies:["pointerout","pointerover"]}},Rr={eventTypes:Dr,extractEvents:function(e,t,n,r,o){var i="mouseover"===e||"pointerover"===e,a="mouseout"===e||"pointerout"===e;if(i&&0==(32&o)&&(n.relatedTarget||n.fromElement)||!a&&!i)return null;(i=r.window===r?r:(i=r.ownerDocument)?i.defaultView||i.parentWindow:window,a)?(a=t,null!==(t=(t=n.relatedTarget||n.toElement)?Tn(t):null)&&(t!==Je(t)||5!==t.tag&&6!==t.tag)&&(t=null)):a=null;if(a===t)return null;if("mouseout"===e||"mouseover"===e)var 
l=Fr,u=Dr.mouseLeave,c=Dr.mouseEnter,s="mouse";else"pointerout"!==e&&"pointerover"!==e||(l=jr,u=Dr.pointerLeave,c=Dr.pointerEnter,s="pointer");if(e=null==a?i:Pn(a),i=null==t?i:Pn(t),(u=l.getPooled(u,a,n,r)).type=s+"leave",u.target=e,u.relatedTarget=i,(n=l.getPooled(c,t,n,r)).type=s+"enter",n.target=i,n.relatedTarget=e,s=t,(r=a)&&s)e:{for(c=s,a=0,e=l=r;e;e=Mn(e))a++;for(e=0,t=c;t;t=Mn(t))e++;for(;0<a-e;)l=Mn(l),a--;for(;0<e-a;)c=Mn(c),e--;for(;a--;){if(l===c||l===c.alternate)break e;l=Mn(l),c=Mn(c)}l=null}else l=null;for(c=l,l=[];r&&r!==c&&(null===(a=r.alternate)||a!==c);)l.push(r),r=Mn(r);for(r=[];s&&s!==c&&(null===(a=s.alternate)||a!==c);)r.push(s),s=Mn(s);for(s=0;s<l.length;s++)Dn(l[s],"bubbled",u);for(s=r.length;0<s--;)Dn(r[s],"captured",n);return 0==(64&o)?[u]:[u,n]}};var Ir="function"==typeof Object.is?Object.is:function(e,t){return e===t&&(0!==e||1/e==1/t)||e!=e&&t!=t},Lr=Object.prototype.hasOwnProperty;function Nr(e,t){if(Ir(e,t))return!0;if("object"!=typeof e||null===e||"object"!=typeof t||null===t)return!1;var n=Object.keys(e),r=Object.keys(t);if(n.length!==r.length)return!1;for(r=0;r<n.length;r++)if(!Lr.call(t,n[r])||!Ir(e[n[r]],t[n[r]]))return!1;return!0}var Vr=T&&"documentMode"in document&&11>=document.documentMode,Hr={select:{phasedRegistrationNames:{bubbled:"onSelect",captured:"onSelectCapture"},dependencies:"blur contextmenu dragend focus keydown keyup mousedown mouseup selectionchange".split(" ")}},Br=null,Wr=null,Ur=null,$r=!1;function Yr(e,t){var n=t.window===t?t.document:9===t.nodeType?t:t.ownerDocument;return $r||null==Br||Br!==sn(n)?null:("selectionStart"in(n=Br)&&hn(n)?n={start:n.selectionStart,end:n.selectionEnd}:n={anchorNode:(n=(n.ownerDocument&&n.ownerDocument.defaultView||window).getSelection()).anchorNode,anchorOffset:n.anchorOffset,focusNode:n.focusNode,focusOffset:n.focusOffset},Ur&&Nr(Ur,n)?null:(Ur=n,(e=Un.getPooled(Hr.select,Wr,e,t)).type="select",e.target=Br,In(e),e))}var Qr={eventTypes:Hr,extractEvents:function(e,t,n,r,o,i){if(!(i=!(o=i||(r.window===r?r.document:9===r.nodeType?r:r.ownerDocument)))){e:{o=Ze(o),i=O.onSelect;for(var a=0;a<i.length;a++)if(!o.has(i[a])){o=!1;break e}o=!0}i=!o}if(i)return null;switch(o=t?Pn(t):window,e){case"focus":(cr(o)||"true"===o.contentEditable)&&(Br=o,Wr=t,Ur=null);break;case"blur":Ur=Wr=Br=null;break;case"mousedown":$r=!0;break;case"contextmenu":case"mouseup":case"dragend":return $r=!1,Yr(n,r);case"selectionchange":if(Vr)break;case"keydown":case"keyup":return Yr(n,r)}return null}},Xr=Un.extend({animationName:null,elapsedTime:null,pseudoElement:null}),Gr=Un.extend({clipboardData:function(e){return"clipboardData"in e?e.clipboardData:window.clipboardData}}),qr=Or.extend({relatedTarget:null});function Kr(e){var t=e.keyCode;return"charCode"in e?0===(e=e.charCode)&&13===t&&(e=13):e=t,10===e&&(e=13),32<=e||13===e?e:0}var Zr={Esc:"Escape",Spacebar:" ",Left:"ArrowLeft",Up:"ArrowUp",Right:"ArrowRight",Down:"ArrowDown",Del:"Delete",Win:"OS",Menu:"ContextMenu",Apps:"ContextMenu",Scroll:"ScrollLock",MozPrintableKey:"Unidentified"},Jr={8:"Backspace",9:"Tab",12:"Clear",13:"Enter",16:"Shift",17:"Control",18:"Alt",19:"Pause",20:"CapsLock",27:"Escape",32:" ",33:"PageUp",34:"PageDown",35:"End",36:"Home",37:"ArrowLeft",38:"ArrowUp",39:"ArrowRight",40:"ArrowDown",45:"Insert",46:"Delete",112:"F1",113:"F2",114:"F3",115:"F4",116:"F5",117:"F6",118:"F7",119:"F8",120:"F9",121:"F10",122:"F11",123:"F12",144:"NumLock",145:"ScrollLock",224:"Meta"},eo=Or.extend({key:function(e){if(e.key){var t=Zr[e.key]||e.key;if("Unidentified"!==t)return 
t}return"keypress"===e.type?13===(e=Kr(e))?"Enter":String.fromCharCode(e):"keydown"===e.type||"keyup"===e.type?Jr[e.keyCode]||"Unidentified":""},location:null,ctrlKey:null,shiftKey:null,altKey:null,metaKey:null,repeat:null,locale:null,getModifierState:_r,charCode:function(e){return"keypress"===e.type?Kr(e):0},keyCode:function(e){return"keydown"===e.type||"keyup"===e.type?e.keyCode:0},which:function(e){return"keypress"===e.type?Kr(e):"keydown"===e.type||"keyup"===e.type?e.keyCode:0}}),to=Fr.extend({dataTransfer:null}),no=Or.extend({touches:null,targetTouches:null,changedTouches:null,altKey:null,metaKey:null,ctrlKey:null,shiftKey:null,getModifierState:_r}),ro=Un.extend({propertyName:null,elapsedTime:null,pseudoElement:null}),oo=Fr.extend({deltaX:function(e){return"deltaX"in e?e.deltaX:"wheelDeltaX"in e?-e.wheelDeltaX:0},deltaY:function(e){return"deltaY"in e?e.deltaY:"wheelDeltaY"in e?-e.wheelDeltaY:"wheelDelta"in e?-e.wheelDelta:0},deltaZ:null,deltaMode:null}),io={eventTypes:It,extractEvents:function(e,t,n,r){var o=Lt.get(e);if(!o)return null;switch(e){case"keypress":if(0===Kr(n))return null;case"keydown":case"keyup":e=eo;break;case"blur":case"focus":e=qr;break;case"click":if(2===n.button)return null;case"auxclick":case"dblclick":case"mousedown":case"mousemove":case"mouseup":case"mouseout":case"mouseover":case"contextmenu":e=Fr;break;case"drag":case"dragend":case"dragenter":case"dragexit":case"dragleave":case"dragover":case"dragstart":case"drop":e=to;break;case"touchcancel":case"touchend":case"touchmove":case"touchstart":e=no;break;case Ye:case Qe:case Xe:e=Xr;break;case Ge:e=ro;break;case"scroll":e=Or;break;case"wheel":e=oo;break;case"copy":case"cut":case"paste":e=Gr;break;case"gotpointercapture":case"lostpointercapture":case"pointercancel":case"pointerdown":case"pointermove":case"pointerout":case"pointerover":case"pointerup":e=jr;break;default:e=Un}return In(t=e.getPooled(o,t,n,r)),t}};if(b)throw Error(a(101));b=Array.prototype.slice.call("ResponderEventPlugin SimpleEventPlugin EnterLeaveEventPlugin ChangeEventPlugin SelectEventPlugin BeforeInputEventPlugin".split(" ")),w(),h=An,m=_n,g=Pn,C({SimpleEventPlugin:io,EnterLeaveEventPlugin:Rr,ChangeEventPlugin:Sr,SelectEventPlugin:Qr,BeforeInputEventPlugin:lr});var ao=[],lo=-1;function uo(e){0>lo||(e.current=ao[lo],ao[lo]=null,lo--)}function co(e,t){lo++,ao[lo]=e.current,e.current=t}var so={},fo={current:so},po={current:!1},ho=so;function mo(e,t){var n=e.type.contextTypes;if(!n)return so;var r=e.stateNode;if(r&&r.__reactInternalMemoizedUnmaskedChildContext===t)return r.__reactInternalMemoizedMaskedChildContext;var o,i={};for(o in n)i[o]=t[o];return r&&((e=e.stateNode).__reactInternalMemoizedUnmaskedChildContext=t,e.__reactInternalMemoizedMaskedChildContext=i),i}function go(e){return null!=(e=e.childContextTypes)}function vo(){uo(po),uo(fo)}function bo(e,t,n){if(fo.current!==so)throw Error(a(168));co(fo,t),co(po,n)}function yo(e,t,n){var r=e.stateNode;if(e=t.childContextTypes,"function"!=typeof r.getChildContext)return n;for(var i in r=r.getChildContext())if(!(i in e))throw Error(a(108,ge(t)||"Unknown",i));return o({},n,{},r)}function wo(e){return e=(e=e.stateNode)&&e.__reactInternalMemoizedMergedChildContext||so,ho=fo.current,co(fo,e),co(po,po.current),!0}function xo(e,t,n){var r=e.stateNode;if(!r)throw Error(a(169));n?(e=yo(e,t,ho),r.__reactInternalMemoizedMergedChildContext=e,uo(po),uo(fo),co(fo,e)):uo(po),co(po,n)}var 
Eo=i.unstable_runWithPriority,ko=i.unstable_scheduleCallback,So=i.unstable_cancelCallback,Oo=i.unstable_requestPaint,Co=i.unstable_now,To=i.unstable_getCurrentPriorityLevel,_o=i.unstable_ImmediatePriority,Po=i.unstable_UserBlockingPriority,Ao=i.unstable_NormalPriority,Mo=i.unstable_LowPriority,zo=i.unstable_IdlePriority,Fo={},jo=i.unstable_shouldYield,Do=void 0!==Oo?Oo:function(){},Ro=null,Io=null,Lo=!1,No=Co(),Vo=1e4>No?Co:function(){return Co()-No};function Ho(){switch(To()){case _o:return 99;case Po:return 98;case Ao:return 97;case Mo:return 96;case zo:return 95;default:throw Error(a(332))}}function Bo(e){switch(e){case 99:return _o;case 98:return Po;case 97:return Ao;case 96:return Mo;case 95:return zo;default:throw Error(a(332))}}function Wo(e,t){return e=Bo(e),Eo(e,t)}function Uo(e,t,n){return e=Bo(e),ko(e,t,n)}function $o(e){return null===Ro?(Ro=[e],Io=ko(_o,Qo)):Ro.push(e),Fo}function Yo(){if(null!==Io){var e=Io;Io=null,So(e)}Qo()}function Qo(){if(!Lo&&null!==Ro){Lo=!0;var e=0;try{var t=Ro;Wo(99,(function(){for(;e<t.length;e++){var n=t[e];do{n=n(!0)}while(null!==n)}})),Ro=null}catch(t){throw null!==Ro&&(Ro=Ro.slice(e+1)),ko(_o,Yo),t}finally{Lo=!1}}}function Xo(e,t,n){return 1073741821-(1+((1073741821-e+t/10)/(n/=10)|0))*n}function Go(e,t){if(e&&e.defaultProps)for(var n in t=o({},t),e=e.defaultProps)void 0===t[n]&&(t[n]=e[n]);return t}var qo={current:null},Ko=null,Zo=null,Jo=null;function ei(){Jo=Zo=Ko=null}function ti(e){var t=qo.current;uo(qo),e.type._context._currentValue=t}function ni(e,t){for(;null!==e;){var n=e.alternate;if(e.childExpirationTime<t)e.childExpirationTime=t,null!==n&&n.childExpirationTime<t&&(n.childExpirationTime=t);else{if(!(null!==n&&n.childExpirationTime<t))break;n.childExpirationTime=t}e=e.return}}function ri(e,t){Ko=e,Jo=Zo=null,null!==(e=e.dependencies)&&null!==e.firstContext&&(e.expirationTime>=t&&(Aa=!0),e.firstContext=null)}function oi(e,t){if(Jo!==e&&!1!==t&&0!==t)if("number"==typeof t&&1073741823!==t||(Jo=e,t=1073741823),t={context:e,observedBits:t,next:null},null===Zo){if(null===Ko)throw Error(a(308));Zo=t,Ko.dependencies={expirationTime:0,firstContext:t,responders:null}}else Zo=Zo.next=t;return e._currentValue}var ii=!1;function ai(e){e.updateQueue={baseState:e.memoizedState,baseQueue:null,shared:{pending:null},effects:null}}function li(e,t){e=e.updateQueue,t.updateQueue===e&&(t.updateQueue={baseState:e.baseState,baseQueue:e.baseQueue,shared:e.shared,effects:e.effects})}function ui(e,t){return(e={expirationTime:e,suspenseConfig:t,tag:0,payload:null,callback:null,next:null}).next=e}function ci(e,t){if(null!==(e=e.updateQueue)){var n=(e=e.shared).pending;null===n?t.next=t:(t.next=n.next,n.next=t),e.pending=t}}function si(e,t){var n=e.alternate;null!==n&&li(n,e),null===(n=(e=e.updateQueue).baseQueue)?(e.baseQueue=t.next=t,t.next=t):(t.next=n.next,n.next=t)}function fi(e,t,n,r){var i=e.updateQueue;ii=!1;var a=i.baseQueue,l=i.shared.pending;if(null!==l){if(null!==a){var u=a.next;a.next=l.next,l.next=u}a=l,i.shared.pending=null,null!==(u=e.alternate)&&(null!==(u=u.updateQueue)&&(u.baseQueue=l))}if(null!==a){u=a.next;var c=i.baseState,s=0,f=null,p=null,d=null;if(null!==u)for(var h=u;;){if((l=h.expirationTime)<r){var m={expirationTime:h.expirationTime,suspenseConfig:h.suspenseConfig,tag:h.tag,payload:h.payload,callback:h.callback,next:null};null===d?(p=d=m,f=c):d=d.next=m,l>s&&(s=l)}else{null!==d&&(d=d.next={expirationTime:1073741823,suspenseConfig:h.suspenseConfig,tag:h.tag,payload:h.payload,callback:h.callback,next:null}),iu(l,h.suspenseConfig);e:{var 
g=e,v=h;switch(l=t,m=n,v.tag){case 1:if("function"==typeof(g=v.payload)){c=g.call(m,c,l);break e}c=g;break e;case 3:g.effectTag=-4097&g.effectTag|64;case 0:if(null==(l="function"==typeof(g=v.payload)?g.call(m,c,l):g))break e;c=o({},c,l);break e;case 2:ii=!0}}null!==h.callback&&(e.effectTag|=32,null===(l=i.effects)?i.effects=[h]:l.push(h))}if(null===(h=h.next)||h===u){if(null===(l=i.shared.pending))break;h=a.next=l.next,l.next=u,i.baseQueue=a=l,i.shared.pending=null}}null===d?f=c:d.next=p,i.baseState=f,i.baseQueue=d,au(s),e.expirationTime=s,e.memoizedState=c}}function pi(e,t,n){if(e=t.effects,t.effects=null,null!==e)for(t=0;t<e.length;t++){var r=e[t],o=r.callback;if(null!==o){if(r.callback=null,r=o,o=n,"function"!=typeof r)throw Error(a(191,r));r.call(o)}}}var di=q.ReactCurrentBatchConfig,hi=(new r.Component).refs;function mi(e,t,n,r){n=null==(n=n(r,t=e.memoizedState))?t:o({},t,n),e.memoizedState=n,0===e.expirationTime&&(e.updateQueue.baseState=n)}var gi={isMounted:function(e){return!!(e=e._reactInternalFiber)&&Je(e)===e},enqueueSetState:function(e,t,n){e=e._reactInternalFiber;var r=Yl(),o=di.suspense;(o=ui(r=Ql(r,e,o),o)).payload=t,null!=n&&(o.callback=n),ci(e,o),Xl(e,r)},enqueueReplaceState:function(e,t,n){e=e._reactInternalFiber;var r=Yl(),o=di.suspense;(o=ui(r=Ql(r,e,o),o)).tag=1,o.payload=t,null!=n&&(o.callback=n),ci(e,o),Xl(e,r)},enqueueForceUpdate:function(e,t){e=e._reactInternalFiber;var n=Yl(),r=di.suspense;(r=ui(n=Ql(n,e,r),r)).tag=2,null!=t&&(r.callback=t),ci(e,r),Xl(e,n)}};function vi(e,t,n,r,o,i,a){return"function"==typeof(e=e.stateNode).shouldComponentUpdate?e.shouldComponentUpdate(r,i,a):!t.prototype||!t.prototype.isPureReactComponent||(!Nr(n,r)||!Nr(o,i))}function bi(e,t,n){var r=!1,o=so,i=t.contextType;return"object"==typeof i&&null!==i?i=oi(i):(o=go(t)?ho:fo.current,i=(r=null!=(r=t.contextTypes))?mo(e,o):so),t=new t(n,i),e.memoizedState=null!==t.state&&void 0!==t.state?t.state:null,t.updater=gi,e.stateNode=t,t._reactInternalFiber=e,r&&((e=e.stateNode).__reactInternalMemoizedUnmaskedChildContext=o,e.__reactInternalMemoizedMaskedChildContext=i),t}function yi(e,t,n,r){e=t.state,"function"==typeof t.componentWillReceiveProps&&t.componentWillReceiveProps(n,r),"function"==typeof t.UNSAFE_componentWillReceiveProps&&t.UNSAFE_componentWillReceiveProps(n,r),t.state!==e&&gi.enqueueReplaceState(t,t.state,null)}function wi(e,t,n,r){var o=e.stateNode;o.props=n,o.state=e.memoizedState,o.refs=hi,ai(e);var i=t.contextType;"object"==typeof i&&null!==i?o.context=oi(i):(i=go(t)?ho:fo.current,o.context=mo(e,i)),fi(e,n,o,r),o.state=e.memoizedState,"function"==typeof(i=t.getDerivedStateFromProps)&&(mi(e,t,i,n),o.state=e.memoizedState),"function"==typeof t.getDerivedStateFromProps||"function"==typeof o.getSnapshotBeforeUpdate||"function"!=typeof o.UNSAFE_componentWillMount&&"function"!=typeof o.componentWillMount||(t=o.state,"function"==typeof o.componentWillMount&&o.componentWillMount(),"function"==typeof o.UNSAFE_componentWillMount&&o.UNSAFE_componentWillMount(),t!==o.state&&gi.enqueueReplaceState(o,o.state,null),fi(e,n,o,r),o.state=e.memoizedState),"function"==typeof o.componentDidMount&&(e.effectTag|=4)}var xi=Array.isArray;function Ei(e,t,n){if(null!==(e=n.ref)&&"function"!=typeof e&&"object"!=typeof e){if(n._owner){if(n=n._owner){if(1!==n.tag)throw Error(a(309));var r=n.stateNode}if(!r)throw Error(a(147,e));var o=""+e;return null!==t&&null!==t.ref&&"function"==typeof t.ref&&t.ref._stringRef===o?t.ref:((t=function(e){var t=r.refs;t===hi&&(t=r.refs={}),null===e?delete 
t[o]:t[o]=e})._stringRef=o,t)}if("string"!=typeof e)throw Error(a(284));if(!n._owner)throw Error(a(290,e))}return e}function ki(e,t){if("textarea"!==e.type)throw Error(a(31,"[object Object]"===Object.prototype.toString.call(t)?"object with keys {"+Object.keys(t).join(", ")+"}":t,""))}function Si(e){function t(t,n){if(e){var r=t.lastEffect;null!==r?(r.nextEffect=n,t.lastEffect=n):t.firstEffect=t.lastEffect=n,n.nextEffect=null,n.effectTag=8}}function n(n,r){if(!e)return null;for(;null!==r;)t(n,r),r=r.sibling;return null}function r(e,t){for(e=new Map;null!==t;)null!==t.key?e.set(t.key,t):e.set(t.index,t),t=t.sibling;return e}function o(e,t){return(e=Cu(e,t)).index=0,e.sibling=null,e}function i(t,n,r){return t.index=r,e?null!==(r=t.alternate)?(r=r.index)<n?(t.effectTag=2,n):r:(t.effectTag=2,n):n}function l(t){return e&&null===t.alternate&&(t.effectTag=2),t}function u(e,t,n,r){return null===t||6!==t.tag?((t=Pu(n,e.mode,r)).return=e,t):((t=o(t,n)).return=e,t)}function c(e,t,n,r){return null!==t&&t.elementType===n.type?((r=o(t,n.props)).ref=Ei(e,t,n),r.return=e,r):((r=Tu(n.type,n.key,n.props,null,e.mode,r)).ref=Ei(e,t,n),r.return=e,r)}function s(e,t,n,r){return null===t||4!==t.tag||t.stateNode.containerInfo!==n.containerInfo||t.stateNode.implementation!==n.implementation?((t=Au(n,e.mode,r)).return=e,t):((t=o(t,n.children||[])).return=e,t)}function f(e,t,n,r,i){return null===t||7!==t.tag?((t=_u(n,e.mode,r,i)).return=e,t):((t=o(t,n)).return=e,t)}function p(e,t,n){if("string"==typeof t||"number"==typeof t)return(t=Pu(""+t,e.mode,n)).return=e,t;if("object"==typeof t&&null!==t){switch(t.$$typeof){case ee:return(n=Tu(t.type,t.key,t.props,null,e.mode,n)).ref=Ei(e,null,t),n.return=e,n;case te:return(t=Au(t,e.mode,n)).return=e,t}if(xi(t)||me(t))return(t=_u(t,e.mode,n,null)).return=e,t;ki(e,t)}return null}function d(e,t,n,r){var o=null!==t?t.key:null;if("string"==typeof n||"number"==typeof n)return null!==o?null:u(e,t,""+n,r);if("object"==typeof n&&null!==n){switch(n.$$typeof){case ee:return n.key===o?n.type===ne?f(e,t,n.props.children,r,o):c(e,t,n,r):null;case te:return n.key===o?s(e,t,n,r):null}if(xi(n)||me(n))return null!==o?null:f(e,t,n,r,null);ki(e,n)}return null}function h(e,t,n,r,o){if("string"==typeof r||"number"==typeof r)return u(t,e=e.get(n)||null,""+r,o);if("object"==typeof r&&null!==r){switch(r.$$typeof){case ee:return e=e.get(null===r.key?n:r.key)||null,r.type===ne?f(t,e,r.props.children,o,r.key):c(t,e,r,o);case te:return s(t,e=e.get(null===r.key?n:r.key)||null,r,o)}if(xi(r)||me(r))return f(t,e=e.get(n)||null,r,o,null);ki(t,r)}return null}function m(o,a,l,u){for(var c=null,s=null,f=a,m=a=0,g=null;null!==f&&m<l.length;m++){f.index>m?(g=f,f=null):g=f.sibling;var v=d(o,f,l[m],u);if(null===v){null===f&&(f=g);break}e&&f&&null===v.alternate&&t(o,f),a=i(v,a,m),null===s?c=v:s.sibling=v,s=v,f=g}if(m===l.length)return n(o,f),c;if(null===f){for(;m<l.length;m++)null!==(f=p(o,l[m],u))&&(a=i(f,a,m),null===s?c=f:s.sibling=f,s=f);return c}for(f=r(o,f);m<l.length;m++)null!==(g=h(f,o,m,l[m],u))&&(e&&null!==g.alternate&&f.delete(null===g.key?m:g.key),a=i(g,a,m),null===s?c=g:s.sibling=g,s=g);return e&&f.forEach((function(e){return t(o,e)})),c}function g(o,l,u,c){var s=me(u);if("function"!=typeof s)throw Error(a(150));if(null==(u=s.call(u)))throw Error(a(151));for(var f=s=null,m=l,g=l=0,v=null,b=u.next();null!==m&&!b.done;g++,b=u.next()){m.index>g?(v=m,m=null):v=m.sibling;var 
y=d(o,m,b.value,c);if(null===y){null===m&&(m=v);break}e&&m&&null===y.alternate&&t(o,m),l=i(y,l,g),null===f?s=y:f.sibling=y,f=y,m=v}if(b.done)return n(o,m),s;if(null===m){for(;!b.done;g++,b=u.next())null!==(b=p(o,b.value,c))&&(l=i(b,l,g),null===f?s=b:f.sibling=b,f=b);return s}for(m=r(o,m);!b.done;g++,b=u.next())null!==(b=h(m,o,g,b.value,c))&&(e&&null!==b.alternate&&m.delete(null===b.key?g:b.key),l=i(b,l,g),null===f?s=b:f.sibling=b,f=b);return e&&m.forEach((function(e){return t(o,e)})),s}return function(e,r,i,u){var c="object"==typeof i&&null!==i&&i.type===ne&&null===i.key;c&&(i=i.props.children);var s="object"==typeof i&&null!==i;if(s)switch(i.$$typeof){case ee:e:{for(s=i.key,c=r;null!==c;){if(c.key===s){switch(c.tag){case 7:if(i.type===ne){n(e,c.sibling),(r=o(c,i.props.children)).return=e,e=r;break e}break;default:if(c.elementType===i.type){n(e,c.sibling),(r=o(c,i.props)).ref=Ei(e,c,i),r.return=e,e=r;break e}}n(e,c);break}t(e,c),c=c.sibling}i.type===ne?((r=_u(i.props.children,e.mode,u,i.key)).return=e,e=r):((u=Tu(i.type,i.key,i.props,null,e.mode,u)).ref=Ei(e,r,i),u.return=e,e=u)}return l(e);case te:e:{for(c=i.key;null!==r;){if(r.key===c){if(4===r.tag&&r.stateNode.containerInfo===i.containerInfo&&r.stateNode.implementation===i.implementation){n(e,r.sibling),(r=o(r,i.children||[])).return=e,e=r;break e}n(e,r);break}t(e,r),r=r.sibling}(r=Au(i,e.mode,u)).return=e,e=r}return l(e)}if("string"==typeof i||"number"==typeof i)return i=""+i,null!==r&&6===r.tag?(n(e,r.sibling),(r=o(r,i)).return=e,e=r):(n(e,r),(r=Pu(i,e.mode,u)).return=e,e=r),l(e);if(xi(i))return m(e,r,i,u);if(me(i))return g(e,r,i,u);if(s&&ki(e,i),void 0===i&&!c)switch(e.tag){case 1:case 0:throw e=e.type,Error(a(152,e.displayName||e.name||"Component"))}return n(e,r)}}var Oi=Si(!0),Ci=Si(!1),Ti={},_i={current:Ti},Pi={current:Ti},Ai={current:Ti};function Mi(e){if(e===Ti)throw Error(a(174));return e}function zi(e,t){switch(co(Ai,t),co(Pi,e),co(_i,Ti),e=t.nodeType){case 9:case 11:t=(t=t.documentElement)?t.namespaceURI:Ie(null,"");break;default:t=Ie(t=(e=8===e?t.parentNode:t).namespaceURI||null,e=e.tagName)}uo(_i),co(_i,t)}function Fi(){uo(_i),uo(Pi),uo(Ai)}function ji(e){Mi(Ai.current);var t=Mi(_i.current),n=Ie(t,e.type);t!==n&&(co(Pi,e),co(_i,n))}function Di(e){Pi.current===e&&(uo(_i),uo(Pi))}var Ri={current:0};function Ii(e){for(var t=e;null!==t;){if(13===t.tag){var n=t.memoizedState;if(null!==n&&(null===(n=n.dehydrated)||"$?"===n.data||"$!"===n.data))return t}else if(19===t.tag&&void 0!==t.memoizedProps.revealOrder){if(0!=(64&t.effectTag))return t}else if(null!==t.child){t.child.return=t,t=t.child;continue}if(t===e)break;for(;null===t.sibling;){if(null===t.return||t.return===e)return null;t=t.return}t.sibling.return=t.return,t=t.sibling}return null}function Li(e,t){return{responder:e,props:t}}var Ni=q.ReactCurrentDispatcher,Vi=q.ReactCurrentBatchConfig,Hi=0,Bi=null,Wi=null,Ui=null,$i=!1;function Yi(){throw Error(a(321))}function Qi(e,t){if(null===t)return!1;for(var n=0;n<t.length&&n<e.length;n++)if(!Ir(e[n],t[n]))return!1;return!0}function Xi(e,t,n,r,o,i){if(Hi=i,Bi=t,t.memoizedState=null,t.updateQueue=null,t.expirationTime=0,Ni.current=null===e||null===e.memoizedState?va:ba,e=n(r,o),t.expirationTime===Hi){i=0;do{if(t.expirationTime=0,!(25>i))throw Error(a(301));i+=1,Ui=Wi=null,t.updateQueue=null,Ni.current=ya,e=n(r,o)}while(t.expirationTime===Hi)}if(Ni.current=ga,t=null!==Wi&&null!==Wi.next,Hi=0,Ui=Wi=Bi=null,$i=!1,t)throw Error(a(300));return e}function Gi(){var 
e={memoizedState:null,baseState:null,baseQueue:null,queue:null,next:null};return null===Ui?Bi.memoizedState=Ui=e:Ui=Ui.next=e,Ui}function qi(){if(null===Wi){var e=Bi.alternate;e=null!==e?e.memoizedState:null}else e=Wi.next;var t=null===Ui?Bi.memoizedState:Ui.next;if(null!==t)Ui=t,Wi=e;else{if(null===e)throw Error(a(310));e={memoizedState:(Wi=e).memoizedState,baseState:Wi.baseState,baseQueue:Wi.baseQueue,queue:Wi.queue,next:null},null===Ui?Bi.memoizedState=Ui=e:Ui=Ui.next=e}return Ui}function Ki(e,t){return"function"==typeof t?t(e):t}function Zi(e){var t=qi(),n=t.queue;if(null===n)throw Error(a(311));n.lastRenderedReducer=e;var r=Wi,o=r.baseQueue,i=n.pending;if(null!==i){if(null!==o){var l=o.next;o.next=i.next,i.next=l}r.baseQueue=o=i,n.pending=null}if(null!==o){o=o.next,r=r.baseState;var u=l=i=null,c=o;do{var s=c.expirationTime;if(s<Hi){var f={expirationTime:c.expirationTime,suspenseConfig:c.suspenseConfig,action:c.action,eagerReducer:c.eagerReducer,eagerState:c.eagerState,next:null};null===u?(l=u=f,i=r):u=u.next=f,s>Bi.expirationTime&&(Bi.expirationTime=s,au(s))}else null!==u&&(u=u.next={expirationTime:1073741823,suspenseConfig:c.suspenseConfig,action:c.action,eagerReducer:c.eagerReducer,eagerState:c.eagerState,next:null}),iu(s,c.suspenseConfig),r=c.eagerReducer===e?c.eagerState:e(r,c.action);c=c.next}while(null!==c&&c!==o);null===u?i=r:u.next=l,Ir(r,t.memoizedState)||(Aa=!0),t.memoizedState=r,t.baseState=i,t.baseQueue=u,n.lastRenderedState=r}return[t.memoizedState,n.dispatch]}function Ji(e){var t=qi(),n=t.queue;if(null===n)throw Error(a(311));n.lastRenderedReducer=e;var r=n.dispatch,o=n.pending,i=t.memoizedState;if(null!==o){n.pending=null;var l=o=o.next;do{i=e(i,l.action),l=l.next}while(l!==o);Ir(i,t.memoizedState)||(Aa=!0),t.memoizedState=i,null===t.baseQueue&&(t.baseState=i),n.lastRenderedState=i}return[i,r]}function ea(e){var t=Gi();return"function"==typeof e&&(e=e()),t.memoizedState=t.baseState=e,e=(e=t.queue={pending:null,dispatch:null,lastRenderedReducer:Ki,lastRenderedState:e}).dispatch=ma.bind(null,Bi,e),[t.memoizedState,e]}function ta(e,t,n,r){return e={tag:e,create:t,destroy:n,deps:r,next:null},null===(t=Bi.updateQueue)?(t={lastEffect:null},Bi.updateQueue=t,t.lastEffect=e.next=e):null===(n=t.lastEffect)?t.lastEffect=e.next=e:(r=n.next,n.next=e,e.next=r,t.lastEffect=e),e}function na(){return qi().memoizedState}function ra(e,t,n,r){var o=Gi();Bi.effectTag|=e,o.memoizedState=ta(1|t,n,void 0,void 0===r?null:r)}function oa(e,t,n,r){var o=qi();r=void 0===r?null:r;var i=void 0;if(null!==Wi){var a=Wi.memoizedState;if(i=a.destroy,null!==r&&Qi(r,a.deps))return void ta(t,n,i,r)}Bi.effectTag|=e,o.memoizedState=ta(1|t,n,i,r)}function ia(e,t){return ra(516,4,e,t)}function aa(e,t){return oa(516,4,e,t)}function la(e,t){return oa(4,2,e,t)}function ua(e,t){return"function"==typeof t?(e=e(),t(e),function(){t(null)}):null!=t?(e=e(),t.current=e,function(){t.current=null}):void 0}function ca(e,t,n){return n=null!=n?n.concat([e]):null,oa(4,2,ua.bind(null,t,e),n)}function sa(){}function fa(e,t){return Gi().memoizedState=[e,void 0===t?null:t],e}function pa(e,t){var n=qi();t=void 0===t?null:t;var r=n.memoizedState;return null!==r&&null!==t&&Qi(t,r[1])?r[0]:(n.memoizedState=[e,t],e)}function da(e,t){var n=qi();t=void 0===t?null:t;var r=n.memoizedState;return null!==r&&null!==t&&Qi(t,r[1])?r[0]:(e=e(),n.memoizedState=[e,t],e)}function ha(e,t,n){var r=Ho();Wo(98>r?98:r,(function(){e(!0)})),Wo(97<r?97:r,(function(){var r=Vi.suspense;Vi.suspense=void 
0===t?null:t;try{e(!1),n()}finally{Vi.suspense=r}}))}function ma(e,t,n){var r=Yl(),o=di.suspense;o={expirationTime:r=Ql(r,e,o),suspenseConfig:o,action:n,eagerReducer:null,eagerState:null,next:null};var i=t.pending;if(null===i?o.next=o:(o.next=i.next,i.next=o),t.pending=o,i=e.alternate,e===Bi||null!==i&&i===Bi)$i=!0,o.expirationTime=Hi,Bi.expirationTime=Hi;else{if(0===e.expirationTime&&(null===i||0===i.expirationTime)&&null!==(i=t.lastRenderedReducer))try{var a=t.lastRenderedState,l=i(a,n);if(o.eagerReducer=i,o.eagerState=l,Ir(l,a))return}catch(e){}Xl(e,r)}}var ga={readContext:oi,useCallback:Yi,useContext:Yi,useEffect:Yi,useImperativeHandle:Yi,useLayoutEffect:Yi,useMemo:Yi,useReducer:Yi,useRef:Yi,useState:Yi,useDebugValue:Yi,useResponder:Yi,useDeferredValue:Yi,useTransition:Yi},va={readContext:oi,useCallback:fa,useContext:oi,useEffect:ia,useImperativeHandle:function(e,t,n){return n=null!=n?n.concat([e]):null,ra(4,2,ua.bind(null,t,e),n)},useLayoutEffect:function(e,t){return ra(4,2,e,t)},useMemo:function(e,t){var n=Gi();return t=void 0===t?null:t,e=e(),n.memoizedState=[e,t],e},useReducer:function(e,t,n){var r=Gi();return t=void 0!==n?n(t):t,r.memoizedState=r.baseState=t,e=(e=r.queue={pending:null,dispatch:null,lastRenderedReducer:e,lastRenderedState:t}).dispatch=ma.bind(null,Bi,e),[r.memoizedState,e]},useRef:function(e){return e={current:e},Gi().memoizedState=e},useState:ea,useDebugValue:sa,useResponder:Li,useDeferredValue:function(e,t){var n=ea(e),r=n[0],o=n[1];return ia((function(){var n=Vi.suspense;Vi.suspense=void 0===t?null:t;try{o(e)}finally{Vi.suspense=n}}),[e,t]),r},useTransition:function(e){var t=ea(!1),n=t[0];return t=t[1],[fa(ha.bind(null,t,e),[t,e]),n]}},ba={readContext:oi,useCallback:pa,useContext:oi,useEffect:aa,useImperativeHandle:ca,useLayoutEffect:la,useMemo:da,useReducer:Zi,useRef:na,useState:function(){return Zi(Ki)},useDebugValue:sa,useResponder:Li,useDeferredValue:function(e,t){var n=Zi(Ki),r=n[0],o=n[1];return aa((function(){var n=Vi.suspense;Vi.suspense=void 0===t?null:t;try{o(e)}finally{Vi.suspense=n}}),[e,t]),r},useTransition:function(e){var t=Zi(Ki),n=t[0];return t=t[1],[pa(ha.bind(null,t,e),[t,e]),n]}},ya={readContext:oi,useCallback:pa,useContext:oi,useEffect:aa,useImperativeHandle:ca,useLayoutEffect:la,useMemo:da,useReducer:Ji,useRef:na,useState:function(){return Ji(Ki)},useDebugValue:sa,useResponder:Li,useDeferredValue:function(e,t){var n=Ji(Ki),r=n[0],o=n[1];return aa((function(){var n=Vi.suspense;Vi.suspense=void 0===t?null:t;try{o(e)}finally{Vi.suspense=n}}),[e,t]),r},useTransition:function(e){var t=Ji(Ki),n=t[0];return t=t[1],[pa(ha.bind(null,t,e),[t,e]),n]}},wa=null,xa=null,Ea=!1;function ka(e,t){var n=Su(5,null,null,0);n.elementType="DELETED",n.type="DELETED",n.stateNode=t,n.return=e,n.effectTag=8,null!==e.lastEffect?(e.lastEffect.nextEffect=n,e.lastEffect=n):e.firstEffect=e.lastEffect=n}function Sa(e,t){switch(e.tag){case 5:var n=e.type;return null!==(t=1!==t.nodeType||n.toLowerCase()!==t.nodeName.toLowerCase()?null:t)&&(e.stateNode=t,!0);case 6:return null!==(t=""===e.pendingProps||3!==t.nodeType?null:t)&&(e.stateNode=t,!0);case 13:default:return!1}}function Oa(e){if(Ea){var t=xa;if(t){var n=t;if(!Sa(e,t)){if(!(t=xn(n.nextSibling))||!Sa(e,t))return e.effectTag=-1025&e.effectTag|2,Ea=!1,void(wa=e);ka(wa,n)}wa=e,xa=xn(t.firstChild)}else e.effectTag=-1025&e.effectTag|2,Ea=!1,wa=e}}function Ca(e){for(e=e.return;null!==e&&5!==e.tag&&3!==e.tag&&13!==e.tag;)e=e.return;wa=e}function Ta(e){if(e!==wa)return!1;if(!Ea)return Ca(e),Ea=!0,!1;var 
t=e.type;if(5!==e.tag||"head"!==t&&"body"!==t&&!bn(t,e.memoizedProps))for(t=xa;t;)ka(e,t),t=xn(t.nextSibling);if(Ca(e),13===e.tag){if(!(e=null!==(e=e.memoizedState)?e.dehydrated:null))throw Error(a(317));e:{for(e=e.nextSibling,t=0;e;){if(8===e.nodeType){var n=e.data;if("/$"===n){if(0===t){xa=xn(e.nextSibling);break e}t--}else"$"!==n&&"$!"!==n&&"$?"!==n||t++}e=e.nextSibling}xa=null}}else xa=wa?xn(e.stateNode.nextSibling):null;return!0}function _a(){xa=wa=null,Ea=!1}var Pa=q.ReactCurrentOwner,Aa=!1;function Ma(e,t,n,r){t.child=null===e?Ci(t,null,n,r):Oi(t,e.child,n,r)}function za(e,t,n,r,o){n=n.render;var i=t.ref;return ri(t,o),r=Xi(e,t,n,r,i,o),null===e||Aa?(t.effectTag|=1,Ma(e,t,r,o),t.child):(t.updateQueue=e.updateQueue,t.effectTag&=-517,e.expirationTime<=o&&(e.expirationTime=0),Xa(e,t,o))}function Fa(e,t,n,r,o,i){if(null===e){var a=n.type;return"function"!=typeof a||Ou(a)||void 0!==a.defaultProps||null!==n.compare||void 0!==n.defaultProps?((e=Tu(n.type,null,r,null,t.mode,i)).ref=t.ref,e.return=t,t.child=e):(t.tag=15,t.type=a,ja(e,t,a,r,o,i))}return a=e.child,o<i&&(o=a.memoizedProps,(n=null!==(n=n.compare)?n:Nr)(o,r)&&e.ref===t.ref)?Xa(e,t,i):(t.effectTag|=1,(e=Cu(a,r)).ref=t.ref,e.return=t,t.child=e)}function ja(e,t,n,r,o,i){return null!==e&&Nr(e.memoizedProps,r)&&e.ref===t.ref&&(Aa=!1,o<i)?(t.expirationTime=e.expirationTime,Xa(e,t,i)):Ra(e,t,n,r,i)}function Da(e,t){var n=t.ref;(null===e&&null!==n||null!==e&&e.ref!==n)&&(t.effectTag|=128)}function Ra(e,t,n,r,o){var i=go(n)?ho:fo.current;return i=mo(t,i),ri(t,o),n=Xi(e,t,n,r,i,o),null===e||Aa?(t.effectTag|=1,Ma(e,t,n,o),t.child):(t.updateQueue=e.updateQueue,t.effectTag&=-517,e.expirationTime<=o&&(e.expirationTime=0),Xa(e,t,o))}function Ia(e,t,n,r,o){if(go(n)){var i=!0;wo(t)}else i=!1;if(ri(t,o),null===t.stateNode)null!==e&&(e.alternate=null,t.alternate=null,t.effectTag|=2),bi(t,n,r),wi(t,n,r,o),r=!0;else if(null===e){var a=t.stateNode,l=t.memoizedProps;a.props=l;var u=a.context,c=n.contextType;"object"==typeof c&&null!==c?c=oi(c):c=mo(t,c=go(n)?ho:fo.current);var s=n.getDerivedStateFromProps,f="function"==typeof s||"function"==typeof a.getSnapshotBeforeUpdate;f||"function"!=typeof a.UNSAFE_componentWillReceiveProps&&"function"!=typeof a.componentWillReceiveProps||(l!==r||u!==c)&&yi(t,a,r,c),ii=!1;var p=t.memoizedState;a.state=p,fi(t,r,a,o),u=t.memoizedState,l!==r||p!==u||po.current||ii?("function"==typeof s&&(mi(t,n,s,r),u=t.memoizedState),(l=ii||vi(t,n,l,r,p,u,c))?(f||"function"!=typeof a.UNSAFE_componentWillMount&&"function"!=typeof a.componentWillMount||("function"==typeof a.componentWillMount&&a.componentWillMount(),"function"==typeof a.UNSAFE_componentWillMount&&a.UNSAFE_componentWillMount()),"function"==typeof a.componentDidMount&&(t.effectTag|=4)):("function"==typeof a.componentDidMount&&(t.effectTag|=4),t.memoizedProps=r,t.memoizedState=u),a.props=r,a.state=u,a.context=c,r=l):("function"==typeof a.componentDidMount&&(t.effectTag|=4),r=!1)}else a=t.stateNode,li(e,t),l=t.memoizedProps,a.props=t.type===t.elementType?l:Go(t.type,l),u=a.context,"object"==typeof(c=n.contextType)&&null!==c?c=oi(c):c=mo(t,c=go(n)?ho:fo.current),(f="function"==typeof(s=n.getDerivedStateFromProps)||"function"==typeof a.getSnapshotBeforeUpdate)||"function"!=typeof a.UNSAFE_componentWillReceiveProps&&"function"!=typeof a.componentWillReceiveProps||(l!==r||u!==c)&&yi(t,a,r,c),ii=!1,u=t.memoizedState,a.state=u,fi(t,r,a,o),p=t.memoizedState,l!==r||u!==p||po.current||ii?("function"==typeof 
s&&(mi(t,n,s,r),p=t.memoizedState),(s=ii||vi(t,n,l,r,u,p,c))?(f||"function"!=typeof a.UNSAFE_componentWillUpdate&&"function"!=typeof a.componentWillUpdate||("function"==typeof a.componentWillUpdate&&a.componentWillUpdate(r,p,c),"function"==typeof a.UNSAFE_componentWillUpdate&&a.UNSAFE_componentWillUpdate(r,p,c)),"function"==typeof a.componentDidUpdate&&(t.effectTag|=4),"function"==typeof a.getSnapshotBeforeUpdate&&(t.effectTag|=256)):("function"!=typeof a.componentDidUpdate||l===e.memoizedProps&&u===e.memoizedState||(t.effectTag|=4),"function"!=typeof a.getSnapshotBeforeUpdate||l===e.memoizedProps&&u===e.memoizedState||(t.effectTag|=256),t.memoizedProps=r,t.memoizedState=p),a.props=r,a.state=p,a.context=c,r=s):("function"!=typeof a.componentDidUpdate||l===e.memoizedProps&&u===e.memoizedState||(t.effectTag|=4),"function"!=typeof a.getSnapshotBeforeUpdate||l===e.memoizedProps&&u===e.memoizedState||(t.effectTag|=256),r=!1);return La(e,t,n,r,i,o)}function La(e,t,n,r,o,i){Da(e,t);var a=0!=(64&t.effectTag);if(!r&&!a)return o&&xo(t,n,!1),Xa(e,t,i);r=t.stateNode,Pa.current=t;var l=a&&"function"!=typeof n.getDerivedStateFromError?null:r.render();return t.effectTag|=1,null!==e&&a?(t.child=Oi(t,e.child,null,i),t.child=Oi(t,null,l,i)):Ma(e,t,l,i),t.memoizedState=r.state,o&&xo(t,n,!0),t.child}function Na(e){var t=e.stateNode;t.pendingContext?bo(0,t.pendingContext,t.pendingContext!==t.context):t.context&&bo(0,t.context,!1),zi(e,t.containerInfo)}var Va,Ha,Ba,Wa={dehydrated:null,retryTime:0};function Ua(e,t,n){var r,o=t.mode,i=t.pendingProps,a=Ri.current,l=!1;if((r=0!=(64&t.effectTag))||(r=0!=(2&a)&&(null===e||null!==e.memoizedState)),r?(l=!0,t.effectTag&=-65):null!==e&&null===e.memoizedState||void 0===i.fallback||!0===i.unstable_avoidThisFallback||(a|=1),co(Ri,1&a),null===e){if(void 0!==i.fallback&&Oa(t),l){if(l=i.fallback,(i=_u(null,o,0,null)).return=t,0==(2&t.mode))for(e=null!==t.memoizedState?t.child.child:t.child,i.child=e;null!==e;)e.return=i,e=e.sibling;return(n=_u(l,o,n,null)).return=t,i.sibling=n,t.memoizedState=Wa,t.child=i,n}return o=i.children,t.memoizedState=null,t.child=Ci(t,null,o,n)}if(null!==e.memoizedState){if(o=(e=e.child).sibling,l){if(i=i.fallback,(n=Cu(e,e.pendingProps)).return=t,0==(2&t.mode)&&(l=null!==t.memoizedState?t.child.child:t.child)!==e.child)for(n.child=l;null!==l;)l.return=n,l=l.sibling;return(o=Cu(o,i)).return=t,n.sibling=o,n.childExpirationTime=0,t.memoizedState=Wa,t.child=n,o}return n=Oi(t,e.child,i.children,n),t.memoizedState=null,t.child=n}if(e=e.child,l){if(l=i.fallback,(i=_u(null,o,0,null)).return=t,i.child=e,null!==e&&(e.return=i),0==(2&t.mode))for(e=null!==t.memoizedState?t.child.child:t.child,i.child=e;null!==e;)e.return=i,e=e.sibling;return(n=_u(l,o,n,null)).return=t,i.sibling=n,n.effectTag|=2,i.childExpirationTime=0,t.memoizedState=Wa,t.child=i,n}return t.memoizedState=null,t.child=Oi(t,e,i.children,n)}function $a(e,t){e.expirationTime<t&&(e.expirationTime=t);var n=e.alternate;null!==n&&n.expirationTime<t&&(n.expirationTime=t),ni(e.return,t)}function Ya(e,t,n,r,o,i){var a=e.memoizedState;null===a?e.memoizedState={isBackwards:t,rendering:null,renderingStartTime:0,last:r,tail:n,tailExpiration:0,tailMode:o,lastEffect:i}:(a.isBackwards=t,a.rendering=null,a.renderingStartTime=0,a.last=r,a.tail=n,a.tailExpiration=0,a.tailMode=o,a.lastEffect=i)}function Qa(e,t,n){var 
r=t.pendingProps,o=r.revealOrder,i=r.tail;if(Ma(e,t,r.children,n),0!=(2&(r=Ri.current)))r=1&r|2,t.effectTag|=64;else{if(null!==e&&0!=(64&e.effectTag))e:for(e=t.child;null!==e;){if(13===e.tag)null!==e.memoizedState&&$a(e,n);else if(19===e.tag)$a(e,n);else if(null!==e.child){e.child.return=e,e=e.child;continue}if(e===t)break e;for(;null===e.sibling;){if(null===e.return||e.return===t)break e;e=e.return}e.sibling.return=e.return,e=e.sibling}r&=1}if(co(Ri,r),0==(2&t.mode))t.memoizedState=null;else switch(o){case"forwards":for(n=t.child,o=null;null!==n;)null!==(e=n.alternate)&&null===Ii(e)&&(o=n),n=n.sibling;null===(n=o)?(o=t.child,t.child=null):(o=n.sibling,n.sibling=null),Ya(t,!1,o,n,i,t.lastEffect);break;case"backwards":for(n=null,o=t.child,t.child=null;null!==o;){if(null!==(e=o.alternate)&&null===Ii(e)){t.child=o;break}e=o.sibling,o.sibling=n,n=o,o=e}Ya(t,!0,n,null,i,t.lastEffect);break;case"together":Ya(t,!1,null,null,void 0,t.lastEffect);break;default:t.memoizedState=null}return t.child}function Xa(e,t,n){null!==e&&(t.dependencies=e.dependencies);var r=t.expirationTime;if(0!==r&&au(r),t.childExpirationTime<n)return null;if(null!==e&&t.child!==e.child)throw Error(a(153));if(null!==t.child){for(n=Cu(e=t.child,e.pendingProps),t.child=n,n.return=t;null!==e.sibling;)e=e.sibling,(n=n.sibling=Cu(e,e.pendingProps)).return=t;n.sibling=null}return t.child}function Ga(e,t){switch(e.tailMode){case"hidden":t=e.tail;for(var n=null;null!==t;)null!==t.alternate&&(n=t),t=t.sibling;null===n?e.tail=null:n.sibling=null;break;case"collapsed":n=e.tail;for(var r=null;null!==n;)null!==n.alternate&&(r=n),n=n.sibling;null===r?t||null===e.tail?e.tail=null:e.tail.sibling=null:r.sibling=null}}function qa(e,t,n){var r=t.pendingProps;switch(t.tag){case 2:case 16:case 15:case 0:case 11:case 7:case 8:case 12:case 9:case 14:return null;case 1:return go(t.type)&&vo(),null;case 3:return Fi(),uo(po),uo(fo),(n=t.stateNode).pendingContext&&(n.context=n.pendingContext,n.pendingContext=null),null!==e&&null!==e.child||!Ta(t)||(t.effectTag|=4),null;case 5:Di(t),n=Mi(Ai.current);var i=t.type;if(null!==e&&null!=t.stateNode)Ha(e,t,i,r,n),e.ref!==t.ref&&(t.effectTag|=128);else{if(!r){if(null===t.stateNode)throw Error(a(166));return null}if(e=Mi(_i.current),Ta(t)){r=t.stateNode,i=t.type;var l=t.memoizedProps;switch(r[Sn]=t,r[On]=l,i){case"iframe":case"object":case"embed":Qt("load",r);break;case"video":case"audio":for(e=0;e<qe.length;e++)Qt(qe[e],r);break;case"source":Qt("error",r);break;case"img":case"image":case"link":Qt("error",r),Qt("load",r);break;case"form":Qt("reset",r),Qt("submit",r);break;case"details":Qt("toggle",r);break;case"input":ke(r,l),Qt("invalid",r),un(n,"onChange");break;case"select":r._wrapperState={wasMultiple:!!l.multiple},Qt("invalid",r),un(n,"onChange");break;case"textarea":Me(r,l),Qt("invalid",r),un(n,"onChange")}for(var u in on(i,l),e=null,l)if(l.hasOwnProperty(u)){var c=l[u];"children"===u?"string"==typeof c?r.textContent!==c&&(e=["children",c]):"number"==typeof c&&r.textContent!==""+c&&(e=["children",""+c]):S.hasOwnProperty(u)&&null!=c&&un(n,u)}switch(i){case"input":we(r),Ce(r,l,!0);break;case"textarea":we(r),Fe(r);break;case"select":case"option":break;default:"function"==typeof l.onClick&&(r.onclick=cn)}n=e,t.updateQueue=n,null!==n&&(t.effectTag|=4)}else{switch(u=9===n.nodeType?n:n.ownerDocument,e===ln&&(e=Re(i)),e===ln?"script"===i?((e=u.createElement("div")).innerHTML="<script><\/script>",e=e.removeChild(e.firstChild)):"string"==typeof 
r.is?e=u.createElement(i,{is:r.is}):(e=u.createElement(i),"select"===i&&(u=e,r.multiple?u.multiple=!0:r.size&&(u.size=r.size))):e=u.createElementNS(e,i),e[Sn]=t,e[On]=r,Va(e,t),t.stateNode=e,u=an(i,r),i){case"iframe":case"object":case"embed":Qt("load",e),c=r;break;case"video":case"audio":for(c=0;c<qe.length;c++)Qt(qe[c],e);c=r;break;case"source":Qt("error",e),c=r;break;case"img":case"image":case"link":Qt("error",e),Qt("load",e),c=r;break;case"form":Qt("reset",e),Qt("submit",e),c=r;break;case"details":Qt("toggle",e),c=r;break;case"input":ke(e,r),c=Ee(e,r),Qt("invalid",e),un(n,"onChange");break;case"option":c=_e(e,r);break;case"select":e._wrapperState={wasMultiple:!!r.multiple},c=o({},r,{value:void 0}),Qt("invalid",e),un(n,"onChange");break;case"textarea":Me(e,r),c=Ae(e,r),Qt("invalid",e),un(n,"onChange");break;default:c=r}on(i,c);var s=c;for(l in s)if(s.hasOwnProperty(l)){var f=s[l];"style"===l?nn(e,f):"dangerouslySetInnerHTML"===l?null!=(f=f?f.__html:void 0)&&Ne(e,f):"children"===l?"string"==typeof f?("textarea"!==i||""!==f)&&Ve(e,f):"number"==typeof f&&Ve(e,""+f):"suppressContentEditableWarning"!==l&&"suppressHydrationWarning"!==l&&"autoFocus"!==l&&(S.hasOwnProperty(l)?null!=f&&un(n,l):null!=f&&K(e,l,f,u))}switch(i){case"input":we(e),Ce(e,r,!1);break;case"textarea":we(e),Fe(e);break;case"option":null!=r.value&&e.setAttribute("value",""+be(r.value));break;case"select":e.multiple=!!r.multiple,null!=(n=r.value)?Pe(e,!!r.multiple,n,!1):null!=r.defaultValue&&Pe(e,!!r.multiple,r.defaultValue,!0);break;default:"function"==typeof c.onClick&&(e.onclick=cn)}vn(i,r)&&(t.effectTag|=4)}null!==t.ref&&(t.effectTag|=128)}return null;case 6:if(e&&null!=t.stateNode)Ba(0,t,e.memoizedProps,r);else{if("string"!=typeof r&&null===t.stateNode)throw Error(a(166));n=Mi(Ai.current),Mi(_i.current),Ta(t)?(n=t.stateNode,r=t.memoizedProps,n[Sn]=t,n.nodeValue!==r&&(t.effectTag|=4)):((n=(9===n.nodeType?n:n.ownerDocument).createTextNode(r))[Sn]=t,t.stateNode=n)}return null;case 13:return uo(Ri),r=t.memoizedState,0!=(64&t.effectTag)?(t.expirationTime=n,t):(n=null!==r,r=!1,null===e?void 0!==t.memoizedProps.fallback&&Ta(t):(r=null!==(i=e.memoizedState),n||null===i||null!==(i=e.child.sibling)&&(null!==(l=t.firstEffect)?(t.firstEffect=i,i.nextEffect=l):(t.firstEffect=t.lastEffect=i,i.nextEffect=null),i.effectTag=8)),n&&!r&&0!=(2&t.mode)&&(null===e&&!0!==t.memoizedProps.unstable_avoidThisFallback||0!=(1&Ri.current)?Tl===wl&&(Tl=xl):(Tl!==wl&&Tl!==xl||(Tl=El),0!==zl&&null!==Sl&&(Fu(Sl,Cl),ju(Sl,zl)))),(n||r)&&(t.effectTag|=4),null);case 4:return Fi(),null;case 10:return ti(t),null;case 17:return go(t.type)&&vo(),null;case 19:if(uo(Ri),null===(r=t.memoizedState))return null;if(i=0!=(64&t.effectTag),null===(l=r.rendering)){if(i)Ga(r,!1);else 
if(Tl!==wl||null!==e&&0!=(64&e.effectTag))for(l=t.child;null!==l;){if(null!==(e=Ii(l))){for(t.effectTag|=64,Ga(r,!1),null!==(i=e.updateQueue)&&(t.updateQueue=i,t.effectTag|=4),null===r.lastEffect&&(t.firstEffect=null),t.lastEffect=r.lastEffect,r=t.child;null!==r;)l=n,(i=r).effectTag&=2,i.nextEffect=null,i.firstEffect=null,i.lastEffect=null,null===(e=i.alternate)?(i.childExpirationTime=0,i.expirationTime=l,i.child=null,i.memoizedProps=null,i.memoizedState=null,i.updateQueue=null,i.dependencies=null):(i.childExpirationTime=e.childExpirationTime,i.expirationTime=e.expirationTime,i.child=e.child,i.memoizedProps=e.memoizedProps,i.memoizedState=e.memoizedState,i.updateQueue=e.updateQueue,l=e.dependencies,i.dependencies=null===l?null:{expirationTime:l.expirationTime,firstContext:l.firstContext,responders:l.responders}),r=r.sibling;return co(Ri,1&Ri.current|2),t.child}l=l.sibling}}else{if(!i)if(null!==(e=Ii(l))){if(t.effectTag|=64,i=!0,null!==(n=e.updateQueue)&&(t.updateQueue=n,t.effectTag|=4),Ga(r,!0),null===r.tail&&"hidden"===r.tailMode&&!l.alternate)return null!==(t=t.lastEffect=r.lastEffect)&&(t.nextEffect=null),null}else 2*Vo()-r.renderingStartTime>r.tailExpiration&&1<n&&(t.effectTag|=64,i=!0,Ga(r,!1),t.expirationTime=t.childExpirationTime=n-1);r.isBackwards?(l.sibling=t.child,t.child=l):(null!==(n=r.last)?n.sibling=l:t.child=l,r.last=l)}return null!==r.tail?(0===r.tailExpiration&&(r.tailExpiration=Vo()+500),n=r.tail,r.rendering=n,r.tail=n.sibling,r.lastEffect=t.lastEffect,r.renderingStartTime=Vo(),n.sibling=null,t=Ri.current,co(Ri,i?1&t|2:1&t),n):null}throw Error(a(156,t.tag))}function Ka(e){switch(e.tag){case 1:go(e.type)&&vo();var t=e.effectTag;return 4096&t?(e.effectTag=-4097&t|64,e):null;case 3:if(Fi(),uo(po),uo(fo),0!=(64&(t=e.effectTag)))throw Error(a(285));return e.effectTag=-4097&t|64,e;case 5:return Di(e),null;case 13:return uo(Ri),4096&(t=e.effectTag)?(e.effectTag=-4097&t|64,e):null;case 19:return uo(Ri),null;case 4:return Fi(),null;case 10:return ti(e),null;default:return null}}function Za(e,t){return{value:e,source:t,stack:ve(t)}}Va=function(e,t){for(var n=t.child;null!==n;){if(5===n.tag||6===n.tag)e.appendChild(n.stateNode);else if(4!==n.tag&&null!==n.child){n.child.return=n,n=n.child;continue}if(n===t)break;for(;null===n.sibling;){if(null===n.return||n.return===t)return;n=n.return}n.sibling.return=n.return,n=n.sibling}},Ha=function(e,t,n,r,i){var a=e.memoizedProps;if(a!==r){var l,u,c=t.stateNode;switch(Mi(_i.current),e=null,n){case"input":a=Ee(c,a),r=Ee(c,r),e=[];break;case"option":a=_e(c,a),r=_e(c,r),e=[];break;case"select":a=o({},a,{value:void 0}),r=o({},r,{value:void 0}),e=[];break;case"textarea":a=Ae(c,a),r=Ae(c,r),e=[];break;default:"function"!=typeof a.onClick&&"function"==typeof r.onClick&&(c.onclick=cn)}for(l in on(n,r),n=null,a)if(!r.hasOwnProperty(l)&&a.hasOwnProperty(l)&&null!=a[l])if("style"===l)for(u in c=a[l])c.hasOwnProperty(u)&&(n||(n={}),n[u]="");else"dangerouslySetInnerHTML"!==l&&"children"!==l&&"suppressContentEditableWarning"!==l&&"suppressHydrationWarning"!==l&&"autoFocus"!==l&&(S.hasOwnProperty(l)?e||(e=[]):(e=e||[]).push(l,null));for(l in r){var s=r[l];if(c=null!=a?a[l]:void 0,r.hasOwnProperty(l)&&s!==c&&(null!=s||null!=c))if("style"===l)if(c){for(u in c)!c.hasOwnProperty(u)||s&&s.hasOwnProperty(u)||(n||(n={}),n[u]="");for(u in s)s.hasOwnProperty(u)&&c[u]!==s[u]&&(n||(n={}),n[u]=s[u])}else n||(e||(e=[]),e.push(l,n)),n=s;else"dangerouslySetInnerHTML"===l?(s=s?s.__html:void 0,c=c?c.__html:void 
0,null!=s&&c!==s&&(e=e||[]).push(l,s)):"children"===l?c===s||"string"!=typeof s&&"number"!=typeof s||(e=e||[]).push(l,""+s):"suppressContentEditableWarning"!==l&&"suppressHydrationWarning"!==l&&(S.hasOwnProperty(l)?(null!=s&&un(i,l),e||c===s||(e=[])):(e=e||[]).push(l,s))}n&&(e=e||[]).push("style",n),i=e,(t.updateQueue=i)&&(t.effectTag|=4)}},Ba=function(e,t,n,r){n!==r&&(t.effectTag|=4)};var Ja="function"==typeof WeakSet?WeakSet:Set;function el(e,t){var n=t.source,r=t.stack;null===r&&null!==n&&(r=ve(n)),null!==n&&ge(n.type),t=t.value,null!==e&&1===e.tag&&ge(e.type);try{console.error(t)}catch(e){setTimeout((function(){throw e}))}}function tl(e){var t=e.ref;if(null!==t)if("function"==typeof t)try{t(null)}catch(t){bu(e,t)}else t.current=null}function nl(e,t){switch(t.tag){case 0:case 11:case 15:case 22:return;case 1:if(256&t.effectTag&&null!==e){var n=e.memoizedProps,r=e.memoizedState;t=(e=t.stateNode).getSnapshotBeforeUpdate(t.elementType===t.type?n:Go(t.type,n),r),e.__reactInternalSnapshotBeforeUpdate=t}return;case 3:case 5:case 6:case 4:case 17:return}throw Error(a(163))}function rl(e,t){if(null!==(t=null!==(t=t.updateQueue)?t.lastEffect:null)){var n=t=t.next;do{if((n.tag&e)===e){var r=n.destroy;n.destroy=void 0,void 0!==r&&r()}n=n.next}while(n!==t)}}function ol(e,t){if(null!==(t=null!==(t=t.updateQueue)?t.lastEffect:null)){var n=t=t.next;do{if((n.tag&e)===e){var r=n.create;n.destroy=r()}n=n.next}while(n!==t)}}function il(e,t,n){switch(n.tag){case 0:case 11:case 15:case 22:return void ol(3,n);case 1:if(e=n.stateNode,4&n.effectTag)if(null===t)e.componentDidMount();else{var r=n.elementType===n.type?t.memoizedProps:Go(n.type,t.memoizedProps);e.componentDidUpdate(r,t.memoizedState,e.__reactInternalSnapshotBeforeUpdate)}return void(null!==(t=n.updateQueue)&&pi(n,t,e));case 3:if(null!==(t=n.updateQueue)){if(e=null,null!==n.child)switch(n.child.tag){case 5:e=n.child.stateNode;break;case 1:e=n.child.stateNode}pi(n,t,e)}return;case 5:return e=n.stateNode,void(null===t&&4&n.effectTag&&vn(n.type,n.memoizedProps)&&e.focus());case 6:case 4:case 12:return;case 13:return void(null===n.memoizedState&&(n=n.alternate,null!==n&&(n=n.memoizedState,null!==n&&(n=n.dehydrated,null!==n&&Rt(n)))));case 19:case 17:case 20:case 21:return}throw Error(a(163))}function al(e,t,n){switch("function"==typeof Eu&&Eu(t),t.tag){case 0:case 11:case 14:case 15:case 22:if(null!==(e=t.updateQueue)&&null!==(e=e.lastEffect)){var r=e.next;Wo(97<n?97:n,(function(){var e=r;do{var n=e.destroy;if(void 0!==n){var o=t;try{n()}catch(e){bu(o,e)}}e=e.next}while(e!==r)}))}break;case 1:tl(t),"function"==typeof(n=t.stateNode).componentWillUnmount&&function(e,t){try{t.props=e.memoizedProps,t.state=e.memoizedState,t.componentWillUnmount()}catch(t){bu(e,t)}}(t,n);break;case 5:tl(t);break;case 4:sl(e,t,n)}}function ll(e){var t=e.alternate;e.return=null,e.child=null,e.memoizedState=null,e.updateQueue=null,e.dependencies=null,e.alternate=null,e.firstEffect=null,e.lastEffect=null,e.pendingProps=null,e.memoizedProps=null,e.stateNode=null,null!==t&&ll(t)}function ul(e){return 5===e.tag||3===e.tag||4===e.tag}function cl(e){e:{for(var t=e.return;null!==t;){if(ul(t)){var n=t;break e}t=t.return}throw Error(a(160))}switch(t=n.stateNode,n.tag){case 5:var r=!1;break;case 3:case 4:t=t.containerInfo,r=!0;break;default:throw Error(a(161))}16&n.effectTag&&(Ve(t,""),n.effectTag&=-17);e:t:for(n=e;;){for(;null===n.sibling;){if(null===n.return||ul(n.return)){n=null;break 
e}n=n.return}for(n.sibling.return=n.return,n=n.sibling;5!==n.tag&&6!==n.tag&&18!==n.tag;){if(2&n.effectTag)continue t;if(null===n.child||4===n.tag)continue t;n.child.return=n,n=n.child}if(!(2&n.effectTag)){n=n.stateNode;break e}}r?function e(t,n,r){var o=t.tag,i=5===o||6===o;if(i)t=i?t.stateNode:t.stateNode.instance,n?8===r.nodeType?r.parentNode.insertBefore(t,n):r.insertBefore(t,n):(8===r.nodeType?(n=r.parentNode).insertBefore(t,r):(n=r).appendChild(t),null!==(r=r._reactRootContainer)&&void 0!==r||null!==n.onclick||(n.onclick=cn));else if(4!==o&&null!==(t=t.child))for(e(t,n,r),t=t.sibling;null!==t;)e(t,n,r),t=t.sibling}(e,n,t):function e(t,n,r){var o=t.tag,i=5===o||6===o;if(i)t=i?t.stateNode:t.stateNode.instance,n?r.insertBefore(t,n):r.appendChild(t);else if(4!==o&&null!==(t=t.child))for(e(t,n,r),t=t.sibling;null!==t;)e(t,n,r),t=t.sibling}(e,n,t)}function sl(e,t,n){for(var r,o,i=t,l=!1;;){if(!l){l=i.return;e:for(;;){if(null===l)throw Error(a(160));switch(r=l.stateNode,l.tag){case 5:o=!1;break e;case 3:case 4:r=r.containerInfo,o=!0;break e}l=l.return}l=!0}if(5===i.tag||6===i.tag){e:for(var u=e,c=i,s=n,f=c;;)if(al(u,f,s),null!==f.child&&4!==f.tag)f.child.return=f,f=f.child;else{if(f===c)break e;for(;null===f.sibling;){if(null===f.return||f.return===c)break e;f=f.return}f.sibling.return=f.return,f=f.sibling}o?(u=r,c=i.stateNode,8===u.nodeType?u.parentNode.removeChild(c):u.removeChild(c)):r.removeChild(i.stateNode)}else if(4===i.tag){if(null!==i.child){r=i.stateNode.containerInfo,o=!0,i.child.return=i,i=i.child;continue}}else if(al(e,i,n),null!==i.child){i.child.return=i,i=i.child;continue}if(i===t)break;for(;null===i.sibling;){if(null===i.return||i.return===t)return;4===(i=i.return).tag&&(l=!1)}i.sibling.return=i.return,i=i.sibling}}function fl(e,t){switch(t.tag){case 0:case 11:case 14:case 15:case 22:return void rl(3,t);case 1:return;case 5:var n=t.stateNode;if(null!=n){var r=t.memoizedProps,o=null!==e?e.memoizedProps:r;e=t.type;var i=t.updateQueue;if(t.updateQueue=null,null!==i){for(n[On]=r,"input"===e&&"radio"===r.type&&null!=r.name&&Se(n,r),an(e,o),t=an(e,r),o=0;o<i.length;o+=2){var l=i[o],u=i[o+1];"style"===l?nn(n,u):"dangerouslySetInnerHTML"===l?Ne(n,u):"children"===l?Ve(n,u):K(n,l,u,t)}switch(e){case"input":Oe(n,r);break;case"textarea":ze(n,r);break;case"select":t=n._wrapperState.wasMultiple,n._wrapperState.wasMultiple=!!r.multiple,null!=(e=r.value)?Pe(n,!!r.multiple,e,!1):t!==!!r.multiple&&(null!=r.defaultValue?Pe(n,!!r.multiple,r.defaultValue,!0):Pe(n,!!r.multiple,r.multiple?[]:"",!1))}}}return;case 6:if(null===t.stateNode)throw Error(a(162));return void(t.stateNode.nodeValue=t.memoizedProps);case 3:return void((t=t.stateNode).hydrate&&(t.hydrate=!1,Rt(t.containerInfo)));case 12:return;case 13:if(n=t,null===t.memoizedState?r=!1:(r=!0,n=t.child,jl=Vo()),null!==n)e:for(e=n;;){if(5===e.tag)i=e.stateNode,r?"function"==typeof(i=i.style).setProperty?i.setProperty("display","none","important"):i.display="none":(i=e.stateNode,o=null!=(o=e.memoizedProps.style)&&o.hasOwnProperty("display")?o.display:null,i.style.display=tn("display",o));else if(6===e.tag)e.stateNode.nodeValue=r?"":e.memoizedProps;else{if(13===e.tag&&null!==e.memoizedState&&null===e.memoizedState.dehydrated){(i=e.child.sibling).return=e,e=i;continue}if(null!==e.child){e.child.return=e,e=e.child;continue}}if(e===n)break;for(;null===e.sibling;){if(null===e.return||e.return===n)break e;e=e.return}e.sibling.return=e.return,e=e.sibling}return void pl(t);case 19:return void pl(t);case 17:return}throw Error(a(163))}function pl(e){var 
t=e.updateQueue;if(null!==t){e.updateQueue=null;var n=e.stateNode;null===n&&(n=e.stateNode=new Ja),t.forEach((function(t){var r=wu.bind(null,e,t);n.has(t)||(n.add(t),t.then(r,r))}))}}var dl="function"==typeof WeakMap?WeakMap:Map;function hl(e,t,n){(n=ui(n,null)).tag=3,n.payload={element:null};var r=t.value;return n.callback=function(){Rl||(Rl=!0,Il=r),el(e,t)},n}function ml(e,t,n){(n=ui(n,null)).tag=3;var r=e.type.getDerivedStateFromError;if("function"==typeof r){var o=t.value;n.payload=function(){return el(e,t),r(o)}}var i=e.stateNode;return null!==i&&"function"==typeof i.componentDidCatch&&(n.callback=function(){"function"!=typeof r&&(null===Ll?Ll=new Set([this]):Ll.add(this),el(e,t));var n=t.stack;this.componentDidCatch(t.value,{componentStack:null!==n?n:""})}),n}var gl,vl=Math.ceil,bl=q.ReactCurrentDispatcher,yl=q.ReactCurrentOwner,wl=0,xl=3,El=4,kl=0,Sl=null,Ol=null,Cl=0,Tl=wl,_l=null,Pl=1073741823,Al=1073741823,Ml=null,zl=0,Fl=!1,jl=0,Dl=null,Rl=!1,Il=null,Ll=null,Nl=!1,Vl=null,Hl=90,Bl=null,Wl=0,Ul=null,$l=0;function Yl(){return 0!=(48&kl)?1073741821-(Vo()/10|0):0!==$l?$l:$l=1073741821-(Vo()/10|0)}function Ql(e,t,n){if(0==(2&(t=t.mode)))return 1073741823;var r=Ho();if(0==(4&t))return 99===r?1073741823:1073741822;if(0!=(16&kl))return Cl;if(null!==n)e=Xo(e,0|n.timeoutMs||5e3,250);else switch(r){case 99:e=1073741823;break;case 98:e=Xo(e,150,100);break;case 97:case 96:e=Xo(e,5e3,250);break;case 95:e=2;break;default:throw Error(a(326))}return null!==Sl&&e===Cl&&--e,e}function Xl(e,t){if(50<Wl)throw Wl=0,Ul=null,Error(a(185));if(null!==(e=Gl(e,t))){var n=Ho();1073741823===t?0!=(8&kl)&&0==(48&kl)?Jl(e):(Kl(e),0===kl&&Yo()):Kl(e),0==(4&kl)||98!==n&&99!==n||(null===Bl?Bl=new Map([[e,t]]):(void 0===(n=Bl.get(e))||n>t)&&Bl.set(e,t))}}function Gl(e,t){e.expirationTime<t&&(e.expirationTime=t);var n=e.alternate;null!==n&&n.expirationTime<t&&(n.expirationTime=t);var r=e.return,o=null;if(null===r&&3===e.tag)o=e.stateNode;else for(;null!==r;){if(n=r.alternate,r.childExpirationTime<t&&(r.childExpirationTime=t),null!==n&&n.childExpirationTime<t&&(n.childExpirationTime=t),null===r.return&&3===r.tag){o=r.stateNode;break}r=r.return}return null!==o&&(Sl===o&&(au(t),Tl===El&&Fu(o,Cl)),ju(o,t)),o}function ql(e){var t=e.lastExpiredTime;if(0!==t)return t;if(!zu(e,t=e.firstPendingTime))return t;var n=e.lastPingedTime;return 2>=(e=n>(e=e.nextKnownPendingLevel)?n:e)&&t!==e?0:e}function Kl(e){if(0!==e.lastExpiredTime)e.callbackExpirationTime=1073741823,e.callbackPriority=99,e.callbackNode=$o(Jl.bind(null,e));else{var t=ql(e),n=e.callbackNode;if(0===t)null!==n&&(e.callbackNode=null,e.callbackExpirationTime=0,e.callbackPriority=90);else{var r=Yl();if(1073741823===t?r=99:1===t||2===t?r=95:r=0>=(r=10*(1073741821-t)-10*(1073741821-r))?99:250>=r?98:5250>=r?97:95,null!==n){var o=e.callbackPriority;if(e.callbackExpirationTime===t&&o>=r)return;n!==Fo&&So(n)}e.callbackExpirationTime=t,e.callbackPriority=r,t=1073741823===t?$o(Jl.bind(null,e)):Uo(r,Zl.bind(null,e),{timeout:10*(1073741821-t)-Vo()}),e.callbackNode=t}}}function Zl(e,t){if($l=0,t)return Du(e,t=Yl()),Kl(e),null;var n=ql(e);if(0!==n){if(t=e.callbackNode,0!=(48&kl))throw Error(a(327));if(mu(),e===Sl&&n===Cl||nu(e,n),null!==Ol){var r=kl;kl|=16;for(var o=ou();;)try{uu();break}catch(t){ru(e,t)}if(ei(),kl=r,bl.current=o,1===Tl)throw t=_l,nu(e,n),Fu(e,n),Kl(e),t;if(null===Ol)switch(o=e.finishedWork=e.current.alternate,e.finishedExpirationTime=n,r=Tl,Sl=null,r){case wl:case 1:throw Error(a(345));case 2:Du(e,2<n?2:n);break;case 
xl:if(Fu(e,n),n===(r=e.lastSuspendedTime)&&(e.nextKnownPendingLevel=fu(o)),1073741823===Pl&&10<(o=jl+500-Vo())){if(Fl){var i=e.lastPingedTime;if(0===i||i>=n){e.lastPingedTime=n,nu(e,n);break}}if(0!==(i=ql(e))&&i!==n)break;if(0!==r&&r!==n){e.lastPingedTime=r;break}e.timeoutHandle=yn(pu.bind(null,e),o);break}pu(e);break;case El:if(Fu(e,n),n===(r=e.lastSuspendedTime)&&(e.nextKnownPendingLevel=fu(o)),Fl&&(0===(o=e.lastPingedTime)||o>=n)){e.lastPingedTime=n,nu(e,n);break}if(0!==(o=ql(e))&&o!==n)break;if(0!==r&&r!==n){e.lastPingedTime=r;break}if(1073741823!==Al?r=10*(1073741821-Al)-Vo():1073741823===Pl?r=0:(r=10*(1073741821-Pl)-5e3,0>(r=(o=Vo())-r)&&(r=0),(n=10*(1073741821-n)-o)<(r=(120>r?120:480>r?480:1080>r?1080:1920>r?1920:3e3>r?3e3:4320>r?4320:1960*vl(r/1960))-r)&&(r=n)),10<r){e.timeoutHandle=yn(pu.bind(null,e),r);break}pu(e);break;case 5:if(1073741823!==Pl&&null!==Ml){i=Pl;var l=Ml;if(0>=(r=0|l.busyMinDurationMs)?r=0:(o=0|l.busyDelayMs,r=(i=Vo()-(10*(1073741821-i)-(0|l.timeoutMs||5e3)))<=o?0:o+r-i),10<r){Fu(e,n),e.timeoutHandle=yn(pu.bind(null,e),r);break}}pu(e);break;default:throw Error(a(329))}if(Kl(e),e.callbackNode===t)return Zl.bind(null,e)}}return null}function Jl(e){var t=e.lastExpiredTime;if(t=0!==t?t:1073741823,0!=(48&kl))throw Error(a(327));if(mu(),e===Sl&&t===Cl||nu(e,t),null!==Ol){var n=kl;kl|=16;for(var r=ou();;)try{lu();break}catch(t){ru(e,t)}if(ei(),kl=n,bl.current=r,1===Tl)throw n=_l,nu(e,t),Fu(e,t),Kl(e),n;if(null!==Ol)throw Error(a(261));e.finishedWork=e.current.alternate,e.finishedExpirationTime=t,Sl=null,pu(e),Kl(e)}return null}function eu(e,t){var n=kl;kl|=1;try{return e(t)}finally{0===(kl=n)&&Yo()}}function tu(e,t){var n=kl;kl&=-2,kl|=8;try{return e(t)}finally{0===(kl=n)&&Yo()}}function nu(e,t){e.finishedWork=null,e.finishedExpirationTime=0;var n=e.timeoutHandle;if(-1!==n&&(e.timeoutHandle=-1,wn(n)),null!==Ol)for(n=Ol.return;null!==n;){var r=n;switch(r.tag){case 1:null!=(r=r.type.childContextTypes)&&vo();break;case 3:Fi(),uo(po),uo(fo);break;case 5:Di(r);break;case 4:Fi();break;case 13:case 19:uo(Ri);break;case 10:ti(r)}n=n.return}Sl=e,Ol=Cu(e.current,null),Cl=t,Tl=wl,_l=null,Al=Pl=1073741823,Ml=null,zl=0,Fl=!1}function ru(e,t){for(;;){try{if(ei(),Ni.current=ga,$i)for(var n=Bi.memoizedState;null!==n;){var r=n.queue;null!==r&&(r.pending=null),n=n.next}if(Hi=0,Ui=Wi=Bi=null,$i=!1,null===Ol||null===Ol.return)return Tl=1,_l=t,Ol=null;e:{var o=e,i=Ol.return,a=Ol,l=t;if(t=Cl,a.effectTag|=2048,a.firstEffect=a.lastEffect=null,null!==l&&"object"==typeof l&&"function"==typeof l.then){var u=l;if(0==(2&a.mode)){var c=a.alternate;c?(a.updateQueue=c.updateQueue,a.memoizedState=c.memoizedState,a.expirationTime=c.expirationTime):(a.updateQueue=null,a.memoizedState=null)}var s=0!=(1&Ri.current),f=i;do{var p;if(p=13===f.tag){var d=f.memoizedState;if(null!==d)p=null!==d.dehydrated;else{var h=f.memoizedProps;p=void 0!==h.fallback&&(!0!==h.unstable_avoidThisFallback||!s)}}if(p){var m=f.updateQueue;if(null===m){var g=new Set;g.add(u),f.updateQueue=g}else m.add(u);if(0==(2&f.mode)){if(f.effectTag|=64,a.effectTag&=-2981,1===a.tag)if(null===a.alternate)a.tag=17;else{var v=ui(1073741823,null);v.tag=2,ci(a,v)}a.expirationTime=1073741823;break e}l=void 0,a=t;var b=o.pingCache;if(null===b?(b=o.pingCache=new dl,l=new Set,b.set(u,l)):void 0===(l=b.get(u))&&(l=new Set,b.set(u,l)),!l.has(a)){l.add(a);var y=yu.bind(null,o,u,a);u.then(y,y)}f.effectTag|=4096,f.expirationTime=t;break e}f=f.return}while(null!==f);l=Error((ge(a.type)||"A React component")+" suspended while rendering, but no fallback UI was 
specified.\n\nAdd a <Suspense fallback=...> component higher in the tree to provide a loading indicator or placeholder to display."+ve(a))}5!==Tl&&(Tl=2),l=Za(l,a),f=i;do{switch(f.tag){case 3:u=l,f.effectTag|=4096,f.expirationTime=t,si(f,hl(f,u,t));break e;case 1:u=l;var w=f.type,x=f.stateNode;if(0==(64&f.effectTag)&&("function"==typeof w.getDerivedStateFromError||null!==x&&"function"==typeof x.componentDidCatch&&(null===Ll||!Ll.has(x)))){f.effectTag|=4096,f.expirationTime=t,si(f,ml(f,u,t));break e}}f=f.return}while(null!==f)}Ol=su(Ol)}catch(e){t=e;continue}break}}function ou(){var e=bl.current;return bl.current=ga,null===e?ga:e}function iu(e,t){e<Pl&&2<e&&(Pl=e),null!==t&&e<Al&&2<e&&(Al=e,Ml=t)}function au(e){e>zl&&(zl=e)}function lu(){for(;null!==Ol;)Ol=cu(Ol)}function uu(){for(;null!==Ol&&!jo();)Ol=cu(Ol)}function cu(e){var t=gl(e.alternate,e,Cl);return e.memoizedProps=e.pendingProps,null===t&&(t=su(e)),yl.current=null,t}function su(e){Ol=e;do{var t=Ol.alternate;if(e=Ol.return,0==(2048&Ol.effectTag)){if(t=qa(t,Ol,Cl),1===Cl||1!==Ol.childExpirationTime){for(var n=0,r=Ol.child;null!==r;){var o=r.expirationTime,i=r.childExpirationTime;o>n&&(n=o),i>n&&(n=i),r=r.sibling}Ol.childExpirationTime=n}if(null!==t)return t;null!==e&&0==(2048&e.effectTag)&&(null===e.firstEffect&&(e.firstEffect=Ol.firstEffect),null!==Ol.lastEffect&&(null!==e.lastEffect&&(e.lastEffect.nextEffect=Ol.firstEffect),e.lastEffect=Ol.lastEffect),1<Ol.effectTag&&(null!==e.lastEffect?e.lastEffect.nextEffect=Ol:e.firstEffect=Ol,e.lastEffect=Ol))}else{if(null!==(t=Ka(Ol)))return t.effectTag&=2047,t;null!==e&&(e.firstEffect=e.lastEffect=null,e.effectTag|=2048)}if(null!==(t=Ol.sibling))return t;Ol=e}while(null!==Ol);return Tl===wl&&(Tl=5),null}function fu(e){var t=e.expirationTime;return t>(e=e.childExpirationTime)?t:e}function pu(e){var t=Ho();return Wo(99,du.bind(null,e,t)),null}function du(e,t){do{mu()}while(null!==Vl);if(0!=(48&kl))throw Error(a(327));var n=e.finishedWork,r=e.finishedExpirationTime;if(null===n)return null;if(e.finishedWork=null,e.finishedExpirationTime=0,n===e.current)throw Error(a(177));e.callbackNode=null,e.callbackExpirationTime=0,e.callbackPriority=90,e.nextKnownPendingLevel=0;var o=fu(n);if(e.firstPendingTime=o,r<=e.lastSuspendedTime?e.firstSuspendedTime=e.lastSuspendedTime=e.nextKnownPendingLevel=0:r<=e.firstSuspendedTime&&(e.firstSuspendedTime=r-1),r<=e.lastPingedTime&&(e.lastPingedTime=0),r<=e.lastExpiredTime&&(e.lastExpiredTime=0),e===Sl&&(Ol=Sl=null,Cl=0),1<n.effectTag?null!==n.lastEffect?(n.lastEffect.nextEffect=n,o=n.firstEffect):o=n:o=n.firstEffect,null!==o){var i=kl;kl|=32,yl.current=null,mn=Yt;var l=dn();if(hn(l)){if("selectionStart"in l)var u={start:l.selectionStart,end:l.selectionEnd};else e:{var c=(u=(u=l.ownerDocument)&&u.defaultView||window).getSelection&&u.getSelection();if(c&&0!==c.rangeCount){u=c.anchorNode;var s=c.anchorOffset,f=c.focusNode;c=c.focusOffset;try{u.nodeType,f.nodeType}catch(e){u=null;break e}var p=0,d=-1,h=-1,m=0,g=0,v=l,b=null;t:for(;;){for(var y;v!==u||0!==s&&3!==v.nodeType||(d=p+s),v!==f||0!==c&&3!==v.nodeType||(h=p+c),3===v.nodeType&&(p+=v.nodeValue.length),null!==(y=v.firstChild);)b=v,v=y;for(;;){if(v===l)break t;if(b===u&&++m===s&&(d=p),b===f&&++g===c&&(h=p),null!==(y=v.nextSibling))break;b=(v=b).parentNode}v=y}u=-1===d||-1===h?null:{start:d,end:h}}else u=null}u=u||{start:0,end:0}}else u=null;gn={activeElementDetached:null,focusedElem:l,selectionRange:u},Yt=!1,Dl=o;do{try{hu()}catch(e){if(null===Dl)throw 
Error(a(330));bu(Dl,e),Dl=Dl.nextEffect}}while(null!==Dl);Dl=o;do{try{for(l=e,u=t;null!==Dl;){var w=Dl.effectTag;if(16&w&&Ve(Dl.stateNode,""),128&w){var x=Dl.alternate;if(null!==x){var E=x.ref;null!==E&&("function"==typeof E?E(null):E.current=null)}}switch(1038&w){case 2:cl(Dl),Dl.effectTag&=-3;break;case 6:cl(Dl),Dl.effectTag&=-3,fl(Dl.alternate,Dl);break;case 1024:Dl.effectTag&=-1025;break;case 1028:Dl.effectTag&=-1025,fl(Dl.alternate,Dl);break;case 4:fl(Dl.alternate,Dl);break;case 8:sl(l,s=Dl,u),ll(s)}Dl=Dl.nextEffect}}catch(e){if(null===Dl)throw Error(a(330));bu(Dl,e),Dl=Dl.nextEffect}}while(null!==Dl);if(E=gn,x=dn(),w=E.focusedElem,u=E.selectionRange,x!==w&&w&&w.ownerDocument&&function e(t,n){return!(!t||!n)&&(t===n||(!t||3!==t.nodeType)&&(n&&3===n.nodeType?e(t,n.parentNode):"contains"in t?t.contains(n):!!t.compareDocumentPosition&&!!(16&t.compareDocumentPosition(n))))}(w.ownerDocument.documentElement,w)){null!==u&&hn(w)&&(x=u.start,void 0===(E=u.end)&&(E=x),"selectionStart"in w?(w.selectionStart=x,w.selectionEnd=Math.min(E,w.value.length)):(E=(x=w.ownerDocument||document)&&x.defaultView||window).getSelection&&(E=E.getSelection(),s=w.textContent.length,l=Math.min(u.start,s),u=void 0===u.end?l:Math.min(u.end,s),!E.extend&&l>u&&(s=u,u=l,l=s),s=pn(w,l),f=pn(w,u),s&&f&&(1!==E.rangeCount||E.anchorNode!==s.node||E.anchorOffset!==s.offset||E.focusNode!==f.node||E.focusOffset!==f.offset)&&((x=x.createRange()).setStart(s.node,s.offset),E.removeAllRanges(),l>u?(E.addRange(x),E.extend(f.node,f.offset)):(x.setEnd(f.node,f.offset),E.addRange(x))))),x=[];for(E=w;E=E.parentNode;)1===E.nodeType&&x.push({element:E,left:E.scrollLeft,top:E.scrollTop});for("function"==typeof w.focus&&w.focus(),w=0;w<x.length;w++)(E=x[w]).element.scrollLeft=E.left,E.element.scrollTop=E.top}Yt=!!mn,gn=mn=null,e.current=n,Dl=o;do{try{for(w=e;null!==Dl;){var k=Dl.effectTag;if(36&k&&il(w,Dl.alternate,Dl),128&k){x=void 0;var S=Dl.ref;if(null!==S){var O=Dl.stateNode;switch(Dl.tag){case 5:x=O;break;default:x=O}"function"==typeof S?S(x):S.current=x}}Dl=Dl.nextEffect}}catch(e){if(null===Dl)throw Error(a(330));bu(Dl,e),Dl=Dl.nextEffect}}while(null!==Dl);Dl=null,Do(),kl=i}else e.current=n;if(Nl)Nl=!1,Vl=e,Hl=t;else for(Dl=o;null!==Dl;)t=Dl.nextEffect,Dl.nextEffect=null,Dl=t;if(0===(t=e.firstPendingTime)&&(Ll=null),1073741823===t?e===Ul?Wl++:(Wl=0,Ul=e):Wl=0,"function"==typeof xu&&xu(n.stateNode,r),Kl(e),Rl)throw Rl=!1,e=Il,Il=null,e;return 0!=(8&kl)||Yo(),null}function hu(){for(;null!==Dl;){var e=Dl.effectTag;0!=(256&e)&&nl(Dl.alternate,Dl),0==(512&e)||Nl||(Nl=!0,Uo(97,(function(){return mu(),null}))),Dl=Dl.nextEffect}}function mu(){if(90!==Hl){var e=97<Hl?97:Hl;return Hl=90,Wo(e,gu)}}function gu(){if(null===Vl)return!1;var e=Vl;if(Vl=null,0!=(48&kl))throw Error(a(331));var t=kl;for(kl|=32,e=e.current.firstEffect;null!==e;){try{var n=e;if(0!=(512&n.effectTag))switch(n.tag){case 0:case 11:case 15:case 22:rl(5,n),ol(5,n)}}catch(t){if(null===e)throw Error(a(330));bu(e,t)}n=e.nextEffect,e.nextEffect=null,e=n}return kl=t,Yo(),!0}function vu(e,t,n){ci(e,t=hl(e,t=Za(n,t),1073741823)),null!==(e=Gl(e,1073741823))&&Kl(e)}function bu(e,t){if(3===e.tag)vu(e,e,t);else for(var n=e.return;null!==n;){if(3===n.tag){vu(n,e,t);break}if(1===n.tag){var r=n.stateNode;if("function"==typeof n.type.getDerivedStateFromError||"function"==typeof r.componentDidCatch&&(null===Ll||!Ll.has(r))){ci(n,e=ml(n,e=Za(t,e),1073741823)),null!==(n=Gl(n,1073741823))&&Kl(n);break}}n=n.return}}function yu(e,t,n){var 
r=e.pingCache;null!==r&&r.delete(t),Sl===e&&Cl===n?Tl===El||Tl===xl&&1073741823===Pl&&Vo()-jl<500?nu(e,Cl):Fl=!0:zu(e,n)&&(0!==(t=e.lastPingedTime)&&t<n||(e.lastPingedTime=n,Kl(e)))}function wu(e,t){var n=e.stateNode;null!==n&&n.delete(t),0===(t=0)&&(t=Ql(t=Yl(),e,null)),null!==(e=Gl(e,t))&&Kl(e)}gl=function(e,t,n){var r=t.expirationTime;if(null!==e){var o=t.pendingProps;if(e.memoizedProps!==o||po.current)Aa=!0;else{if(r<n){switch(Aa=!1,t.tag){case 3:Na(t),_a();break;case 5:if(ji(t),4&t.mode&&1!==n&&o.hidden)return t.expirationTime=t.childExpirationTime=1,null;break;case 1:go(t.type)&&wo(t);break;case 4:zi(t,t.stateNode.containerInfo);break;case 10:r=t.memoizedProps.value,o=t.type._context,co(qo,o._currentValue),o._currentValue=r;break;case 13:if(null!==t.memoizedState)return 0!==(r=t.child.childExpirationTime)&&r>=n?Ua(e,t,n):(co(Ri,1&Ri.current),null!==(t=Xa(e,t,n))?t.sibling:null);co(Ri,1&Ri.current);break;case 19:if(r=t.childExpirationTime>=n,0!=(64&e.effectTag)){if(r)return Qa(e,t,n);t.effectTag|=64}if(null!==(o=t.memoizedState)&&(o.rendering=null,o.tail=null),co(Ri,Ri.current),!r)return null}return Xa(e,t,n)}Aa=!1}}else Aa=!1;switch(t.expirationTime=0,t.tag){case 2:if(r=t.type,null!==e&&(e.alternate=null,t.alternate=null,t.effectTag|=2),e=t.pendingProps,o=mo(t,fo.current),ri(t,n),o=Xi(null,t,r,e,o,n),t.effectTag|=1,"object"==typeof o&&null!==o&&"function"==typeof o.render&&void 0===o.$$typeof){if(t.tag=1,t.memoizedState=null,t.updateQueue=null,go(r)){var i=!0;wo(t)}else i=!1;t.memoizedState=null!==o.state&&void 0!==o.state?o.state:null,ai(t);var l=r.getDerivedStateFromProps;"function"==typeof l&&mi(t,r,l,e),o.updater=gi,t.stateNode=o,o._reactInternalFiber=t,wi(t,r,e,n),t=La(null,t,r,!0,i,n)}else t.tag=0,Ma(null,t,o,n),t=t.child;return t;case 16:e:{if(o=t.elementType,null!==e&&(e.alternate=null,t.alternate=null,t.effectTag|=2),e=t.pendingProps,function(e){if(-1===e._status){e._status=0;var t=e._ctor;t=t(),e._result=t,t.then((function(t){0===e._status&&(t=t.default,e._status=1,e._result=t)}),(function(t){0===e._status&&(e._status=2,e._result=t)}))}}(o),1!==o._status)throw o._result;switch(o=o._result,t.type=o,i=t.tag=function(e){if("function"==typeof e)return Ou(e)?1:0;if(null!=e){if((e=e.$$typeof)===ue)return 11;if(e===fe)return 14}return 2}(o),e=Go(o,e),i){case 0:t=Ra(null,t,o,e,n);break e;case 1:t=Ia(null,t,o,e,n);break e;case 11:t=za(null,t,o,e,n);break e;case 14:t=Fa(null,t,o,Go(o.type,e),r,n);break e}throw Error(a(306,o,""))}return t;case 0:return r=t.type,o=t.pendingProps,Ra(e,t,r,o=t.elementType===r?o:Go(r,o),n);case 1:return r=t.type,o=t.pendingProps,Ia(e,t,r,o=t.elementType===r?o:Go(r,o),n);case 3:if(Na(t),r=t.updateQueue,null===e||null===r)throw Error(a(282));if(r=t.pendingProps,o=null!==(o=t.memoizedState)?o.element:null,li(e,t),fi(t,r,null,n),(r=t.memoizedState.element)===o)_a(),t=Xa(e,t,n);else{if((o=t.stateNode.hydrate)&&(xa=xn(t.stateNode.containerInfo.firstChild),wa=t,o=Ea=!0),o)for(n=Ci(t,null,r,n),t.child=n;n;)n.effectTag=-3&n.effectTag|1024,n=n.sibling;else Ma(e,t,r,n),_a();t=t.child}return t;case 5:return ji(t),null===e&&Oa(t),r=t.type,o=t.pendingProps,i=null!==e?e.memoizedProps:null,l=o.children,bn(r,o)?l=null:null!==i&&bn(r,i)&&(t.effectTag|=16),Da(e,t),4&t.mode&&1!==n&&o.hidden?(t.expirationTime=t.childExpirationTime=1,t=null):(Ma(e,t,l,n),t=t.child),t;case 6:return null===e&&Oa(t),null;case 13:return Ua(e,t,n);case 4:return zi(t,t.stateNode.containerInfo),r=t.pendingProps,null===e?t.child=Oi(t,null,r,n):Ma(e,t,r,n),t.child;case 11:return 
r=t.type,o=t.pendingProps,za(e,t,r,o=t.elementType===r?o:Go(r,o),n);case 7:return Ma(e,t,t.pendingProps,n),t.child;case 8:case 12:return Ma(e,t,t.pendingProps.children,n),t.child;case 10:e:{r=t.type._context,o=t.pendingProps,l=t.memoizedProps,i=o.value;var u=t.type._context;if(co(qo,u._currentValue),u._currentValue=i,null!==l)if(u=l.value,0===(i=Ir(u,i)?0:0|("function"==typeof r._calculateChangedBits?r._calculateChangedBits(u,i):1073741823))){if(l.children===o.children&&!po.current){t=Xa(e,t,n);break e}}else for(null!==(u=t.child)&&(u.return=t);null!==u;){var c=u.dependencies;if(null!==c){l=u.child;for(var s=c.firstContext;null!==s;){if(s.context===r&&0!=(s.observedBits&i)){1===u.tag&&((s=ui(n,null)).tag=2,ci(u,s)),u.expirationTime<n&&(u.expirationTime=n),null!==(s=u.alternate)&&s.expirationTime<n&&(s.expirationTime=n),ni(u.return,n),c.expirationTime<n&&(c.expirationTime=n);break}s=s.next}}else l=10===u.tag&&u.type===t.type?null:u.child;if(null!==l)l.return=u;else for(l=u;null!==l;){if(l===t){l=null;break}if(null!==(u=l.sibling)){u.return=l.return,l=u;break}l=l.return}u=l}Ma(e,t,o.children,n),t=t.child}return t;case 9:return o=t.type,r=(i=t.pendingProps).children,ri(t,n),r=r(o=oi(o,i.unstable_observedBits)),t.effectTag|=1,Ma(e,t,r,n),t.child;case 14:return i=Go(o=t.type,t.pendingProps),Fa(e,t,o,i=Go(o.type,i),r,n);case 15:return ja(e,t,t.type,t.pendingProps,r,n);case 17:return r=t.type,o=t.pendingProps,o=t.elementType===r?o:Go(r,o),null!==e&&(e.alternate=null,t.alternate=null,t.effectTag|=2),t.tag=1,go(r)?(e=!0,wo(t)):e=!1,ri(t,n),bi(t,r,o),wi(t,r,o,n),La(null,t,r,!0,e,n);case 19:return Qa(e,t,n)}throw Error(a(156,t.tag))};var xu=null,Eu=null;function ku(e,t,n,r){this.tag=e,this.key=n,this.sibling=this.child=this.return=this.stateNode=this.type=this.elementType=null,this.index=0,this.ref=null,this.pendingProps=t,this.dependencies=this.memoizedState=this.updateQueue=this.memoizedProps=null,this.mode=r,this.effectTag=0,this.lastEffect=this.firstEffect=this.nextEffect=null,this.childExpirationTime=this.expirationTime=0,this.alternate=null}function Su(e,t,n,r){return new ku(e,t,n,r)}function Ou(e){return!(!(e=e.prototype)||!e.isReactComponent)}function Cu(e,t){var n=e.alternate;return null===n?((n=Su(e.tag,t,e.key,e.mode)).elementType=e.elementType,n.type=e.type,n.stateNode=e.stateNode,n.alternate=e,e.alternate=n):(n.pendingProps=t,n.effectTag=0,n.nextEffect=null,n.firstEffect=null,n.lastEffect=null),n.childExpirationTime=e.childExpirationTime,n.expirationTime=e.expirationTime,n.child=e.child,n.memoizedProps=e.memoizedProps,n.memoizedState=e.memoizedState,n.updateQueue=e.updateQueue,t=e.dependencies,n.dependencies=null===t?null:{expirationTime:t.expirationTime,firstContext:t.firstContext,responders:t.responders},n.sibling=e.sibling,n.index=e.index,n.ref=e.ref,n}function Tu(e,t,n,r,o,i){var l=2;if(r=e,"function"==typeof e)Ou(e)&&(l=1);else if("string"==typeof e)l=5;else e:switch(e){case ne:return _u(n.children,o,i,t);case le:l=8,o|=7;break;case re:l=8,o|=1;break;case oe:return(e=Su(12,n,t,8|o)).elementType=oe,e.type=oe,e.expirationTime=i,e;case ce:return(e=Su(13,n,t,o)).type=ce,e.elementType=ce,e.expirationTime=i,e;case se:return(e=Su(19,n,t,o)).elementType=se,e.expirationTime=i,e;default:if("object"==typeof e&&null!==e)switch(e.$$typeof){case ie:l=10;break e;case ae:l=9;break e;case ue:l=11;break e;case fe:l=14;break e;case pe:l=16,r=null;break e;case de:l=22;break e}throw Error(a(130,null==e?e:typeof e,""))}return(t=Su(l,n,t,o)).elementType=e,t.type=r,t.expirationTime=i,t}function 
_u(e,t,n,r){return(e=Su(7,e,r,t)).expirationTime=n,e}function Pu(e,t,n){return(e=Su(6,e,null,t)).expirationTime=n,e}function Au(e,t,n){return(t=Su(4,null!==e.children?e.children:[],e.key,t)).expirationTime=n,t.stateNode={containerInfo:e.containerInfo,pendingChildren:null,implementation:e.implementation},t}function Mu(e,t,n){this.tag=t,this.current=null,this.containerInfo=e,this.pingCache=this.pendingChildren=null,this.finishedExpirationTime=0,this.finishedWork=null,this.timeoutHandle=-1,this.pendingContext=this.context=null,this.hydrate=n,this.callbackNode=null,this.callbackPriority=90,this.lastExpiredTime=this.lastPingedTime=this.nextKnownPendingLevel=this.lastSuspendedTime=this.firstSuspendedTime=this.firstPendingTime=0}function zu(e,t){var n=e.firstSuspendedTime;return e=e.lastSuspendedTime,0!==n&&n>=t&&e<=t}function Fu(e,t){var n=e.firstSuspendedTime,r=e.lastSuspendedTime;n<t&&(e.firstSuspendedTime=t),(r>t||0===n)&&(e.lastSuspendedTime=t),t<=e.lastPingedTime&&(e.lastPingedTime=0),t<=e.lastExpiredTime&&(e.lastExpiredTime=0)}function ju(e,t){t>e.firstPendingTime&&(e.firstPendingTime=t);var n=e.firstSuspendedTime;0!==n&&(t>=n?e.firstSuspendedTime=e.lastSuspendedTime=e.nextKnownPendingLevel=0:t>=e.lastSuspendedTime&&(e.lastSuspendedTime=t+1),t>e.nextKnownPendingLevel&&(e.nextKnownPendingLevel=t))}function Du(e,t){var n=e.lastExpiredTime;(0===n||n>t)&&(e.lastExpiredTime=t)}function Ru(e,t,n,r){var o=t.current,i=Yl(),l=di.suspense;i=Ql(i,o,l);e:if(n){t:{if(Je(n=n._reactInternalFiber)!==n||1!==n.tag)throw Error(a(170));var u=n;do{switch(u.tag){case 3:u=u.stateNode.context;break t;case 1:if(go(u.type)){u=u.stateNode.__reactInternalMemoizedMergedChildContext;break t}}u=u.return}while(null!==u);throw Error(a(171))}if(1===n.tag){var c=n.type;if(go(c)){n=yo(n,c,u);break e}}n=u}else n=so;return null===t.context?t.context=n:t.pendingContext=n,(t=ui(i,l)).payload={element:e},null!==(r=void 0===r?null:r)&&(t.callback=r),ci(o,t),Xl(o,i),i}function Iu(e){if(!(e=e.current).child)return null;switch(e.child.tag){case 5:default:return e.child.stateNode}}function Lu(e,t){null!==(e=e.memoizedState)&&null!==e.dehydrated&&e.retryTime<t&&(e.retryTime=t)}function Nu(e,t){Lu(e,t),(e=e.alternate)&&Lu(e,t)}function Vu(e,t,n){var r=new Mu(e,t,n=null!=n&&!0===n.hydrate),o=Su(3,null,null,2===t?7:1===t?3:0);r.current=o,o.stateNode=r,ai(o),e[Cn]=r.current,n&&0!==t&&function(e,t){var n=Ze(t);Ct.forEach((function(e){ht(e,t,n)})),Tt.forEach((function(e){ht(e,t,n)}))}(0,9===e.nodeType?e:e.ownerDocument),this._internalRoot=r}function Hu(e){return!(!e||1!==e.nodeType&&9!==e.nodeType&&11!==e.nodeType&&(8!==e.nodeType||" react-mount-point-unstable "!==e.nodeValue))}function Bu(e,t,n,r,o){var i=n._reactRootContainer;if(i){var a=i._internalRoot;if("function"==typeof o){var l=o;o=function(){var e=Iu(a);l.call(e)}}Ru(t,a,e,o)}else{if(i=n._reactRootContainer=function(e,t){if(t||(t=!(!(t=e?9===e.nodeType?e.documentElement:e.firstChild:null)||1!==t.nodeType||!t.hasAttribute("data-reactroot"))),!t)for(var n;n=e.lastChild;)e.removeChild(n);return new Vu(e,0,t?{hydrate:!0}:void 0)}(n,r),a=i._internalRoot,"function"==typeof o){var u=o;o=function(){var e=Iu(a);u.call(e)}}tu((function(){Ru(t,a,e,o)}))}return Iu(a)}function Wu(e,t,n){var r=3<arguments.length&&void 0!==arguments[3]?arguments[3]:null;return{$$typeof:te,key:null==r?null:""+r,children:e,containerInfo:t,implementation:n}}function Uu(e,t){var n=2<arguments.length&&void 0!==arguments[2]?arguments[2]:null;if(!Hu(t))throw Error(a(200));return 
Wu(e,t,null,n)}Vu.prototype.render=function(e){Ru(e,this._internalRoot,null,null)},Vu.prototype.unmount=function(){var e=this._internalRoot,t=e.containerInfo;Ru(null,e,null,(function(){t[Cn]=null}))},mt=function(e){if(13===e.tag){var t=Xo(Yl(),150,100);Xl(e,t),Nu(e,t)}},gt=function(e){13===e.tag&&(Xl(e,3),Nu(e,3))},vt=function(e){if(13===e.tag){var t=Yl();Xl(e,t=Ql(t,e,null)),Nu(e,t)}},_=function(e,t,n){switch(t){case"input":if(Oe(e,n),t=n.name,"radio"===n.type&&null!=t){for(n=e;n.parentNode;)n=n.parentNode;for(n=n.querySelectorAll("input[name="+JSON.stringify(""+t)+'][type="radio"]'),t=0;t<n.length;t++){var r=n[t];if(r!==e&&r.form===e.form){var o=An(r);if(!o)throw Error(a(90));xe(r),Oe(r,o)}}}break;case"textarea":ze(e,n);break;case"select":null!=(t=n.value)&&Pe(e,!!n.multiple,t,!1)}},j=eu,D=function(e,t,n,r,o){var i=kl;kl|=4;try{return Wo(98,e.bind(null,t,n,r,o))}finally{0===(kl=i)&&Yo()}},R=function(){0==(49&kl)&&(function(){if(null!==Bl){var e=Bl;Bl=null,e.forEach((function(e,t){Du(t,e),Kl(t)})),Yo()}}(),mu())},I=function(e,t){var n=kl;kl|=2;try{return e(t)}finally{0===(kl=n)&&Yo()}};var $u,Yu,Qu={Events:[_n,Pn,An,C,k,In,function(e){ot(e,Rn)},z,F,Kt,lt,mu,{current:!1}]};Yu=($u={findFiberByHostInstance:Tn,bundleType:0,version:"16.13.1",rendererPackageName:"react-dom"}).findFiberByHostInstance,function(e){if("undefined"==typeof __REACT_DEVTOOLS_GLOBAL_HOOK__)return!1;var t=__REACT_DEVTOOLS_GLOBAL_HOOK__;if(t.isDisabled||!t.supportsFiber)return!0;try{var n=t.inject(e);xu=function(e){try{t.onCommitFiberRoot(n,e,void 0,64==(64&e.current.effectTag))}catch(e){}},Eu=function(e){try{t.onCommitFiberUnmount(n,e)}catch(e){}}}catch(e){}}(o({},$u,{overrideHookState:null,overrideProps:null,setSuspenseHandler:null,scheduleUpdate:null,currentDispatcherRef:q.ReactCurrentDispatcher,findHostInstanceByFiber:function(e){return null===(e=nt(e))?null:e.stateNode},findFiberByHostInstance:function(e){return Yu?Yu(e):null},findHostInstancesForRefresh:null,scheduleRefresh:null,scheduleRoot:null,setRefreshHandler:null,getCurrentFiber:null})),t.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED=Qu,t.createPortal=Uu,t.findDOMNode=function(e){if(null==e)return null;if(1===e.nodeType)return e;var t=e._reactInternalFiber;if(void 0===t){if("function"==typeof e.render)throw Error(a(188));throw Error(a(268,Object.keys(e)))}return e=null===(e=nt(t))?null:e.stateNode},t.flushSync=function(e,t){if(0!=(48&kl))throw Error(a(187));var n=kl;kl|=1;try{return Wo(99,e.bind(null,t))}finally{kl=n,Yo()}},t.hydrate=function(e,t,n){if(!Hu(t))throw Error(a(200));return Bu(null,e,t,!0,n)},t.render=function(e,t,n){if(!Hu(t))throw Error(a(200));return Bu(null,e,t,!1,n)},t.unmountComponentAtNode=function(e){if(!Hu(e))throw Error(a(40));return!!e._reactRootContainer&&(tu((function(){Bu(null,null,e,!1,(function(){e._reactRootContainer=null,e[Cn]=null}))})),!0)},t.unstable_batchedUpdates=eu,t.unstable_createPortal=function(e,t){return Uu(e,t,2<arguments.length&&void 0!==arguments[2]?arguments[2]:null)},t.unstable_renderSubtreeIntoContainer=function(e,t,n,r){if(!Hu(n))throw Error(a(200));if(null==e||void 0===e._reactInternalFiber)throw Error(a(38));return Bu(e,t,n,!1,r)},t.version="16.13.1"},function(e,t,n){"use strict";e.exports=n(30)},function(e,t,n){"use strict"; +/** @license React v0.19.1 + * scheduler.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. 
+ */var r,o,i,a,l;if("undefined"==typeof window||"function"!=typeof MessageChannel){var u=null,c=null,s=function(){if(null!==u)try{var e=t.unstable_now();u(!0,e),u=null}catch(e){throw setTimeout(s,0),e}},f=Date.now();t.unstable_now=function(){return Date.now()-f},r=function(e){null!==u?setTimeout(r,0,e):(u=e,setTimeout(s,0))},o=function(e,t){c=setTimeout(e,t)},i=function(){clearTimeout(c)},a=function(){return!1},l=t.unstable_forceFrameRate=function(){}}else{var p=window.performance,d=window.Date,h=window.setTimeout,m=window.clearTimeout;if("undefined"!=typeof console){var g=window.cancelAnimationFrame;"function"!=typeof window.requestAnimationFrame&&console.error("This browser doesn't support requestAnimationFrame. Make sure that you load a polyfill in older browsers. https://fb.me/react-polyfills"),"function"!=typeof g&&console.error("This browser doesn't support cancelAnimationFrame. Make sure that you load a polyfill in older browsers. https://fb.me/react-polyfills")}if("object"==typeof p&&"function"==typeof p.now)t.unstable_now=function(){return p.now()};else{var v=d.now();t.unstable_now=function(){return d.now()-v}}var b=!1,y=null,w=-1,x=5,E=0;a=function(){return t.unstable_now()>=E},l=function(){},t.unstable_forceFrameRate=function(e){0>e||125<e?console.error("forceFrameRate takes a positive int between 0 and 125, forcing framerates higher than 125 fps is not unsupported"):x=0<e?Math.floor(1e3/e):5};var k=new MessageChannel,S=k.port2;k.port1.onmessage=function(){if(null!==y){var e=t.unstable_now();E=e+x;try{y(!0,e)?S.postMessage(null):(b=!1,y=null)}catch(e){throw S.postMessage(null),e}}else b=!1},r=function(e){y=e,b||(b=!0,S.postMessage(null))},o=function(e,n){w=h((function(){e(t.unstable_now())}),n)},i=function(){m(w),w=-1}}function O(e,t){var n=e.length;e.push(t);e:for(;;){var r=n-1>>>1,o=e[r];if(!(void 0!==o&&0<_(o,t)))break e;e[r]=t,e[n]=o,n=r}}function C(e){return void 0===(e=e[0])?null:e}function T(e){var t=e[0];if(void 0!==t){var n=e.pop();if(n!==t){e[0]=n;e:for(var r=0,o=e.length;r<o;){var i=2*(r+1)-1,a=e[i],l=i+1,u=e[l];if(void 0!==a&&0>_(a,n))void 0!==u&&0>_(u,a)?(e[r]=u,e[l]=n,r=l):(e[r]=a,e[i]=n,r=i);else{if(!(void 0!==u&&0>_(u,n)))break e;e[r]=u,e[l]=n,r=l}}}return t}return null}function _(e,t){var n=e.sortIndex-t.sortIndex;return 0!==n?n:e.id-t.id}var P=[],A=[],M=1,z=null,F=3,j=!1,D=!1,R=!1;function I(e){for(var t=C(A);null!==t;){if(null===t.callback)T(A);else{if(!(t.startTime<=e))break;T(A),t.sortIndex=t.expirationTime,O(P,t)}t=C(A)}}function L(e){if(R=!1,I(e),!D)if(null!==C(P))D=!0,r(N);else{var t=C(A);null!==t&&o(L,t.startTime-e)}}function N(e,n){D=!1,R&&(R=!1,i()),j=!0;var r=F;try{for(I(n),z=C(P);null!==z&&(!(z.expirationTime>n)||e&&!a());){var l=z.callback;if(null!==l){z.callback=null,F=z.priorityLevel;var u=l(z.expirationTime<=n);n=t.unstable_now(),"function"==typeof u?z.callback=u:z===C(P)&&T(P),I(n)}else T(P);z=C(P)}if(null!==z)var c=!0;else{var s=C(A);null!==s&&o(L,s.startTime-n),c=!1}return c}finally{z=null,F=r,j=!1}}function V(e){switch(e){case 1:return-1;case 2:return 250;case 5:return 1073741823;case 4:return 1e4;default:return 5e3}}var H=l;t.unstable_IdlePriority=5,t.unstable_ImmediatePriority=1,t.unstable_LowPriority=4,t.unstable_NormalPriority=3,t.unstable_Profiling=null,t.unstable_UserBlockingPriority=2,t.unstable_cancelCallback=function(e){e.callback=null},t.unstable_continueExecution=function(){D||j||(D=!0,r(N))},t.unstable_getCurrentPriorityLevel=function(){return F},t.unstable_getFirstCallbackNode=function(){return 
C(P)},t.unstable_next=function(e){switch(F){case 1:case 2:case 3:var t=3;break;default:t=F}var n=F;F=t;try{return e()}finally{F=n}},t.unstable_pauseExecution=function(){},t.unstable_requestPaint=H,t.unstable_runWithPriority=function(e,t){switch(e){case 1:case 2:case 3:case 4:case 5:break;default:e=3}var n=F;F=e;try{return t()}finally{F=n}},t.unstable_scheduleCallback=function(e,n,a){var l=t.unstable_now();if("object"==typeof a&&null!==a){var u=a.delay;u="number"==typeof u&&0<u?l+u:l,a="number"==typeof a.timeout?a.timeout:V(e)}else a=V(e),u=l;return e={id:M++,callback:n,priorityLevel:e,startTime:u,expirationTime:a=u+a,sortIndex:-1},u>l?(e.sortIndex=u,O(A,e),null===C(P)&&e===C(A)&&(R?i():R=!0,o(L,u-l))):(e.sortIndex=a,O(P,e),D||j||(D=!0,r(N))),e},t.unstable_shouldYield=function(){var e=t.unstable_now();I(e);var n=C(P);return n!==z&&null!==z&&null!==n&&null!==n.callback&&n.startTime<=e&&n.expirationTime<z.expirationTime||a()},t.unstable_wrapCallback=function(e){var t=F;return function(){var n=F;F=t;try{return e.apply(this,arguments)}finally{F=n}}}},function(e,t,n){"use strict";n.r(t),n.d(t,"props",(function(){return a})),n.d(t,"createShouldForwardProp",(function(){return l}));var r=n(12),o=n(15),i=n(3),a=Object(i.compose)(i.space,i.typography,i.color,i.layout,i.flexbox,i.border,i.background,i.position,i.grid,i.shadow,i.buttonStyle,i.textStyle,i.colorStyle).propNames,l=function(e){var t=new RegExp("^("+e.join("|")+")$");return Object(r.a)((function(e){return Object(o.a)(e)&&!t.test(e)}))};t.default=l(a)},function(e,t,n){"use strict";var r=Array.isArray,o=Object.keys,i=Object.prototype.hasOwnProperty;e.exports=function e(t,n){if(t===n)return!0;if(t&&n&&"object"==typeof t&&"object"==typeof n){var a,l,u,c=r(t),s=r(n);if(c&&s){if((l=t.length)!=n.length)return!1;for(a=l;0!=a--;)if(!e(t[a],n[a]))return!1;return!0}if(c!=s)return!1;var f=t instanceof Date,p=n instanceof Date;if(f!=p)return!1;if(f&&p)return t.getTime()==n.getTime();var d=t instanceof RegExp,h=n instanceof RegExp;if(d!=h)return!1;if(d&&h)return t.toString()==n.toString();var m=o(t);if((l=m.length)!==o(n).length)return!1;for(a=l;0!=a--;)if(!i.call(n,m[a]))return!1;for(a=l;0!=a--;)if(!e(t[u=m[a]],n[u]))return!1;return!0}return t!=t&&n!=n}},function(e,t,n){"use strict";var r=n(34);function o(){}function i(){}i.resetWarningCache=o,e.exports=function(){function e(e,t,n,o,i,a){if(a!==r){var l=new Error("Calling PropTypes validators directly is not supported by the `prop-types` package. Use PropTypes.checkPropTypes() to call them. 
Read more at http://fb.me/use-check-prop-types");throw l.name="Invariant Violation",l}}function t(){return e}e.isRequired=e;var n={array:e,bool:e,func:e,number:e,object:e,string:e,symbol:e,any:e,arrayOf:t,element:e,elementType:e,instanceOf:t,node:e,objectOf:t,oneOf:t,oneOfType:t,shape:t,exact:t,checkPropTypes:i,resetWarningCache:o};return n.PropTypes=n,n}},function(e,t,n){"use strict";e.exports="SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED"},function(e,t,n){(function(e){var r=void 0!==e&&e||"undefined"!=typeof self&&self||window,o=Function.prototype.apply;function i(e,t){this._id=e,this._clearFn=t}t.setTimeout=function(){return new i(o.call(setTimeout,r,arguments),clearTimeout)},t.setInterval=function(){return new i(o.call(setInterval,r,arguments),clearInterval)},t.clearTimeout=t.clearInterval=function(e){e&&e.close()},i.prototype.unref=i.prototype.ref=function(){},i.prototype.close=function(){this._clearFn.call(r,this._id)},t.enroll=function(e,t){clearTimeout(e._idleTimeoutId),e._idleTimeout=t},t.unenroll=function(e){clearTimeout(e._idleTimeoutId),e._idleTimeout=-1},t._unrefActive=t.active=function(e){clearTimeout(e._idleTimeoutId);var t=e._idleTimeout;t>=0&&(e._idleTimeoutId=setTimeout((function(){e._onTimeout&&e._onTimeout()}),t))},n(36),t.setImmediate="undefined"!=typeof self&&self.setImmediate||void 0!==e&&e.setImmediate||this&&this.setImmediate,t.clearImmediate="undefined"!=typeof self&&self.clearImmediate||void 0!==e&&e.clearImmediate||this&&this.clearImmediate}).call(this,n(19))},function(e,t,n){(function(e,t){!function(e,n){"use strict";if(!e.setImmediate){var r,o,i,a,l,u=1,c={},s=!1,f=e.document,p=Object.getPrototypeOf&&Object.getPrototypeOf(e);p=p&&p.setTimeout?p:e,"[object process]"==={}.toString.call(e.process)?r=function(e){t.nextTick((function(){h(e)}))}:!function(){if(e.postMessage&&!e.importScripts){var t=!0,n=e.onmessage;return e.onmessage=function(){t=!1},e.postMessage("","*"),e.onmessage=n,t}}()?e.MessageChannel?((i=new MessageChannel).port1.onmessage=function(e){h(e.data)},r=function(e){i.port2.postMessage(e)}):f&&"onreadystatechange"in f.createElement("script")?(o=f.documentElement,r=function(e){var t=f.createElement("script");t.onreadystatechange=function(){h(e),t.onreadystatechange=null,o.removeChild(t),t=null},o.appendChild(t)}):r=function(e){setTimeout(h,0,e)}:(a="setImmediate$"+Math.random()+"$",l=function(t){t.source===e&&"string"==typeof t.data&&0===t.data.indexOf(a)&&h(+t.data.slice(a.length))},e.addEventListener?e.addEventListener("message",l,!1):e.attachEvent("onmessage",l),r=function(t){e.postMessage(a+t,"*")}),p.setImmediate=function(e){"function"!=typeof e&&(e=new Function(""+e));for(var t=new Array(arguments.length-1),n=0;n<t.length;n++)t[n]=arguments[n+1];var o={callback:e,args:t};return c[u]=o,r(u),u++},p.clearImmediate=d}function d(e){delete c[e]}function h(e){if(s)setTimeout(h,0,e);else{var t=c[e];if(t){s=!0;try{!function(e){var t=e.callback,n=e.args;switch(n.length){case 0:t();break;case 1:t(n[0]);break;case 2:t(n[0],n[1]);break;case 3:t(n[0],n[1],n[2]);break;default:t.apply(void 0,n)}}(t)}finally{d(e),s=!1}}}}}("undefined"==typeof self?void 0===e?this:e:self)}).call(this,n(19),n(37))},function(e,t){var n,r,o=e.exports={};function i(){throw new Error("setTimeout has not been defined")}function a(){throw new Error("clearTimeout has not been defined")}function l(e){if(n===setTimeout)return setTimeout(e,0);if((n===i||!n)&&setTimeout)return n=setTimeout,setTimeout(e,0);try{return n(e,0)}catch(t){try{return n.call(null,e,0)}catch(t){return 
n.call(this,e,0)}}}!function(){try{n="function"==typeof setTimeout?setTimeout:i}catch(e){n=i}try{r="function"==typeof clearTimeout?clearTimeout:a}catch(e){r=a}}();var u,c=[],s=!1,f=-1;function p(){s&&u&&(s=!1,u.length?c=u.concat(c):f=-1,c.length&&d())}function d(){if(!s){var e=l(p);s=!0;for(var t=c.length;t;){for(u=c,c=[];++f<t;)u&&u[f].run();f=-1,t=c.length}u=null,s=!1,function(e){if(r===clearTimeout)return clearTimeout(e);if((r===a||!r)&&clearTimeout)return r=clearTimeout,clearTimeout(e);try{r(e)}catch(t){try{return r.call(null,e)}catch(t){return r.call(this,e)}}}(e)}}function h(e,t){this.fun=e,this.array=t}function m(){}o.nextTick=function(e){var t=new Array(arguments.length-1);if(arguments.length>1)for(var n=1;n<arguments.length;n++)t[n-1]=arguments[n];c.push(new h(e,t)),1!==c.length||s||l(d)},h.prototype.run=function(){this.fun.apply(null,this.array)},o.title="browser",o.browser=!0,o.env={},o.argv=[],o.version="",o.versions={},o.on=m,o.addListener=m,o.once=m,o.off=m,o.removeListener=m,o.removeAllListeners=m,o.emit=m,o.prependListener=m,o.prependOnceListener=m,o.listeners=function(e){return[]},o.binding=function(e){throw new Error("process.binding is not supported")},o.cwd=function(){return"/"},o.chdir=function(e){throw new Error("process.chdir is not supported")},o.umask=function(){return 0}},function(e,t,n){"use strict";e.exports=n(39)},function(e,t,n){"use strict"; +/** @license React v16.13.1 + * react-is.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */var r="function"==typeof Symbol&&Symbol.for,o=r?Symbol.for("react.element"):60103,i=r?Symbol.for("react.portal"):60106,a=r?Symbol.for("react.fragment"):60107,l=r?Symbol.for("react.strict_mode"):60108,u=r?Symbol.for("react.profiler"):60114,c=r?Symbol.for("react.provider"):60109,s=r?Symbol.for("react.context"):60110,f=r?Symbol.for("react.async_mode"):60111,p=r?Symbol.for("react.concurrent_mode"):60111,d=r?Symbol.for("react.forward_ref"):60112,h=r?Symbol.for("react.suspense"):60113,m=r?Symbol.for("react.suspense_list"):60120,g=r?Symbol.for("react.memo"):60115,v=r?Symbol.for("react.lazy"):60116,b=r?Symbol.for("react.block"):60121,y=r?Symbol.for("react.fundamental"):60117,w=r?Symbol.for("react.responder"):60118,x=r?Symbol.for("react.scope"):60119;function E(e){if("object"==typeof e&&null!==e){var t=e.$$typeof;switch(t){case o:switch(e=e.type){case f:case p:case a:case u:case l:case h:return e;default:switch(e=e&&e.$$typeof){case s:case d:case v:case g:case c:return e;default:return t}}case i:return t}}}function k(e){return E(e)===p}t.AsyncMode=f,t.ConcurrentMode=p,t.ContextConsumer=s,t.ContextProvider=c,t.Element=o,t.ForwardRef=d,t.Fragment=a,t.Lazy=v,t.Memo=g,t.Portal=i,t.Profiler=u,t.StrictMode=l,t.Suspense=h,t.isAsyncMode=function(e){return k(e)||E(e)===f},t.isConcurrentMode=k,t.isContextConsumer=function(e){return E(e)===s},t.isContextProvider=function(e){return E(e)===c},t.isElement=function(e){return"object"==typeof e&&null!==e&&e.$$typeof===o},t.isForwardRef=function(e){return E(e)===d},t.isFragment=function(e){return E(e)===a},t.isLazy=function(e){return E(e)===v},t.isMemo=function(e){return E(e)===g},t.isPortal=function(e){return E(e)===i},t.isProfiler=function(e){return E(e)===u},t.isStrictMode=function(e){return E(e)===l},t.isSuspense=function(e){return E(e)===h},t.isValidElementType=function(e){return"string"==typeof e||"function"==typeof 
e||e===a||e===p||e===u||e===l||e===h||e===m||"object"==typeof e&&null!==e&&(e.$$typeof===v||e.$$typeof===g||e.$$typeof===c||e.$$typeof===s||e.$$typeof===d||e.$$typeof===y||e.$$typeof===w||e.$$typeof===x||e.$$typeof===b)},t.typeOf=E},function(e,t){var n="undefined"!=typeof crypto&&crypto.getRandomValues&&crypto.getRandomValues.bind(crypto)||"undefined"!=typeof msCrypto&&"function"==typeof window.msCrypto.getRandomValues&&msCrypto.getRandomValues.bind(msCrypto);if(n){var r=new Uint8Array(16);e.exports=function(){return n(r),r}}else{var o=new Array(16);e.exports=function(){for(var e,t=0;t<16;t++)0==(3&t)&&(e=4294967296*Math.random()),o[t]=e>>>((3&t)<<3)&255;return o}}},function(e,t){for(var n=[],r=0;r<256;++r)n[r]=(r+256).toString(16).substr(1);e.exports=function(e,t){var r=t||0,o=n;return[o[e[r++]],o[e[r++]],o[e[r++]],o[e[r++]],"-",o[e[r++]],o[e[r++]],"-",o[e[r++]],o[e[r++]],"-",o[e[r++]],o[e[r++]],"-",o[e[r++]],o[e[r++]],o[e[r++]],o[e[r++]],o[e[r++]],o[e[r++]]].join("")}},function(e,t,n){"use strict";n.r(t),n.d(t,"default",(function(){return Ur}));var r=n(0),o=n.n(r),i=n(10),a=n.n(i),l=n(7);function u(){return(u=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}var c=Object(r.forwardRef)((function(e,t){return o.a.createElement(l.Box,u({ref:t,tx:"text"},e))})),s=Object(r.forwardRef)((function(e,t){return o.a.createElement(l.Box,u({ref:t,as:"h2",tx:"text",variant:"heading"},e,{__css:{fontSize:4,fontFamily:"heading",fontWeight:"heading",lineHeight:"heading"}}))})),f=(Object(r.forwardRef)((function(e,t){return o.a.createElement(l.Box,u({ref:t,as:"a",variant:"link"},e))})),Object(r.forwardRef)((function(e,t){return o.a.createElement(l.Box,u({ref:t,as:"button",tx:"buttons",variant:"primary"},e,{__css:{appearance:"none",display:"inline-block",textAlign:"center",lineHeight:"inherit",textDecoration:"none",fontSize:"inherit",px:3,py:2,color:"white",bg:"primary",border:0,borderRadius:4}}))}))),p=Object(r.forwardRef)((function(e,t){return o.a.createElement(l.Box,u({ref:t,as:"img"},e,{__css:{maxWidth:"100%",height:"auto"}}))})),d=Object(r.forwardRef)((function(e,t){return o.a.createElement(l.Box,u({ref:t,variant:"card"},e))})),h=function(){return(h=Object.assign||function(e){for(var t,n=1,r=arguments.length;n<r;n++)for(var o in t=arguments[n])Object.prototype.hasOwnProperty.call(t,o)&&(e[o]=t[o]);return e}).apply(this,arguments)},m=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&t.indexOf(r)<0&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var o=0;for(r=Object.getOwnPropertySymbols(e);o<r.length;o++)t.indexOf(r[o])<0&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]])}return n};var g=function(e){var t,n=e.sx,o=e.variant,i=void 0===o?"medium":o,a=e.children,u=e.username,c=m(e,["sx","variant","children","username"]);return r.createElement(l.Flex,h({variant:"avatar."+i,justifyContent:"center",alignItems:"center",fontSize:4,color:"neutral_0",bg:"cyan_2"},c,{sx:h({textAlign:"center",borderRadius:"50%",lineHeight:"1em"},n||{})}),a||((t=u)?t.split(" ").slice(0,2).map((function(e){return e.slice(0,1).toUpperCase()})).join(""):null)||null)},v=n(22),b=n.n(v),y=function(){return(y=Object.assign||function(e){for(var t,n=1,r=arguments.length;n<r;n++)for(var o in t=arguments[n])Object.prototype.hasOwnProperty.call(t,o)&&(e[o]=t[o]);return e}).apply(this,arguments)},w=function(e,t){var n={};for(var r in 
e)Object.prototype.hasOwnProperty.call(e,r)&&t.indexOf(r)<0&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var o=0;for(r=Object.getOwnPropertySymbols(e);o<r.length;o++)t.indexOf(r[o])<0&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]])}return n};var x,E=function(e){var t=e.showWhenDisabled,n=e.active,o=e.fullWidth,i=e.large,a=e.block,l=e.variant,u=void 0===l?"primary":l,c=w(e,["showWhenDisabled","active","fullWidth","large","block","variant"]),s=("icon"===u||"headerIcon"===u)&&!t,p=c.disabled||!c.onClick&&!c.href;return r.createElement(f,y({as:"button",variant:"button."+u,className:b()({active:n,large:i,disabled:c.disabled}),sx:y(y({},a?{borderRadius:0}:{}),{alignSelf:"center",cursor:p?"not-allowed":"pointer",letterSpacing:0,position:"relative",transition:"all .2s ease","&:disabled, &.disabled":{boxShadow:"none",display:s?"none":void 0},width:o?"100%":null})},c))},k=n(11),S=n(23),O=n.n(S),C=function(e,t){return Object.defineProperty?Object.defineProperty(e,"raw",{value:t}):e.raw=t,e},T=function(e,t){var n="function"==typeof Symbol&&e[Symbol.iterator];if(!n)return e;var r,o,i=n.call(e),a=[];try{for(;(void 0===t||t-- >0)&&!(r=i.next()).done;)a.push(r.value)}catch(e){o={error:e}}finally{try{r&&!r.done&&(n=i.return)&&n.call(i)}finally{if(o)throw o.error}}return a},_=Object(k.default)(O.a)(x||(x=C(["\n height: 100%;\n overflow: auto;\n display: inline-block;\n width: 100%;\n height: max-content;\n min-height: 24px;\n line-height: 24px;\n font-size: 14px;\n white-space: pre-wrap;\n word-wrap: break-word;\n user-select: text;\n outline: none;\n color: #252d40;\n\n &:empty:before {\n content: attr(placeholder);\n position: absolute;\n color: #979fb4;\n pointer-events: none;\n }\n"],["\n height: 100%;\n overflow: auto;\n display: inline-block;\n width: 100%;\n height: max-content;\n min-height: 24px;\n line-height: 24px;\n font-size: 14px;\n white-space: pre-wrap;\n word-wrap: break-word;\n user-select: text;\n outline: none;\n color: #252d40;\n\n &:empty:before {\n content: attr(placeholder);\n position: absolute;\n color: #979fb4;\n pointer-events: none;\n }\n"]))),P=r.memo((function(e){var t=e.autoFocus,n=void 0===t||t,o=e.onSendMessage,i=T(r.useState(""),2),a=i[0],u=i[1],c=r.useRef(null);r.useEffect((function(){n&&c.current&&c.current.focus()}),[n]);var s=r.useCallback((function(){a&&(o(a),u(""))}),[a,o]),f=r.useCallback((function(){u(""),o("/restart")}),[o]),p=r.useCallback((function(e){"<br>"!==e.target.value?u(e.target.value):u("")}),[u]);return r.createElement(l.Flex,{flex:1,sx:{height:"72px"},onKeyDown:function(e){console.log(a),console.log(!a),s&&"Enter"===e.key&&(e.preventDefault(),s())}},r.createElement(l.Flex,{flex:1,pl:4,alignItems:"center",sx:{overflow:"auto"}},r.createElement(_,{placeholder:"Start typing a message…",html:a,onChange:p})),r.createElement(l.Flex,null,r.createElement(E,{variant:"headerIcon",p:"28px",onClick:f},r.createElement(K,{icon:"sync"})),r.createElement(l.Box,null,r.createElement(E,{variant:"headerIcon",p:"28px",disabled:""===a,showWhenDisabled:!0,onClick:s},r.createElement(K,{icon:"paper-plane"})))))})),A=n(13),M=n(2),z=n.n(M);function F(e){return(F="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function j(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function D(e,t){var 
n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function R(e){for(var t=1;t<arguments.length;t++){var n=null!=arguments[t]?arguments[t]:{};t%2?D(Object(n),!0).forEach((function(t){j(e,t,n[t])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(n)):D(Object(n)).forEach((function(t){Object.defineProperty(e,t,Object.getOwnPropertyDescriptor(n,t))}))}return e}function I(e,t){if(null==e)return{};var n,r,o=function(e,t){if(null==e)return{};var n,r,o={},i=Object.keys(e);for(r=0;r<i.length;r++)n=i[r],t.indexOf(n)>=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r<i.length;r++)n=i[r],t.indexOf(n)>=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function L(e){return function(e){if(Array.isArray(e)){for(var t=0,n=new Array(e.length);t<e.length;t++)n[t]=e[t];return n}}(e)||function(e){if(Symbol.iterator in Object(e)||"[object Arguments]"===Object.prototype.toString.call(e))return Array.from(e)}(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance")}()}function N(e){return t=e,(t-=0)==t?e:(e=e.replace(/[\-_\s]+(.)?/g,(function(e,t){return t?t.toUpperCase():""}))).substr(0,1).toLowerCase()+e.substr(1);var t}function V(e){return e.split(";").map((function(e){return e.trim()})).filter((function(e){return e})).reduce((function(e,t){var n,r=t.indexOf(":"),o=N(t.slice(0,r)),i=t.slice(r+1).trim();return o.startsWith("webkit")?e[(n=o,n.charAt(0).toUpperCase()+n.slice(1))]=i:e[o]=i,e}),{})}var H=!1;try{H=!0}catch(e){}function B(e){return null===e?null:"object"===F(e)&&e.prefix&&e.iconName?e:Array.isArray(e)&&2===e.length?{prefix:e[0],iconName:e[1]}:"string"==typeof e?{prefix:"fas",iconName:e}:void 0}function W(e,t){return Array.isArray(t)&&t.length>0||!Array.isArray(t)&&t?j({},e,t):{}}function U(e){var t=e.icon,n=e.mask,r=e.symbol,o=e.className,i=e.title,a=B(t),l=W("classes",[].concat(L(function(e){var t,n=e.spin,r=e.pulse,o=e.fixedWidth,i=e.inverse,a=e.border,l=e.listItem,u=e.flip,c=e.size,s=e.rotation,f=e.pull,p=(j(t={"fa-spin":n,"fa-pulse":r,"fa-fw":o,"fa-inverse":i,"fa-border":a,"fa-li":l,"fa-flip-horizontal":"horizontal"===u||"both"===u,"fa-flip-vertical":"vertical"===u||"both"===u},"fa-".concat(c),null!=c),j(t,"fa-rotate-".concat(s),null!=s),j(t,"fa-pull-".concat(f),null!=f),j(t,"fa-swap-opacity",e.swapOpacity),t);return Object.keys(p).map((function(e){return p[e]?e:null})).filter((function(e){return e}))}(e)),L(o.split(" ")))),u=W("transform","string"==typeof e.transform?A.c.transform(e.transform):e.transform),c=W("mask",B(n)),s=Object(A.a)(a,R({},l,{},u,{},c,{symbol:r,title:i}));if(!s)return function(){var e;!H&&console&&"function"==typeof console.error&&(e=console).error.apply(e,arguments)}("Could not find icon",a),null;var f=s.abstract,p={};return 
Object.keys(e).forEach((function(t){U.defaultProps.hasOwnProperty(t)||(p[t]=e[t])})),$(f[0],p)}U.displayName="FontAwesomeIcon",U.propTypes={border:z.a.bool,className:z.a.string,mask:z.a.oneOfType([z.a.object,z.a.array,z.a.string]),fixedWidth:z.a.bool,inverse:z.a.bool,flip:z.a.oneOf(["horizontal","vertical","both"]),icon:z.a.oneOfType([z.a.object,z.a.array,z.a.string]),listItem:z.a.bool,pull:z.a.oneOf(["right","left"]),pulse:z.a.bool,rotation:z.a.oneOf([90,180,270]),size:z.a.oneOf(["lg","xs","sm","1x","2x","3x","4x","5x","6x","7x","8x","9x","10x"]),spin:z.a.bool,symbol:z.a.oneOfType([z.a.bool,z.a.string]),title:z.a.string,transform:z.a.oneOfType([z.a.string,z.a.object]),swapOpacity:z.a.bool},U.defaultProps={border:!1,className:"",mask:null,fixedWidth:!1,inverse:!1,flip:null,icon:null,listItem:!1,pull:null,pulse:!1,rotation:null,size:null,spin:!1,symbol:!1,title:"",transform:null,swapOpacity:!1};var $=function e(t,n){var r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};if("string"==typeof n)return n;var o=(n.children||[]).map((function(n){return e(t,n)})),i=Object.keys(n.attributes||{}).reduce((function(e,t){var r=n.attributes[t];switch(t){case"class":e.attrs.className=r,delete n.attributes.class;break;case"style":e.attrs.style=V(r);break;default:0===t.indexOf("aria-")||0===t.indexOf("data-")?e.attrs[t.toLowerCase()]=r:e.attrs[N(t)]=r}return e}),{attrs:{}}),a=r.style,l=void 0===a?{}:a,u=I(r,["style"]);return i.attrs.style=R({},i.attrs.style,{},l),t.apply(void 0,[n.tag,R({},i.attrs,{},u)].concat(L(o)))}.bind(null,o.a.createElement),Y=n(3),Q=function(){return(Q=Object.assign||function(e){for(var t,n=1,r=arguments.length;n<r;n++)for(var o in t=arguments[n])Object.prototype.hasOwnProperty.call(t,o)&&(e[o]=t[o]);return e}).apply(this,arguments)},X=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&t.indexOf(r)<0&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var o=0;for(r=Object.getOwnPropertySymbols(e);o<r.length;o++)t.indexOf(r[o])<0&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]])}return n},G=Object(k.default)((function(e){e.color,e.fontSize,e.opacity;var t=X(e,["color","fontSize","opacity"]);return r.createElement(U,Q({},t))}))(Object(Y.compose)(Y.space,Y.color,Y.fontSize));function q(e){var t=e.icon,n=X(e,["icon"]),o="string"==typeof t?["fal",t]:t;return r.createElement(G,Q({icon:o,style:{width:"1em"}},n))}q.defaultProps={color:"inherit",fontSize:"inherit"};var K=q,Z=function(){return(Z=Object.assign||function(e){for(var t,n=1,r=arguments.length;n<r;n++)for(var o in t=arguments[n])Object.prototype.hasOwnProperty.call(t,o)&&(e[o]=t[o]);return e}).apply(this,arguments)},J=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&t.indexOf(r)<0&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var o=0;for(r=Object.getOwnPropertySymbols(e);o<r.length;o++)t.indexOf(r[o])<0&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]])}return n},ee=function(e,t){var n="function"==typeof Symbol&&e[Symbol.iterator];if(!n)return e;var r,o,i=n.call(e),a=[];try{for(;(void 0===t||t-- >0)&&!(r=i.next()).done;)a.push(r.value)}catch(e){o={error:e}}finally{try{r&&!r.done&&(n=i.return)&&n.call(i)}finally{if(o)throw o.error}}return a};var te=function(e){e.ref;var t=e.src,n=e.placeholder,o=J(e,["ref","src","placeholder"]),i=ee(r.useState(!1),2),a=i[0],l=i[1];return r.useEffect((function(){var e,n=new Image;t&&(e=ee([t,function(){return l(!0)},function(){return 
l(!1)}],3),n.src=e[0],n.onerror=e[1],n.onload=e[2])}),[t]),r.createElement(r.Fragment,null,a&&n?n:r.createElement(p,Z({src:t},o)))};var ne=function(){return(ne=Object.assign||function(e){for(var t,n=1,r=arguments.length;n<r;n++)for(var o in t=arguments[n])Object.prototype.hasOwnProperty.call(t,o)&&(e[o]=t[o]);return e}).apply(this,arguments)},re=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&t.indexOf(r)<0&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var o=0;for(r=Object.getOwnPropertySymbols(e);o<r.length;o++)t.indexOf(r[o])<0&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]])}return n};var oe=function(e){var t=e.sx,n=e.loading,o=e.inheritColor,i=e.variant,a=void 0===i?"bodyPrimary":i,l=re(e,["sx","loading","inheritColor","variant"]);return r.createElement(c,ne({as:"code"===a?"code":"p",variant:"text."+a},l,{sx:ne(ne(ne({display:"block",fontWeight:400,letterSpacing:n?"0px important":0,m:0,maxWidth:"100%",overflow:"hidden",textOverflow:"ellipsis"},o?{color:"inherit !important"}:{}),n?{color:"neutral_1 !important"}:{}),t||{})}))},ie=n(1),ae=function(){};function le(e,t){return t?"-"===t[0]?e+t:e+"__"+t:e}function ue(e,t,n){var r=[n];if(t&&e)for(var o in t)t.hasOwnProperty(o)&&t[o]&&r.push(""+le(e,o));return r.filter((function(e){return e})).map((function(e){return String(e).trim()})).join(" ")}var ce=function(e){return Array.isArray(e)?e.filter(Boolean):"object"==typeof e&&null!==e?[e]:[]};function se(e){return[document.documentElement,document.body,window].indexOf(e)>-1}function fe(e){return se(e)?window.pageYOffset:e.scrollTop}function pe(e,t){se(e)?window.scrollTo(0,t):e.scrollTop=t}function de(e,t,n,r){void 0===n&&(n=200),void 0===r&&(r=ae);var o=fe(e),i=t-o,a=0;!function t(){var l,u=i*((l=(l=a+=10)/n-1)*l*l+1)+o;pe(e,u),a<n?window.requestAnimationFrame(t):r(e)}()}function he(){try{return document.createEvent("TouchEvent"),!0}catch(e){return!1}}var me=n(6),ge=n(20),ve=n.n(ge);function be(){return(be=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}function ye(e,t){e.prototype=Object.create(t.prototype),e.prototype.constructor=e,e.__proto__=t}function we(e){var t=e.maxHeight,n=e.menuEl,r=e.minHeight,o=e.placement,i=e.shouldScroll,a=e.isFixedPosition,l=e.theme.spacing,u=function(e){var t=getComputedStyle(e),n="absolute"===t.position,r=/(auto|scroll)/,o=document.documentElement;if("fixed"===t.position)return o;for(var i=e;i=i.parentElement;)if(t=getComputedStyle(i),(!n||"static"!==t.position)&&r.test(t.overflow+t.overflowY+t.overflowX))return i;return o}(n),c={placement:"bottom",maxHeight:t};if(!n||!n.offsetParent)return c;var s=u.getBoundingClientRect().height,f=n.getBoundingClientRect(),p=f.bottom,d=f.height,h=f.top,m=n.offsetParent.getBoundingClientRect().top,g=window.innerHeight,v=fe(u),b=parseInt(getComputedStyle(n).marginBottom,10),y=parseInt(getComputedStyle(n).marginTop,10),w=m-y,x=g-h,E=w+v,k=s-v-h,S=p-g+v+b,O=v+h-y;switch(o){case"auto":case"bottom":if(x>=d)return{placement:"bottom",maxHeight:t};if(k>=d&&!a)return i&&de(u,S,160),{placement:"bottom",maxHeight:t};if(!a&&k>=r||a&&x>=r)return i&&de(u,S,160),{placement:"bottom",maxHeight:a?x-b:k-b};if("auto"===o||a){var C=t,T=a?w:E;return T>=r&&(C=Math.min(T-b-l.controlHeight,t)),{placement:"top",maxHeight:C}}if("bottom"===o)return 
pe(u,S),{placement:"bottom",maxHeight:t};break;case"top":if(w>=d)return{placement:"top",maxHeight:t};if(E>=d&&!a)return i&&de(u,O,160),{placement:"top",maxHeight:t};if(!a&&E>=r||a&&w>=r){var _=t;return(!a&&E>=r||a&&w>=r)&&(_=a?w-y:E-y),i&&de(u,O,160),{placement:"top",maxHeight:_}}return{placement:"bottom",maxHeight:t};default:throw new Error('Invalid placement provided "'+o+'".')}return c}var xe=function(e){return"auto"===e?"bottom":e},Ee=function(e){function t(){for(var t,n=arguments.length,r=new Array(n),o=0;o<n;o++)r[o]=arguments[o];return(t=e.call.apply(e,[this].concat(r))||this).state={maxHeight:t.props.maxMenuHeight,placement:null},t.getPlacement=function(e){var n=t.props,r=n.minMenuHeight,o=n.maxMenuHeight,i=n.menuPlacement,a=n.menuPosition,l=n.menuShouldScrollIntoView,u=n.theme,c=t.context.getPortalPlacement;if(e){var s="fixed"===a,f=we({maxHeight:o,menuEl:e,minHeight:r,placement:i,shouldScroll:l&&!s,isFixedPosition:s,theme:u});c&&c(f),t.setState(f)}},t.getUpdatedProps=function(){var e=t.props.menuPlacement,n=t.state.placement||xe(e);return be({},t.props,{placement:n,maxHeight:t.state.maxHeight})},t}return ye(t,e),t.prototype.render=function(){return(0,this.props.children)({ref:this.getPlacement,placerProps:this.getUpdatedProps()})},t}(r.Component);Ee.contextTypes={getPortalPlacement:z.a.func};var ke=function(e){var t=e.theme,n=t.spacing.baseUnit;return{color:t.colors.neutral40,padding:2*n+"px "+3*n+"px",textAlign:"center"}},Se=ke,Oe=ke,Ce=function(e){var t=e.children,n=e.className,r=e.cx,o=e.getStyles,i=e.innerProps;return Object(ie.d)("div",be({css:o("noOptionsMessage",e),className:r({"menu-notice":!0,"menu-notice--no-options":!0},n)},i),t)};Ce.defaultProps={children:"No options"};var Te=function(e){var t=e.children,n=e.className,r=e.cx,o=e.getStyles,i=e.innerProps;return Object(ie.d)("div",be({css:o("loadingMessage",e),className:r({"menu-notice":!0,"menu-notice--loading":!0},n)},i),t)};Te.defaultProps={children:"Loading..."};var _e=function(e){function t(){for(var t,n=arguments.length,r=new Array(n),o=0;o<n;o++)r[o]=arguments[o];return(t=e.call.apply(e,[this].concat(r))||this).state={placement:null},t.getPortalPlacement=function(e){var n=e.placement;n!==xe(t.props.menuPlacement)&&t.setState({placement:n})},t}ye(t,e);var n=t.prototype;return n.getChildContext=function(){return{getPortalPlacement:this.getPortalPlacement}},n.render=function(){var e=this.props,t=e.appendTo,n=e.children,r=e.controlElement,o=e.menuPlacement,a=e.menuPosition,l=e.getStyles,u="fixed"===a;if(!t&&!u||!r)return null;var c=this.state.placement||xe(o),s=function(e){var t=e.getBoundingClientRect();return{bottom:t.bottom,height:t.height,left:t.left,right:t.right,top:t.top,width:t.width}}(r),f=u?0:window.pageYOffset,p={offset:s[c]+f,position:a,rect:s},d=Object(ie.d)("div",{css:l("menuPortal",p)},n);return t?Object(i.createPortal)(d,t):d},t}(r.Component);_e.childContextTypes={getPortalPlacement:z.a.func};var Pe=Array.isArray,Ae=Object.keys,Me=Object.prototype.hasOwnProperty;function ze(e,t){try{return function e(t,n){if(t===n)return!0;if(t&&n&&"object"==typeof t&&"object"==typeof n){var r,o,i,a=Pe(t),l=Pe(n);if(a&&l){if((o=t.length)!=n.length)return!1;for(r=o;0!=r--;)if(!e(t[r],n[r]))return!1;return!0}if(a!=l)return!1;var u=t instanceof Date,c=n instanceof Date;if(u!=c)return!1;if(u&&c)return t.getTime()==n.getTime();var s=t instanceof RegExp,f=n instanceof RegExp;if(s!=f)return!1;if(s&&f)return t.toString()==n.toString();var 
p=Ae(t);if((o=p.length)!==Ae(n).length)return!1;for(r=o;0!=r--;)if(!Me.call(n,p[r]))return!1;for(r=o;0!=r--;)if(!("_owner"===(i=p[r])&&t.$$typeof||e(t[i],n[i])))return!1;return!0}return t!=t&&n!=n}(e,t)}catch(e){if(e.message&&e.message.match(/stack|recursion/i))return console.warn("Warning: react-fast-compare does not handle circular references.",e.name,e.message),!1;throw e}}function Fe(){return(Fe=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}function je(){var e=function(e,t){t||(t=e.slice(0));return e.raw=t,e}(["\n 0%, 80%, 100% { opacity: 0; }\n 40% { opacity: 1; }\n"]);return je=function(){return e},e}function De(){return(De=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}var Re={name:"19bqh2r",styles:"display:inline-block;fill:currentColor;line-height:1;stroke:currentColor;stroke-width:0;"},Ie=function(e){var t=e.size,n=function(e,t){if(null==e)return{};var n,r,o={},i=Object.keys(e);for(r=0;r<i.length;r++)n=i[r],t.indexOf(n)>=0||(o[n]=e[n]);return o}(e,["size"]);return Object(ie.d)("svg",De({height:t,width:t,viewBox:"0 0 20 20","aria-hidden":"true",focusable:"false",css:Re},n))},Le=function(e){return Object(ie.d)(Ie,De({size:20},e),Object(ie.d)("path",{d:"M14.348 14.849c-0.469 0.469-1.229 0.469-1.697 0l-2.651-3.030-2.651 3.029c-0.469 0.469-1.229 0.469-1.697 0-0.469-0.469-0.469-1.229 0-1.697l2.758-3.15-2.759-3.152c-0.469-0.469-0.469-1.228 0-1.697s1.228-0.469 1.697 0l2.652 3.031 2.651-3.031c0.469-0.469 1.228-0.469 1.697 0s0.469 1.229 0 1.697l-2.758 3.152 2.758 3.15c0.469 0.469 0.469 1.229 0 1.698z"}))},Ne=function(e){return Object(ie.d)(Ie,De({size:20},e),Object(ie.d)("path",{d:"M4.516 7.548c0.436-0.446 1.043-0.481 1.576 0l3.908 3.747 3.908-3.747c0.533-0.481 1.141-0.446 1.574 0 0.436 0.445 0.408 1.197 0 1.615-0.406 0.418-4.695 4.502-4.695 4.502-0.217 0.223-0.502 0.335-0.787 0.335s-0.57-0.112-0.789-0.335c0 0-4.287-4.084-4.695-4.502s-0.436-1.17 0-1.615z"}))},Ve=function(e){var t=e.isFocused,n=e.theme,r=n.spacing.baseUnit,o=n.colors;return{label:"indicatorContainer",color:t?o.neutral60:o.neutral20,display:"flex",padding:2*r,transition:"color 150ms",":hover":{color:t?o.neutral80:o.neutral40}}},He=Ve,Be=Ve,We=Object(ie.e)(je()),Ue=function(e){var t=e.delay,n=e.offset;return Object(ie.d)("span",{css:Object(me.a)({animation:We+" 1s ease-in-out "+t+"ms infinite;",backgroundColor:"currentColor",borderRadius:"1em",display:"inline-block",marginLeft:n?"1em":null,height:"1em",verticalAlign:"top",width:"1em"},"")})},$e=function(e){var t=e.className,n=e.cx,r=e.getStyles,o=e.innerProps,i=e.isRtl;return Object(ie.d)("div",De({},o,{css:r("loadingIndicator",e),className:n({indicator:!0,"loading-indicator":!0},t)}),Object(ie.d)(Ue,{delay:0,offset:i}),Object(ie.d)(Ue,{delay:160,offset:!0}),Object(ie.d)(Ue,{delay:320,offset:!i}))};function Ye(){return(Ye=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}$e.defaultProps={size:4};function Qe(){return(Qe=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}function Xe(){return(Xe=Object.assign||function(e){for(var 
t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}var Ge=function(e){return{label:"input",background:0,border:0,fontSize:"inherit",opacity:e?0:1,outline:0,padding:0,color:"inherit"}};function qe(){return(qe=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}var Ke=function(e){var t=e.children,n=e.innerProps;return Object(ie.d)("div",n,t)},Ze=Ke,Je=Ke;var et=function(e){var t=e.children,n=e.className,r=e.components,o=e.cx,i=e.data,a=e.getStyles,l=e.innerProps,u=e.isDisabled,c=e.removeProps,s=e.selectProps,f=r.Container,p=r.Label,d=r.Remove;return Object(ie.d)(ie.b,null,(function(r){var h=r.css,m=r.cx;return Object(ie.d)(f,{data:i,innerProps:qe({},l,{className:m(h(a("multiValue",e)),o({"multi-value":!0,"multi-value--is-disabled":u},n))}),selectProps:s},Object(ie.d)(p,{data:i,innerProps:{className:m(h(a("multiValueLabel",e)),o({"multi-value__label":!0},n))},selectProps:s},t),Object(ie.d)(d,{data:i,innerProps:qe({className:m(h(a("multiValueRemove",e)),o({"multi-value__remove":!0},n))},c),selectProps:s}))}))};function tt(){return(tt=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}et.defaultProps={cropWithEllipsis:!0};function nt(){return(nt=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}function rt(){return(rt=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}function ot(){return(ot=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}var it={ClearIndicator:function(e){var t=e.children,n=e.className,r=e.cx,o=e.getStyles,i=e.innerProps;return Object(ie.d)("div",De({},i,{css:o("clearIndicator",e),className:r({indicator:!0,"clear-indicator":!0},n)}),t||Object(ie.d)(Le,null))},Control:function(e){var t=e.children,n=e.cx,r=e.getStyles,o=e.className,i=e.isDisabled,a=e.isFocused,l=e.innerRef,u=e.innerProps,c=e.menuIsOpen;return Object(ie.d)("div",Ye({ref:l,css:r("control",e),className:n({control:!0,"control--is-disabled":i,"control--is-focused":a,"control--menu-is-open":c},o)},u),t)},DropdownIndicator:function(e){var t=e.children,n=e.className,r=e.cx,o=e.getStyles,i=e.innerProps;return Object(ie.d)("div",De({},i,{css:o("dropdownIndicator",e),className:r({indicator:!0,"dropdown-indicator":!0},n)}),t||Object(ie.d)(Ne,null))},DownChevron:Ne,CrossIcon:Le,Group:function(e){var t=e.children,n=e.className,r=e.cx,o=e.getStyles,i=e.Heading,a=e.headingProps,l=e.label,u=e.theme,c=e.selectProps;return Object(ie.d)("div",{css:o("group",e),className:r({group:!0},n)},Object(ie.d)(i,Qe({},a,{selectProps:c,theme:u,getStyles:o,cx:r}),l),Object(ie.d)("div",null,t))},GroupHeading:function(e){var t=e.className,n=e.cx,r=e.getStyles,o=e.theme,i=(e.selectProps,function(e,t){if(null==e)return{};var n,r,o={},i=Object.keys(e);for(r=0;r<i.length;r++)n=i[r],t.indexOf(n)>=0||(o[n]=e[n]);return o}(e,["className","cx","getStyles","theme","selectProps"]));return 
Object(ie.d)("div",Qe({css:r("groupHeading",Qe({theme:o},i)),className:n({"group-heading":!0},t)},i))},IndicatorsContainer:function(e){var t=e.children,n=e.className,r=e.cx,o=e.getStyles;return Object(ie.d)("div",{css:o("indicatorsContainer",e),className:r({indicators:!0},n)},t)},IndicatorSeparator:function(e){var t=e.className,n=e.cx,r=e.getStyles,o=e.innerProps;return Object(ie.d)("span",De({},o,{css:r("indicatorSeparator",e),className:n({"indicator-separator":!0},t)}))},Input:function(e){var t=e.className,n=e.cx,r=e.getStyles,o=e.innerRef,i=e.isHidden,a=e.isDisabled,l=e.theme,u=(e.selectProps,function(e,t){if(null==e)return{};var n,r,o={},i=Object.keys(e);for(r=0;r<i.length;r++)n=i[r],t.indexOf(n)>=0||(o[n]=e[n]);return o}(e,["className","cx","getStyles","innerRef","isHidden","isDisabled","theme","selectProps"]));return Object(ie.d)("div",{css:r("input",Xe({theme:l},u))},Object(ie.d)(ve.a,Xe({className:n({input:!0},t),inputRef:o,inputStyle:Ge(i),disabled:a},u)))},LoadingIndicator:$e,Menu:function(e){var t=e.children,n=e.className,r=e.cx,o=e.getStyles,i=e.innerRef,a=e.innerProps;return Object(ie.d)("div",be({css:o("menu",e),className:r({menu:!0},n)},a,{ref:i}),t)},MenuList:function(e){var t=e.children,n=e.className,r=e.cx,o=e.getStyles,i=e.isMulti,a=e.innerRef;return Object(ie.d)("div",{css:o("menuList",e),className:r({"menu-list":!0,"menu-list--is-multi":i},n),ref:a},t)},MenuPortal:_e,LoadingMessage:Te,NoOptionsMessage:Ce,MultiValue:et,MultiValueContainer:Ze,MultiValueLabel:Je,MultiValueRemove:function(e){var t=e.children,n=e.innerProps;return Object(ie.d)("div",n,t||Object(ie.d)(Le,{size:14}))},Option:function(e){var t=e.children,n=e.className,r=e.cx,o=e.getStyles,i=e.isDisabled,a=e.isFocused,l=e.isSelected,u=e.innerRef,c=e.innerProps;return Object(ie.d)("div",tt({css:o("option",e),className:r({option:!0,"option--is-disabled":i,"option--is-focused":a,"option--is-selected":l},n),ref:u},c),t)},Placeholder:function(e){var t=e.children,n=e.className,r=e.cx,o=e.getStyles,i=e.innerProps;return Object(ie.d)("div",nt({css:o("placeholder",e),className:r({placeholder:!0},n)},i),t)},SelectContainer:function(e){var t=e.children,n=e.className,r=e.cx,o=e.getStyles,i=e.innerProps,a=e.isDisabled,l=e.isRtl;return Object(ie.d)("div",Fe({css:o("container",e),className:r({"--is-disabled":a,"--is-rtl":l},n)},i),t)},SingleValue:function(e){var t=e.children,n=e.className,r=e.cx,o=e.getStyles,i=e.isDisabled,a=e.innerProps;return Object(ie.d)("div",rt({css:o("singleValue",e),className:r({"single-value":!0,"single-value--is-disabled":i},n)},a),t)},ValueContainer:function(e){var t=e.children,n=e.className,r=e.cx,o=e.isMulti,i=e.getStyles,a=e.hasValue;return Object(ie.d)("div",{css:i("valueContainer",e),className:r({"value-container":!0,"value-container--is-multi":o,"value-container--has-value":a},n)},t)}};function at(e,t){if(e.length!==t.length)return!1;for(var n=0;n<e.length;n++)if(e[n]!==t[n])return!1;return!0}var lt=function(e,t){var n;void 0===t&&(t=at);var r,o=[],i=!1;return function(){for(var a=[],l=0;l<arguments.length;l++)a[l]=arguments[l];return 
i&&n===this&&t(a,o)||(r=e.apply(this,a),i=!0,n=this,o=a),r}},ut=[{base:"A",letters:/[\u0041\u24B6\uFF21\u00C0\u00C1\u00C2\u1EA6\u1EA4\u1EAA\u1EA8\u00C3\u0100\u0102\u1EB0\u1EAE\u1EB4\u1EB2\u0226\u01E0\u00C4\u01DE\u1EA2\u00C5\u01FA\u01CD\u0200\u0202\u1EA0\u1EAC\u1EB6\u1E00\u0104\u023A\u2C6F]/g},{base:"AA",letters:/[\uA732]/g},{base:"AE",letters:/[\u00C6\u01FC\u01E2]/g},{base:"AO",letters:/[\uA734]/g},{base:"AU",letters:/[\uA736]/g},{base:"AV",letters:/[\uA738\uA73A]/g},{base:"AY",letters:/[\uA73C]/g},{base:"B",letters:/[\u0042\u24B7\uFF22\u1E02\u1E04\u1E06\u0243\u0182\u0181]/g},{base:"C",letters:/[\u0043\u24B8\uFF23\u0106\u0108\u010A\u010C\u00C7\u1E08\u0187\u023B\uA73E]/g},{base:"D",letters:/[\u0044\u24B9\uFF24\u1E0A\u010E\u1E0C\u1E10\u1E12\u1E0E\u0110\u018B\u018A\u0189\uA779]/g},{base:"DZ",letters:/[\u01F1\u01C4]/g},{base:"Dz",letters:/[\u01F2\u01C5]/g},{base:"E",letters:/[\u0045\u24BA\uFF25\u00C8\u00C9\u00CA\u1EC0\u1EBE\u1EC4\u1EC2\u1EBC\u0112\u1E14\u1E16\u0114\u0116\u00CB\u1EBA\u011A\u0204\u0206\u1EB8\u1EC6\u0228\u1E1C\u0118\u1E18\u1E1A\u0190\u018E]/g},{base:"F",letters:/[\u0046\u24BB\uFF26\u1E1E\u0191\uA77B]/g},{base:"G",letters:/[\u0047\u24BC\uFF27\u01F4\u011C\u1E20\u011E\u0120\u01E6\u0122\u01E4\u0193\uA7A0\uA77D\uA77E]/g},{base:"H",letters:/[\u0048\u24BD\uFF28\u0124\u1E22\u1E26\u021E\u1E24\u1E28\u1E2A\u0126\u2C67\u2C75\uA78D]/g},{base:"I",letters:/[\u0049\u24BE\uFF29\u00CC\u00CD\u00CE\u0128\u012A\u012C\u0130\u00CF\u1E2E\u1EC8\u01CF\u0208\u020A\u1ECA\u012E\u1E2C\u0197]/g},{base:"J",letters:/[\u004A\u24BF\uFF2A\u0134\u0248]/g},{base:"K",letters:/[\u004B\u24C0\uFF2B\u1E30\u01E8\u1E32\u0136\u1E34\u0198\u2C69\uA740\uA742\uA744\uA7A2]/g},{base:"L",letters:/[\u004C\u24C1\uFF2C\u013F\u0139\u013D\u1E36\u1E38\u013B\u1E3C\u1E3A\u0141\u023D\u2C62\u2C60\uA748\uA746\uA780]/g},{base:"LJ",letters:/[\u01C7]/g},{base:"Lj",letters:/[\u01C8]/g},{base:"M",letters:/[\u004D\u24C2\uFF2D\u1E3E\u1E40\u1E42\u2C6E\u019C]/g},{base:"N",letters:/[\u004E\u24C3\uFF2E\u01F8\u0143\u00D1\u1E44\u0147\u1E46\u0145\u1E4A\u1E48\u0220\u019D\uA790\uA7A4]/g},{base:"NJ",letters:/[\u01CA]/g},{base:"Nj",letters:/[\u01CB]/g},{base:"O",letters:/[\u004F\u24C4\uFF2F\u00D2\u00D3\u00D4\u1ED2\u1ED0\u1ED6\u1ED4\u00D5\u1E4C\u022C\u1E4E\u014C\u1E50\u1E52\u014E\u022E\u0230\u00D6\u022A\u1ECE\u0150\u01D1\u020C\u020E\u01A0\u1EDC\u1EDA\u1EE0\u1EDE\u1EE2\u1ECC\u1ED8\u01EA\u01EC\u00D8\u01FE\u0186\u019F\uA74A\uA74C]/g},{base:"OI",letters:/[\u01A2]/g},{base:"OO",letters:/[\uA74E]/g},{base:"OU",letters:/[\u0222]/g},{base:"P",letters:/[\u0050\u24C5\uFF30\u1E54\u1E56\u01A4\u2C63\uA750\uA752\uA754]/g},{base:"Q",letters:/[\u0051\u24C6\uFF31\uA756\uA758\u024A]/g},{base:"R",letters:/[\u0052\u24C7\uFF32\u0154\u1E58\u0158\u0210\u0212\u1E5A\u1E5C\u0156\u1E5E\u024C\u2C64\uA75A\uA7A6\uA782]/g},{base:"S",letters:/[\u0053\u24C8\uFF33\u1E9E\u015A\u1E64\u015C\u1E60\u0160\u1E66\u1E62\u1E68\u0218\u015E\u2C7E\uA7A8\uA784]/g},{base:"T",letters:/[\u0054\u24C9\uFF34\u1E6A\u0164\u1E6C\u021A\u0162\u1E70\u1E6E\u0166\u01AC\u01AE\u023E\uA786]/g},{base:"TZ",letters:/[\uA728]/g},{base:"U",letters:/[\u0055\u24CA\uFF35\u00D9\u00DA\u00DB\u0168\u1E78\u016A\u1E7A\u016C\u00DC\u01DB\u01D7\u01D5\u01D9\u1EE6\u016E\u0170\u01D3\u0214\u0216\u01AF\u1EEA\u1EE8\u1EEE\u1EEC\u1EF0\u1EE4\u1E72\u0172\u1E76\u1E74\u0244]/g},{base:"V",letters:/[\u0056\u24CB\uFF36\u1E7C\u1E7E\u01B2\uA75E\u0245]/g},{base:"VY",letters:/[\uA760]/g},{base:"W",letters:/[\u0057\u24CC\uFF37\u1E80\u1E82\u0174\u1E86\u1E84\u1E88\u2C72]/g},{base:"X",letters:/[\u0058\u24CD\uFF38\u1E8A\u1E8C]/g},{base:"Y",letters:/[\u0059\u24CE\uFF
39\u1EF2\u00DD\u0176\u1EF8\u0232\u1E8E\u0178\u1EF6\u1EF4\u01B3\u024E\u1EFE]/g},{base:"Z",letters:/[\u005A\u24CF\uFF3A\u0179\u1E90\u017B\u017D\u1E92\u1E94\u01B5\u0224\u2C7F\u2C6B\uA762]/g},{base:"a",letters:/[\u0061\u24D0\uFF41\u1E9A\u00E0\u00E1\u00E2\u1EA7\u1EA5\u1EAB\u1EA9\u00E3\u0101\u0103\u1EB1\u1EAF\u1EB5\u1EB3\u0227\u01E1\u00E4\u01DF\u1EA3\u00E5\u01FB\u01CE\u0201\u0203\u1EA1\u1EAD\u1EB7\u1E01\u0105\u2C65\u0250]/g},{base:"aa",letters:/[\uA733]/g},{base:"ae",letters:/[\u00E6\u01FD\u01E3]/g},{base:"ao",letters:/[\uA735]/g},{base:"au",letters:/[\uA737]/g},{base:"av",letters:/[\uA739\uA73B]/g},{base:"ay",letters:/[\uA73D]/g},{base:"b",letters:/[\u0062\u24D1\uFF42\u1E03\u1E05\u1E07\u0180\u0183\u0253]/g},{base:"c",letters:/[\u0063\u24D2\uFF43\u0107\u0109\u010B\u010D\u00E7\u1E09\u0188\u023C\uA73F\u2184]/g},{base:"d",letters:/[\u0064\u24D3\uFF44\u1E0B\u010F\u1E0D\u1E11\u1E13\u1E0F\u0111\u018C\u0256\u0257\uA77A]/g},{base:"dz",letters:/[\u01F3\u01C6]/g},{base:"e",letters:/[\u0065\u24D4\uFF45\u00E8\u00E9\u00EA\u1EC1\u1EBF\u1EC5\u1EC3\u1EBD\u0113\u1E15\u1E17\u0115\u0117\u00EB\u1EBB\u011B\u0205\u0207\u1EB9\u1EC7\u0229\u1E1D\u0119\u1E19\u1E1B\u0247\u025B\u01DD]/g},{base:"f",letters:/[\u0066\u24D5\uFF46\u1E1F\u0192\uA77C]/g},{base:"g",letters:/[\u0067\u24D6\uFF47\u01F5\u011D\u1E21\u011F\u0121\u01E7\u0123\u01E5\u0260\uA7A1\u1D79\uA77F]/g},{base:"h",letters:/[\u0068\u24D7\uFF48\u0125\u1E23\u1E27\u021F\u1E25\u1E29\u1E2B\u1E96\u0127\u2C68\u2C76\u0265]/g},{base:"hv",letters:/[\u0195]/g},{base:"i",letters:/[\u0069\u24D8\uFF49\u00EC\u00ED\u00EE\u0129\u012B\u012D\u00EF\u1E2F\u1EC9\u01D0\u0209\u020B\u1ECB\u012F\u1E2D\u0268\u0131]/g},{base:"j",letters:/[\u006A\u24D9\uFF4A\u0135\u01F0\u0249]/g},{base:"k",letters:/[\u006B\u24DA\uFF4B\u1E31\u01E9\u1E33\u0137\u1E35\u0199\u2C6A\uA741\uA743\uA745\uA7A3]/g},{base:"l",letters:/[\u006C\u24DB\uFF4C\u0140\u013A\u013E\u1E37\u1E39\u013C\u1E3D\u1E3B\u017F\u0142\u019A\u026B\u2C61\uA749\uA781\uA747]/g},{base:"lj",letters:/[\u01C9]/g},{base:"m",letters:/[\u006D\u24DC\uFF4D\u1E3F\u1E41\u1E43\u0271\u026F]/g},{base:"n",letters:/[\u006E\u24DD\uFF4E\u01F9\u0144\u00F1\u1E45\u0148\u1E47\u0146\u1E4B\u1E49\u019E\u0272\u0149\uA791\uA7A5]/g},{base:"nj",letters:/[\u01CC]/g},{base:"o",letters:/[\u006F\u24DE\uFF4F\u00F2\u00F3\u00F4\u1ED3\u1ED1\u1ED7\u1ED5\u00F5\u1E4D\u022D\u1E4F\u014D\u1E51\u1E53\u014F\u022F\u0231\u00F6\u022B\u1ECF\u0151\u01D2\u020D\u020F\u01A1\u1EDD\u1EDB\u1EE1\u1EDF\u1EE3\u1ECD\u1ED9\u01EB\u01ED\u00F8\u01FF\u0254\uA74B\uA74D\u0275]/g},{base:"oi",letters:/[\u01A3]/g},{base:"ou",letters:/[\u0223]/g},{base:"oo",letters:/[\uA74F]/g},{base:"p",letters:/[\u0070\u24DF\uFF50\u1E55\u1E57\u01A5\u1D7D\uA751\uA753\uA755]/g},{base:"q",letters:/[\u0071\u24E0\uFF51\u024B\uA757\uA759]/g},{base:"r",letters:/[\u0072\u24E1\uFF52\u0155\u1E59\u0159\u0211\u0213\u1E5B\u1E5D\u0157\u1E5F\u024D\u027D\uA75B\uA7A7\uA783]/g},{base:"s",letters:/[\u0073\u24E2\uFF53\u00DF\u015B\u1E65\u015D\u1E61\u0161\u1E67\u1E63\u1E69\u0219\u015F\u023F\uA7A9\uA785\u1E9B]/g},{base:"t",letters:/[\u0074\u24E3\uFF54\u1E6B\u1E97\u0165\u1E6D\u021B\u0163\u1E71\u1E6F\u0167\u01AD\u0288\u2C66\uA787]/g},{base:"tz",letters:/[\uA729]/g},{base:"u",letters:/[\u0075\u24E4\uFF55\u00F9\u00FA\u00FB\u0169\u1E79\u016B\u1E7B\u016D\u00FC\u01DC\u01D8\u01D6\u01DA\u1EE7\u016F\u0171\u01D4\u0215\u0217\u01B0\u1EEB\u1EE9\u1EEF\u1EED\u1EF1\u1EE5\u1E73\u0173\u1E77\u1E75\u0289]/g},{base:"v",letters:/[\u0076\u24E5\uFF56\u1E7D\u1E7F\u028B\uA75F\u028C]/g},{base:"vy",letters:/[\uA761]/g},{base:"w",letters:/[\u0077\u24E6\uFF57\u1E81\u1E83\u0175\u1E87\u1E85\
u1E98\u1E89\u2C73]/g},{base:"x",letters:/[\u0078\u24E7\uFF58\u1E8B\u1E8D]/g},{base:"y",letters:/[\u0079\u24E8\uFF59\u1EF3\u00FD\u0177\u1EF9\u0233\u1E8F\u00FF\u1EF7\u1E99\u1EF5\u01B4\u024F\u1EFF]/g},{base:"z",letters:/[\u007A\u24E9\uFF5A\u017A\u1E91\u017C\u017E\u1E93\u1E95\u01B6\u0225\u0240\u2C6C\uA763]/g}],ct=function(e){for(var t=0;t<ut.length;t++)e=e.replace(ut[t].letters,ut[t].base);return e};function st(){return(st=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}var ft=function(e){return e.replace(/^\s+|\s+$/g,"")},pt=function(e){return e.label+" "+e.value};function dt(){return(dt=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}var ht={name:"1laao21-a11yText",styles:"label:a11yText;z-index:9999;border:0;clip:rect(1px, 1px, 1px, 1px);height:1px;width:1px;position:absolute;overflow:hidden;padding:0;white-space:nowrap;"},mt=function(e){return Object(ie.d)("span",dt({css:ht},e))};function gt(){return(gt=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}function vt(e){e.in,e.out,e.onExited,e.appear,e.enter,e.exit;var t=e.innerRef,n=(e.emotion,function(e,t){if(null==e)return{};var n,r,o={},i=Object.keys(e);for(r=0;r<i.length;r++)n=i[r],t.indexOf(n)>=0||(o[n]=e[n]);return o}(e,["in","out","onExited","appear","enter","exit","innerRef","emotion"]));return Object(ie.d)("input",gt({ref:t},n,{css:Object(me.a)({label:"dummyInput",background:0,border:0,fontSize:"inherit",outline:0,padding:0,width:1,color:"transparent",left:-100,opacity:0,position:"relative",transform:"scale(0)"},"")}))}var bt=function(e){var t,n;function r(){return e.apply(this,arguments)||this}n=e,(t=r).prototype=Object.create(n.prototype),t.prototype.constructor=t,t.__proto__=n;var o=r.prototype;return o.componentDidMount=function(){this.props.innerRef(Object(i.findDOMNode)(this))},o.componentWillUnmount=function(){this.props.innerRef(null)},o.render=function(){return this.props.children},r}(r.Component),yt=["boxSizing","height","overflow","paddingRight","position"],wt={boxSizing:"border-box",overflow:"hidden",position:"relative",height:"100%"};function xt(e){e.preventDefault()}function Et(e){e.stopPropagation()}function kt(){var e=this.scrollTop,t=this.scrollHeight,n=e+this.offsetHeight;0===e?this.scrollTop=1:n===t&&(this.scrollTop=e-1)}function St(){return"ontouchstart"in window||navigator.maxTouchPoints}var Ot=!(!window.document||!window.document.createElement),Ct=0,Tt=function(e){var t,n;function r(){for(var t,n=arguments.length,r=new Array(n),o=0;o<n;o++)r[o]=arguments[o];return(t=e.call.apply(e,[this].concat(r))||this).originalStyles={},t.listenerOptions={capture:!1,passive:!1},t}n=e,(t=r).prototype=Object.create(n.prototype),t.prototype.constructor=t,t.__proto__=n;var o=r.prototype;return o.componentDidMount=function(){var e=this;if(Ot){var t=this.props,n=t.accountForScrollbars,r=t.touchScrollTarget,o=document.body,i=o&&o.style;if(n&&yt.forEach((function(t){var n=i&&i[t];e.originalStyles[t]=n})),n&&Ct<1){var a=parseInt(this.originalStyles.paddingRight,10)||0,l=document.body?document.body.clientWidth:0,u=window.innerWidth-l+a||0;Object.keys(wt).forEach((function(e){var 
t=wt[e];i&&(i[e]=t)})),i&&(i.paddingRight=u+"px")}o&&St()&&(o.addEventListener("touchmove",xt,this.listenerOptions),r&&(r.addEventListener("touchstart",kt,this.listenerOptions),r.addEventListener("touchmove",Et,this.listenerOptions))),Ct+=1}},o.componentWillUnmount=function(){var e=this;if(Ot){var t=this.props,n=t.accountForScrollbars,r=t.touchScrollTarget,o=document.body,i=o&&o.style;Ct=Math.max(Ct-1,0),n&&Ct<1&&yt.forEach((function(t){var n=e.originalStyles[t];i&&(i[t]=n)})),o&&St()&&(o.removeEventListener("touchmove",xt,this.listenerOptions),r&&(r.removeEventListener("touchstart",kt,this.listenerOptions),r.removeEventListener("touchmove",Et,this.listenerOptions)))}},o.render=function(){return null},r}(r.Component);Tt.defaultProps={accountForScrollbars:!0};var _t={name:"1dsbpcp",styles:"position:fixed;left:0;bottom:0;right:0;top:0;"},Pt=function(e){var t,n;function r(){for(var t,n=arguments.length,r=new Array(n),o=0;o<n;o++)r[o]=arguments[o];return(t=e.call.apply(e,[this].concat(r))||this).state={touchScrollTarget:null},t.getScrollTarget=function(e){e!==t.state.touchScrollTarget&&t.setState({touchScrollTarget:e})},t.blurSelectInput=function(){document.activeElement&&document.activeElement.blur()},t}return n=e,(t=r).prototype=Object.create(n.prototype),t.prototype.constructor=t,t.__proto__=n,r.prototype.render=function(){var e=this.props,t=e.children,n=e.isEnabled,r=this.state.touchScrollTarget;return n?Object(ie.d)("div",null,Object(ie.d)("div",{onClick:this.blurSelectInput,css:_t}),Object(ie.d)(bt,{innerRef:this.getScrollTarget},t),r?Object(ie.d)(Tt,{touchScrollTarget:r}):null):t},r}(r.PureComponent);var At=function(e){var t,n;function r(){for(var t,n=arguments.length,r=new Array(n),o=0;o<n;o++)r[o]=arguments[o];return(t=e.call.apply(e,[this].concat(r))||this).isBottom=!1,t.isTop=!1,t.scrollTarget=void 0,t.touchStart=void 0,t.cancelScroll=function(e){e.preventDefault(),e.stopPropagation()},t.handleEventDelta=function(e,n){var r=t.props,o=r.onBottomArrive,i=r.onBottomLeave,a=r.onTopArrive,l=r.onTopLeave,u=t.scrollTarget,c=u.scrollTop,s=u.scrollHeight,f=u.clientHeight,p=t.scrollTarget,d=n>0,h=s-f-c,m=!1;h>n&&t.isBottom&&(i&&i(e),t.isBottom=!1),d&&t.isTop&&(l&&l(e),t.isTop=!1),d&&n>h?(o&&!t.isBottom&&o(e),p.scrollTop=s,m=!0,t.isBottom=!0):!d&&-n>c&&(a&&!t.isTop&&a(e),p.scrollTop=0,m=!0,t.isTop=!0),m&&t.cancelScroll(e)},t.onWheel=function(e){t.handleEventDelta(e,e.deltaY)},t.onTouchStart=function(e){t.touchStart=e.changedTouches[0].clientY},t.onTouchMove=function(e){var n=t.touchStart-e.changedTouches[0].clientY;t.handleEventDelta(e,n)},t.getScrollTarget=function(e){t.scrollTarget=e},t}n=e,(t=r).prototype=Object.create(n.prototype),t.prototype.constructor=t,t.__proto__=n;var i=r.prototype;return i.componentDidMount=function(){this.startListening(this.scrollTarget)},i.componentWillUnmount=function(){this.stopListening(this.scrollTarget)},i.startListening=function(e){e&&("function"==typeof e.addEventListener&&e.addEventListener("wheel",this.onWheel,!1),"function"==typeof e.addEventListener&&e.addEventListener("touchstart",this.onTouchStart,!1),"function"==typeof e.addEventListener&&e.addEventListener("touchmove",this.onTouchMove,!1))},i.stopListening=function(e){"function"==typeof e.removeEventListener&&e.removeEventListener("wheel",this.onWheel,!1),"function"==typeof e.removeEventListener&&e.removeEventListener("touchstart",this.onTouchStart,!1),"function"==typeof e.removeEventListener&&e.removeEventListener("touchmove",this.onTouchMove,!1)},i.render=function(){return 
o.a.createElement(bt,{innerRef:this.getScrollTarget},this.props.children)},r}(r.Component);function Mt(e){var t=e.isEnabled,n=void 0===t||t,r=function(e,t){if(null==e)return{};var n,r,o={},i=Object.keys(e);for(r=0;r<i.length;r++)n=i[r],t.indexOf(n)>=0||(o[n]=e[n]);return o}(e,["isEnabled"]);return n?o.a.createElement(At,r):r.children}var zt=function(e,t){void 0===t&&(t={});var n=t,r=n.isSearchable,o=n.isMulti,i=n.label,a=n.isDisabled;switch(e){case"menu":return"Use Up and Down to choose options"+(a?"":", press Enter to select the currently focused option")+", press Escape to exit the menu, press Tab to select the option and exit the menu.";case"input":return(i||"Select")+" is focused "+(r?",type to refine list":"")+", press Down to open the menu, "+(o?" press left to focus selected values":"");case"value":return"Use left and right to toggle between focused values, press Backspace to remove the currently focused value"}},Ft=function(e,t){var n=t.value,r=t.isDisabled;if(n)switch(e){case"deselect-option":case"pop-value":case"remove-value":return"option "+n+", deselected.";case"select-option":return r?"option "+n+" is disabled. Select another option.":"option "+n+", selected."}},jt=function(e){return!!e.isDisabled};var Dt={clearIndicator:Be,container:function(e){var t=e.isDisabled;return{label:"container",direction:e.isRtl?"rtl":null,pointerEvents:t?"none":null,position:"relative"}},control:function(e){var t=e.isDisabled,n=e.isFocused,r=e.theme,o=r.colors,i=r.borderRadius,a=r.spacing;return{label:"control",alignItems:"center",backgroundColor:t?o.neutral5:o.neutral0,borderColor:t?o.neutral10:n?o.primary:o.neutral20,borderRadius:i,borderStyle:"solid",borderWidth:1,boxShadow:n?"0 0 0 1px "+o.primary:null,cursor:"default",display:"flex",flexWrap:"wrap",justifyContent:"space-between",minHeight:a.controlHeight,outline:"0 !important",position:"relative",transition:"all 100ms","&:hover":{borderColor:n?o.primary:o.neutral30}}},dropdownIndicator:He,group:function(e){var t=e.theme.spacing;return{paddingBottom:2*t.baseUnit,paddingTop:2*t.baseUnit}},groupHeading:function(e){var t=e.theme.spacing;return{label:"group",color:"#999",cursor:"default",display:"block",fontSize:"75%",fontWeight:"500",marginBottom:"0.25em",paddingLeft:3*t.baseUnit,paddingRight:3*t.baseUnit,textTransform:"uppercase"}},indicatorsContainer:function(){return{alignItems:"center",alignSelf:"stretch",display:"flex",flexShrink:0}},indicatorSeparator:function(e){var t=e.isDisabled,n=e.theme,r=n.spacing.baseUnit,o=n.colors;return{label:"indicatorSeparator",alignSelf:"stretch",backgroundColor:t?o.neutral10:o.neutral20,marginBottom:2*r,marginTop:2*r,width:1}},input:function(e){var t=e.isDisabled,n=e.theme,r=n.spacing,o=n.colors;return{margin:r.baseUnit/2,paddingBottom:r.baseUnit/2,paddingTop:r.baseUnit/2,visibility:t?"hidden":"visible",color:o.neutral80}},loadingIndicator:function(e){var t=e.isFocused,n=e.size,r=e.theme,o=r.colors,i=r.spacing.baseUnit;return{label:"loadingIndicator",color:t?o.neutral60:o.neutral20,display:"flex",padding:2*i,transition:"color 150ms",alignSelf:"center",fontSize:n,lineHeight:1,marginRight:n,textAlign:"center",verticalAlign:"middle"}},loadingMessage:Oe,menu:function(e){var t,n=e.placement,r=e.theme,o=r.borderRadius,i=r.spacing,a=r.colors;return(t={label:"menu"})[function(e){return e?{bottom:"top",top:"bottom"}[e]:"bottom"}(n)]="100%",t.backgroundColor=a.neutral0,t.borderRadius=o,t.boxShadow="0 0 0 1px hsla(0, 0%, 0%, 0.1), 0 4px 11px hsla(0, 0%, 0%, 
0.1)",t.marginBottom=i.menuGutter,t.marginTop=i.menuGutter,t.position="absolute",t.width="100%",t.zIndex=1,t},menuList:function(e){var t=e.maxHeight,n=e.theme.spacing.baseUnit;return{maxHeight:t,overflowY:"auto",paddingBottom:n,paddingTop:n,position:"relative",WebkitOverflowScrolling:"touch"}},menuPortal:function(e){var t=e.rect,n=e.offset,r=e.position;return{left:t.left,position:r,top:n,width:t.width,zIndex:1}},multiValue:function(e){var t=e.theme,n=t.spacing,r=t.borderRadius;return{label:"multiValue",backgroundColor:t.colors.neutral10,borderRadius:r/2,display:"flex",margin:n.baseUnit/2,minWidth:0}},multiValueLabel:function(e){var t=e.theme,n=t.borderRadius,r=t.colors,o=e.cropWithEllipsis;return{borderRadius:n/2,color:r.neutral80,fontSize:"85%",overflow:"hidden",padding:3,paddingLeft:6,textOverflow:o?"ellipsis":null,whiteSpace:"nowrap"}},multiValueRemove:function(e){var t=e.theme,n=t.spacing,r=t.borderRadius,o=t.colors;return{alignItems:"center",borderRadius:r/2,backgroundColor:e.isFocused&&o.dangerLight,display:"flex",paddingLeft:n.baseUnit,paddingRight:n.baseUnit,":hover":{backgroundColor:o.dangerLight,color:o.danger}}},noOptionsMessage:Se,option:function(e){var t=e.isDisabled,n=e.isFocused,r=e.isSelected,o=e.theme,i=o.spacing,a=o.colors;return{label:"option",backgroundColor:r?a.primary:n?a.primary25:"transparent",color:t?a.neutral20:r?a.neutral0:"inherit",cursor:"default",display:"block",fontSize:"inherit",padding:2*i.baseUnit+"px "+3*i.baseUnit+"px",width:"100%",userSelect:"none",WebkitTapHighlightColor:"rgba(0, 0, 0, 0)",":active":{backgroundColor:!t&&(r?a.primary:a.primary50)}}},placeholder:function(e){var t=e.theme,n=t.spacing;return{label:"placeholder",color:t.colors.neutral50,marginLeft:n.baseUnit/2,marginRight:n.baseUnit/2,position:"absolute",top:"50%",transform:"translateY(-50%)"}},singleValue:function(e){var t=e.isDisabled,n=e.theme,r=n.spacing,o=n.colors;return{label:"singleValue",color:t?o.neutral40:o.neutral80,marginLeft:r.baseUnit/2,marginRight:r.baseUnit/2,maxWidth:"calc(100% - "+2*r.baseUnit+"px)",overflow:"hidden",position:"absolute",textOverflow:"ellipsis",whiteSpace:"nowrap",top:"50%",transform:"translateY(-50%)"}},valueContainer:function(e){var t=e.theme.spacing;return{alignItems:"center",display:"flex",flex:1,flexWrap:"wrap",padding:t.baseUnit/2+"px "+2*t.baseUnit+"px",WebkitOverflowScrolling:"touch",position:"relative",overflow:"hidden"}}};var Rt={borderRadius:4,colors:{primary:"#2684FF",primary75:"#4C9AFF",primary50:"#B2D4FF",primary25:"#DEEBFF",danger:"#DE350B",dangerLight:"#FFBDAD",neutral0:"hsl(0, 0%, 100%)",neutral5:"hsl(0, 0%, 95%)",neutral10:"hsl(0, 0%, 90%)",neutral20:"hsl(0, 0%, 80%)",neutral30:"hsl(0, 0%, 70%)",neutral40:"hsl(0, 0%, 60%)",neutral50:"hsl(0, 0%, 50%)",neutral60:"hsl(0, 0%, 40%)",neutral70:"hsl(0, 0%, 30%)",neutral80:"hsl(0, 0%, 20%)",neutral90:"hsl(0, 0%, 10%)"},spacing:{baseUnit:4,controlHeight:38,menuGutter:8}};function It(){return(It=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}function Lt(e){if(void 0===e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return e}var Nt,Vt={backspaceRemovesValue:!0,blurInputOnSelect:he(),captureMenuScroll:!he(),closeMenuOnSelect:!0,closeMenuOnScroll:!1,components:{},controlShouldRenderValue:!0,escapeClearsValue:!1,filterOption:function(e,t){var 
n=st({ignoreCase:!0,ignoreAccents:!0,stringify:pt,trim:!0,matchFrom:"any"},Nt),r=n.ignoreCase,o=n.ignoreAccents,i=n.stringify,a=n.trim,l=n.matchFrom,u=a?ft(t):t,c=a?ft(i(e)):i(e);return r&&(u=u.toLowerCase(),c=c.toLowerCase()),o&&(u=ct(u),c=ct(c)),"start"===l?c.substr(0,u.length)===u:c.indexOf(u)>-1},formatGroupLabel:function(e){return e.label},getOptionLabel:function(e){return e.label},getOptionValue:function(e){return e.value},isDisabled:!1,isLoading:!1,isMulti:!1,isRtl:!1,isSearchable:!0,isOptionDisabled:jt,loadingMessage:function(){return"Loading..."},maxMenuHeight:300,minMenuHeight:140,menuIsOpen:!1,menuPlacement:"bottom",menuPosition:"absolute",menuShouldBlockScroll:!1,menuShouldScrollIntoView:!function(){try{return/Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent)}catch(e){return!1}}(),noOptionsMessage:function(){return"No options"},openMenuOnFocus:!1,openMenuOnClick:!0,options:[],pageSize:5,placeholder:"Select...",screenReaderStatus:function(e){var t=e.count;return t+" result"+(1!==t?"s":"")+" available"},styles:{},tabIndex:"0",tabSelectsValue:!0},Ht=1,Bt=function(e){var t,n;function r(t){var n;(n=e.call(this,t)||this).state={ariaLiveSelection:"",ariaLiveContext:"",focusedOption:null,focusedValue:null,inputIsHidden:!1,isFocused:!1,menuOptions:{render:[],focusable:[]},selectValue:[]},n.blockOptionHover=!1,n.isComposing=!1,n.clearFocusValueOnUpdate=!1,n.commonProps=void 0,n.components=void 0,n.hasGroups=!1,n.initialTouchX=0,n.initialTouchY=0,n.inputIsHiddenAfterUpdate=void 0,n.instancePrefix="",n.openAfterFocus=!1,n.scrollToFocusedOptionOnUpdate=!1,n.userIsDragging=void 0,n.controlRef=null,n.getControlRef=function(e){n.controlRef=e},n.focusedOptionRef=null,n.getFocusedOptionRef=function(e){n.focusedOptionRef=e},n.menuListRef=null,n.getMenuListRef=function(e){n.menuListRef=e},n.inputRef=null,n.getInputRef=function(e){n.inputRef=e},n.cacheComponents=function(e){n.components=ot({},it,{components:e}.components)},n.focus=n.focusInput,n.blur=n.blurInput,n.onChange=function(e,t){var r=n.props;(0,r.onChange)(e,It({},t,{name:r.name}))},n.setValue=function(e,t,r){void 0===t&&(t="set-value");var o=n.props,i=o.closeMenuOnSelect,a=o.isMulti;n.onInputChange("",{action:"set-value"}),i&&(n.inputIsHiddenAfterUpdate=!a,n.onMenuClose()),n.clearFocusValueOnUpdate=!0,n.onChange(e,{action:t,option:r})},n.selectOption=function(e){var t=n.props,r=t.blurInputOnSelect,o=t.isMulti,i=n.state.selectValue;if(o)if(n.isOptionSelected(e,i)){var a=n.getOptionValue(e);n.setValue(i.filter((function(e){return n.getOptionValue(e)!==a})),"deselect-option",e),n.announceAriaLiveSelection({event:"deselect-option",context:{value:n.getOptionLabel(e)}})}else n.isOptionDisabled(e,i)?n.announceAriaLiveSelection({event:"select-option",context:{value:n.getOptionLabel(e),isDisabled:!0}}):(n.setValue([].concat(i,[e]),"select-option",e),n.announceAriaLiveSelection({event:"select-option",context:{value:n.getOptionLabel(e)}}));else n.isOptionDisabled(e,i)?n.announceAriaLiveSelection({event:"select-option",context:{value:n.getOptionLabel(e),isDisabled:!0}}):(n.setValue(e,"select-option"),n.announceAriaLiveSelection({event:"select-option",context:{value:n.getOptionLabel(e)}}));r&&n.blurInput()},n.removeValue=function(e){var t=n.state.selectValue,r=n.getOptionValue(e),o=t.filter((function(e){return 
n.getOptionValue(e)!==r}));n.onChange(o.length?o:null,{action:"remove-value",removedValue:e}),n.announceAriaLiveSelection({event:"remove-value",context:{value:e?n.getOptionLabel(e):""}}),n.focusInput()},n.clearValue=function(){var e=n.props.isMulti;n.onChange(e?[]:null,{action:"clear"})},n.popValue=function(){var e=n.state.selectValue,t=e[e.length-1],r=e.slice(0,e.length-1);n.announceAriaLiveSelection({event:"pop-value",context:{value:t?n.getOptionLabel(t):""}}),n.onChange(r.length?r:null,{action:"pop-value",removedValue:t})},n.getOptionLabel=function(e){return n.props.getOptionLabel(e)},n.getOptionValue=function(e){return n.props.getOptionValue(e)},n.getStyles=function(e,t){var r=Dt[e](t);r.boxSizing="border-box";var o=n.props.styles[e];return o?o(r,t):r},n.getElementId=function(e){return n.instancePrefix+"-"+e},n.getActiveDescendentId=function(){var e=n.props.menuIsOpen,t=n.state,r=t.menuOptions,o=t.focusedOption;if(o&&e){var i=r.focusable.indexOf(o),a=r.render[i];return a&&a.key}},n.announceAriaLiveSelection=function(e){var t=e.event,r=e.context;n.setState({ariaLiveSelection:Ft(t,r)})},n.announceAriaLiveContext=function(e){var t=e.event,r=e.context;n.setState({ariaLiveContext:zt(t,It({},r,{label:n.props["aria-label"]}))})},n.onMenuMouseDown=function(e){0===e.button&&(e.stopPropagation(),e.preventDefault(),n.focusInput())},n.onMenuMouseMove=function(e){n.blockOptionHover=!1},n.onControlMouseDown=function(e){var t=n.props.openMenuOnClick;n.state.isFocused?n.props.menuIsOpen?"INPUT"!==e.target.tagName&&"TEXTAREA"!==e.target.tagName&&n.onMenuClose():t&&n.openMenu("first"):(t&&(n.openAfterFocus=!0),n.focusInput()),"INPUT"!==e.target.tagName&&"TEXTAREA"!==e.target.tagName&&e.preventDefault()},n.onDropdownIndicatorMouseDown=function(e){if(!(e&&"mousedown"===e.type&&0!==e.button||n.props.isDisabled)){var t=n.props,r=t.isMulti,o=t.menuIsOpen;n.focusInput(),o?(n.inputIsHiddenAfterUpdate=!r,n.onMenuClose()):n.openMenu("first"),e.preventDefault(),e.stopPropagation()}},n.onClearIndicatorMouseDown=function(e){e&&"mousedown"===e.type&&0!==e.button||(n.clearValue(),e.stopPropagation(),n.openAfterFocus=!1,"touchend"===e.type?n.focusInput():setTimeout((function(){return n.focusInput()})))},n.onScroll=function(e){"boolean"==typeof n.props.closeMenuOnScroll?e.target instanceof HTMLElement&&se(e.target)&&n.props.onMenuClose():"function"==typeof n.props.closeMenuOnScroll&&n.props.closeMenuOnScroll(e)&&n.props.onMenuClose()},n.onCompositionStart=function(){n.isComposing=!0},n.onCompositionEnd=function(){n.isComposing=!1},n.onTouchStart=function(e){var t=e.touches.item(0);t&&(n.initialTouchX=t.clientX,n.initialTouchY=t.clientY,n.userIsDragging=!1)},n.onTouchMove=function(e){var t=e.touches.item(0);if(t){var r=Math.abs(t.clientX-n.initialTouchX),o=Math.abs(t.clientY-n.initialTouchY);n.userIsDragging=r>5||o>5}},n.onTouchEnd=function(e){n.userIsDragging||(n.controlRef&&!n.controlRef.contains(e.target)&&n.menuListRef&&!n.menuListRef.contains(e.target)&&n.blurInput(),n.initialTouchX=0,n.initialTouchY=0)},n.onControlTouchEnd=function(e){n.userIsDragging||n.onControlMouseDown(e)},n.onClearIndicatorTouchEnd=function(e){n.userIsDragging||n.onClearIndicatorMouseDown(e)},n.onDropdownIndicatorTouchEnd=function(e){n.userIsDragging||n.onDropdownIndicatorMouseDown(e)},n.handleInputChange=function(e){var t=e.currentTarget.value;n.inputIsHiddenAfterUpdate=!1,n.onInputChange(t,{action:"input-change"}),n.onMenuOpen()},n.onInputFocus=function(e){var 
t=n.props,r=t.isSearchable,o=t.isMulti;n.props.onFocus&&n.props.onFocus(e),n.inputIsHiddenAfterUpdate=!1,n.announceAriaLiveContext({event:"input",context:{isSearchable:r,isMulti:o}}),n.setState({isFocused:!0}),(n.openAfterFocus||n.props.openMenuOnFocus)&&n.openMenu("first"),n.openAfterFocus=!1},n.onInputBlur=function(e){n.menuListRef&&n.menuListRef.contains(document.activeElement)?n.inputRef.focus():(n.props.onBlur&&n.props.onBlur(e),n.onInputChange("",{action:"input-blur"}),n.onMenuClose(),n.setState({focusedValue:null,isFocused:!1}))},n.onOptionHover=function(e){n.blockOptionHover||n.state.focusedOption===e||n.setState({focusedOption:e})},n.shouldHideSelectedOptions=function(){var e=n.props,t=e.hideSelectedOptions,r=e.isMulti;return void 0===t?r:t},n.onKeyDown=function(e){var t=n.props,r=t.isMulti,o=t.backspaceRemovesValue,i=t.escapeClearsValue,a=t.inputValue,l=t.isClearable,u=t.isDisabled,c=t.menuIsOpen,s=t.onKeyDown,f=t.tabSelectsValue,p=t.openMenuOnFocus,d=n.state,h=d.focusedOption,m=d.focusedValue,g=d.selectValue;if(!(u||"function"==typeof s&&(s(e),e.defaultPrevented))){switch(n.blockOptionHover=!0,e.key){case"ArrowLeft":if(!r||a)return;n.focusValue("previous");break;case"ArrowRight":if(!r||a)return;n.focusValue("next");break;case"Delete":case"Backspace":if(a)return;if(m)n.removeValue(m);else{if(!o)return;r?n.popValue():l&&n.clearValue()}break;case"Tab":if(n.isComposing)return;if(e.shiftKey||!c||!f||!h||p&&n.isOptionSelected(h,g))return;n.selectOption(h);break;case"Enter":if(229===e.keyCode)break;if(c){if(!h)return;if(n.isComposing)return;n.selectOption(h);break}return;case"Escape":c?(n.inputIsHiddenAfterUpdate=!1,n.onInputChange("",{action:"menu-close"}),n.onMenuClose()):l&&i&&n.clearValue();break;case" ":if(a)return;if(!c){n.openMenu("first");break}if(!h)return;n.selectOption(h);break;case"ArrowUp":c?n.focusOption("up"):n.openMenu("last");break;case"ArrowDown":c?n.focusOption("down"):n.openMenu("first");break;case"PageUp":if(!c)return;n.focusOption("pageup");break;case"PageDown":if(!c)return;n.focusOption("pagedown");break;case"Home":if(!c)return;n.focusOption("first");break;case"End":if(!c)return;n.focusOption("last");break;default:return}e.preventDefault()}},n.buildMenuOptions=function(e,t){var r=e.inputValue,o=void 0===r?"":r,i=e.options,a=function(e,r){var i=n.isOptionDisabled(e,t),a=n.isOptionSelected(e,t),l=n.getOptionLabel(e),u=n.getOptionValue(e);if(!(n.shouldHideSelectedOptions()&&a||!n.filterOption({label:l,value:u,data:e},o))){var c=i?void 0:function(){return n.onOptionHover(e)},s=i?void 0:function(){return n.selectOption(e)},f=n.getElementId("option")+"-"+r;return{innerProps:{id:f,onClick:s,onMouseMove:c,onMouseOver:c,tabIndex:-1},data:e,isDisabled:i,isSelected:a,key:f,label:l,type:"option",value:u}}};return i.reduce((function(e,t,r){if(t.options){n.hasGroups||(n.hasGroups=!0);var o=t.options.map((function(t,n){var o=a(t,r+"-"+n);return o&&e.focusable.push(t),o})).filter(Boolean);if(o.length){var i=n.getElementId("group")+"-"+r;e.render.push({type:"group",key:i,data:t,options:o})}}else{var l=a(t,""+r);l&&(e.render.push(l),e.focusable.push(t))}return e}),{render:[],focusable:[]})};var r=t.value;n.cacheComponents=lt(n.cacheComponents,ze).bind(Lt(Lt(n))),n.cacheComponents(t.components),n.instancePrefix="react-select-"+(n.props.instanceId||++Ht);var o=ce(r);n.buildMenuOptions=lt(n.buildMenuOptions,(function(e,t){var n=e,r=n[0],o=n[1],i=t,a=i[0];return ze(o,i[1])&&ze(r.inputValue,a.inputValue)&&ze(r.options,a.options)})).bind(Lt(Lt(n)));var 
i=t.menuIsOpen?n.buildMenuOptions(t,o):{render:[],focusable:[]};return n.state.menuOptions=i,n.state.selectValue=o,n}n=e,(t=r).prototype=Object.create(n.prototype),t.prototype.constructor=t,t.__proto__=n;var i=r.prototype;return i.componentDidMount=function(){this.startListeningComposition(),this.startListeningToTouch(),this.props.closeMenuOnScroll&&document&&document.addEventListener&&document.addEventListener("scroll",this.onScroll,!0),this.props.autoFocus&&this.focusInput()},i.UNSAFE_componentWillReceiveProps=function(e){var t=this.props,n=t.options,r=t.value,o=t.menuIsOpen,i=t.inputValue;if(this.cacheComponents(e.components),e.value!==r||e.options!==n||e.menuIsOpen!==o||e.inputValue!==i){var a=ce(e.value),l=e.menuIsOpen?this.buildMenuOptions(e,a):{render:[],focusable:[]},u=this.getNextFocusedValue(a),c=this.getNextFocusedOption(l.focusable);this.setState({menuOptions:l,selectValue:a,focusedOption:c,focusedValue:u})}null!=this.inputIsHiddenAfterUpdate&&(this.setState({inputIsHidden:this.inputIsHiddenAfterUpdate}),delete this.inputIsHiddenAfterUpdate)},i.componentDidUpdate=function(e){var t,n,r,o,i,a=this.props,l=a.isDisabled,u=a.menuIsOpen,c=this.state.isFocused;(c&&!l&&e.isDisabled||c&&u&&!e.menuIsOpen)&&this.focusInput(),this.menuListRef&&this.focusedOptionRef&&this.scrollToFocusedOptionOnUpdate&&(t=this.menuListRef,n=this.focusedOptionRef,r=t.getBoundingClientRect(),o=n.getBoundingClientRect(),i=n.offsetHeight/3,o.bottom+i>r.bottom?pe(t,Math.min(n.offsetTop+n.clientHeight-t.offsetHeight+i,t.scrollHeight)):o.top-i<r.top&&pe(t,Math.max(n.offsetTop-i,0)),this.scrollToFocusedOptionOnUpdate=!1)},i.componentWillUnmount=function(){this.stopListeningComposition(),this.stopListeningToTouch(),document.removeEventListener("scroll",this.onScroll,!0)},i.onMenuOpen=function(){this.props.onMenuOpen()},i.onMenuClose=function(){var e=this.props,t=e.isSearchable,n=e.isMulti;this.announceAriaLiveContext({event:"input",context:{isSearchable:t,isMulti:n}}),this.onInputChange("",{action:"menu-close"}),this.props.onMenuClose()},i.onInputChange=function(e,t){this.props.onInputChange(e,t)},i.focusInput=function(){this.inputRef&&this.inputRef.focus()},i.blurInput=function(){this.inputRef&&this.inputRef.blur()},i.openMenu=function(e){var t=this,n=this.state,r=n.selectValue,o=n.isFocused,i=this.buildMenuOptions(this.props,r),a=this.props.isMulti,l="first"===e?0:i.focusable.length-1;if(!a){var u=i.focusable.indexOf(r[0]);u>-1&&(l=u)}this.scrollToFocusedOptionOnUpdate=!(o&&this.menuListRef),this.inputIsHiddenAfterUpdate=!1,this.setState({menuOptions:i,focusedValue:null,focusedOption:i.focusable[l]},(function(){t.onMenuOpen(),t.announceAriaLiveContext({event:"menu"})}))},i.focusValue=function(e){var t=this.props,n=t.isMulti,r=t.isSearchable,o=this.state,i=o.selectValue,a=o.focusedValue;if(n){this.setState({focusedOption:null});var l=i.indexOf(a);a||(l=-1,this.announceAriaLiveContext({event:"value"}));var u=i.length-1,c=-1;if(i.length){switch(e){case"previous":c=0===l?0:-1===l?u:l-1;break;case"next":l>-1&&l<u&&(c=l+1)}-1===c&&this.announceAriaLiveContext({event:"input",context:{isSearchable:r,isMulti:n}}),this.setState({inputIsHidden:-1!==c,focusedValue:i[c]})}}},i.focusOption=function(e){void 0===e&&(e="first");var t=this.props.pageSize,n=this.state,r=n.focusedOption,o=n.menuOptions.focusable;if(o.length){var 
i=0,a=o.indexOf(r);r||(a=-1,this.announceAriaLiveContext({event:"menu"})),"up"===e?i=a>0?a-1:o.length-1:"down"===e?i=(a+1)%o.length:"pageup"===e?(i=a-t)<0&&(i=0):"pagedown"===e?(i=a+t)>o.length-1&&(i=o.length-1):"last"===e&&(i=o.length-1),this.scrollToFocusedOptionOnUpdate=!0,this.setState({focusedOption:o[i],focusedValue:null}),this.announceAriaLiveContext({event:"menu",context:{isDisabled:jt(o[i])}})}},i.getTheme=function(){return this.props.theme?"function"==typeof this.props.theme?this.props.theme(Rt):It({},Rt,this.props.theme):Rt},i.getCommonProps=function(){var e=this.clearValue,t=this.getStyles,n=this.setValue,r=this.selectOption,o=this.props,i=o.classNamePrefix,a=o.isMulti,l=o.isRtl,u=o.options,c=this.state.selectValue,s=this.hasValue();return{cx:ue.bind(null,i),clearValue:e,getStyles:t,getValue:function(){return c},hasValue:s,isMulti:a,isRtl:l,options:u,selectOption:r,setValue:n,selectProps:o,theme:this.getTheme()}},i.getNextFocusedValue=function(e){if(this.clearFocusValueOnUpdate)return this.clearFocusValueOnUpdate=!1,null;var t=this.state,n=t.focusedValue,r=t.selectValue.indexOf(n);if(r>-1){if(e.indexOf(n)>-1)return n;if(r<e.length)return e[r]}return null},i.getNextFocusedOption=function(e){var t=this.state.focusedOption;return t&&e.indexOf(t)>-1?t:e[0]},i.hasValue=function(){return this.state.selectValue.length>0},i.hasOptions=function(){return!!this.state.menuOptions.render.length},i.countOptions=function(){return this.state.menuOptions.focusable.length},i.isClearable=function(){var e=this.props,t=e.isClearable,n=e.isMulti;return void 0===t?n:t},i.isOptionDisabled=function(e,t){return"function"==typeof this.props.isOptionDisabled&&this.props.isOptionDisabled(e,t)},i.isOptionSelected=function(e,t){var n=this;if(t.indexOf(e)>-1)return!0;if("function"==typeof this.props.isOptionSelected)return this.props.isOptionSelected(e,t);var r=this.getOptionValue(e);return t.some((function(e){return n.getOptionValue(e)===r}))},i.filterOption=function(e,t){return!this.props.filterOption||this.props.filterOption(e,t)},i.formatOptionLabel=function(e,t){if("function"==typeof this.props.formatOptionLabel){var n=this.props.inputValue,r=this.state.selectValue;return this.props.formatOptionLabel(e,{context:t,inputValue:n,selectValue:r})}return this.getOptionLabel(e)},i.formatGroupLabel=function(e){return this.props.formatGroupLabel(e)},i.startListeningComposition=function(){document&&document.addEventListener&&(document.addEventListener("compositionstart",this.onCompositionStart,!1),document.addEventListener("compositionend",this.onCompositionEnd,!1))},i.stopListeningComposition=function(){document&&document.removeEventListener&&(document.removeEventListener("compositionstart",this.onCompositionStart),document.removeEventListener("compositionend",this.onCompositionEnd))},i.startListeningToTouch=function(){document&&document.addEventListener&&(document.addEventListener("touchstart",this.onTouchStart,!1),document.addEventListener("touchmove",this.onTouchMove,!1),document.addEventListener("touchend",this.onTouchEnd,!1))},i.stopListeningToTouch=function(){document&&document.removeEventListener&&(document.removeEventListener("touchstart",this.onTouchStart),document.removeEventListener("touchmove",this.onTouchMove),document.removeEventListener("touchend",this.onTouchEnd))},i.constructAriaLiveMessage=function(){var e=this.state,t=e.ariaLiveContext,n=e.selectValue,r=e.focusedValue,o=e.focusedOption,i=this.props,a=i.options,l=i.menuIsOpen,u=i.inputValue,c=i.screenReaderStatus;return(r?function(e){var 
t=e.focusedValue,n=e.getOptionLabel,r=e.selectValue;return"value "+n(t)+" focused, "+(r.indexOf(t)+1)+" of "+r.length+"."}({focusedValue:r,getOptionLabel:this.getOptionLabel,selectValue:n}):"")+" "+(o&&l?function(e){var t=e.focusedOption,n=e.getOptionLabel,r=e.options;return"option "+n(t)+" focused"+(t.isDisabled?" disabled":"")+", "+(r.indexOf(t)+1)+" of "+r.length+"."}({focusedOption:o,getOptionLabel:this.getOptionLabel,options:a}):"")+" "+function(e){var t=e.inputValue;return e.screenReaderMessage+(t?" for search term "+t:"")+"."}({inputValue:u,screenReaderMessage:c({count:this.countOptions()})})+" "+t},i.renderInput=function(){var e=this.props,t=e.isDisabled,n=e.isSearchable,r=e.inputId,i=e.inputValue,a=e.tabIndex,l=this.components.Input,u=this.state.inputIsHidden,c=r||this.getElementId("input"),s={"aria-autocomplete":"list","aria-label":this.props["aria-label"],"aria-labelledby":this.props["aria-labelledby"]};if(!n)return o.a.createElement(vt,It({id:c,innerRef:this.getInputRef,onBlur:this.onInputBlur,onChange:ae,onFocus:this.onInputFocus,readOnly:!0,disabled:t,tabIndex:a,value:""},s));var f=this.commonProps,p=f.cx,d=f.theme,h=f.selectProps;return o.a.createElement(l,It({autoCapitalize:"none",autoComplete:"off",autoCorrect:"off",cx:p,getStyles:this.getStyles,id:c,innerRef:this.getInputRef,isDisabled:t,isHidden:u,onBlur:this.onInputBlur,onChange:this.handleInputChange,onFocus:this.onInputFocus,selectProps:h,spellCheck:"false",tabIndex:a,theme:d,type:"text",value:i},s))},i.renderPlaceholderOrValue=function(){var e=this,t=this.components,n=t.MultiValue,r=t.MultiValueContainer,i=t.MultiValueLabel,a=t.MultiValueRemove,l=t.SingleValue,u=t.Placeholder,c=this.commonProps,s=this.props,f=s.controlShouldRenderValue,p=s.isDisabled,d=s.isMulti,h=s.inputValue,m=s.placeholder,g=this.state,v=g.selectValue,b=g.focusedValue,y=g.isFocused;if(!this.hasValue()||!f)return h?null:o.a.createElement(u,It({},c,{key:"placeholder",isDisabled:p,isFocused:y}),m);if(d)return v.map((function(t,l){var u=t===b;return o.a.createElement(n,It({},c,{components:{Container:r,Label:i,Remove:a},isFocused:u,isDisabled:p,key:e.getOptionValue(t),index:l,removeProps:{onClick:function(){return e.removeValue(t)},onTouchEnd:function(){return e.removeValue(t)},onMouseDown:function(e){e.preventDefault(),e.stopPropagation()}},data:t}),e.formatOptionLabel(t,"value"))}));if(h)return null;var w=v[0];return o.a.createElement(l,It({},c,{data:w,isDisabled:p}),this.formatOptionLabel(w,"value"))},i.renderClearIndicator=function(){var e=this.components.ClearIndicator,t=this.commonProps,n=this.props,r=n.isDisabled,i=n.isLoading,a=this.state.isFocused;if(!this.isClearable()||!e||r||!this.hasValue()||i)return null;var l={onMouseDown:this.onClearIndicatorMouseDown,onTouchEnd:this.onClearIndicatorTouchEnd,"aria-hidden":"true"};return o.a.createElement(e,It({},t,{innerProps:l,isFocused:a}))},i.renderLoadingIndicator=function(){var e=this.components.LoadingIndicator,t=this.commonProps,n=this.props,r=n.isDisabled,i=n.isLoading,a=this.state.isFocused;if(!e||!i)return null;return o.a.createElement(e,It({},t,{innerProps:{"aria-hidden":"true"},isDisabled:r,isFocused:a}))},i.renderIndicatorSeparator=function(){var e=this.components,t=e.DropdownIndicator,n=e.IndicatorSeparator;if(!t||!n)return null;var r=this.commonProps,i=this.props.isDisabled,a=this.state.isFocused;return o.a.createElement(n,It({},r,{isDisabled:i,isFocused:a}))},i.renderDropdownIndicator=function(){var e=this.components.DropdownIndicator;if(!e)return null;var 
t=this.commonProps,n=this.props.isDisabled,r=this.state.isFocused,i={onMouseDown:this.onDropdownIndicatorMouseDown,onTouchEnd:this.onDropdownIndicatorTouchEnd,"aria-hidden":"true"};return o.a.createElement(e,It({},t,{innerProps:i,isDisabled:n,isFocused:r}))},i.renderMenu=function(){var e=this,t=this.components,n=t.Group,r=t.GroupHeading,i=t.Menu,a=t.MenuList,l=t.MenuPortal,u=t.LoadingMessage,c=t.NoOptionsMessage,s=t.Option,f=this.commonProps,p=this.state,d=p.focusedOption,h=p.menuOptions,m=this.props,g=m.captureMenuScroll,v=m.inputValue,b=m.isLoading,y=m.loadingMessage,w=m.minMenuHeight,x=m.maxMenuHeight,E=m.menuIsOpen,k=m.menuPlacement,S=m.menuPosition,O=m.menuPortalTarget,C=m.menuShouldBlockScroll,T=m.menuShouldScrollIntoView,_=m.noOptionsMessage,P=m.onMenuScrollToTop,A=m.onMenuScrollToBottom;if(!E)return null;var M,z=function(t){var n=d===t.data;return t.innerRef=n?e.getFocusedOptionRef:void 0,o.a.createElement(s,It({},f,t,{isFocused:n}),e.formatOptionLabel(t.data,"menu"))};if(this.hasOptions())M=h.render.map((function(t){if("group"===t.type){t.type;var i=function(e,t){if(null==e)return{};var n,r,o={},i=Object.keys(e);for(r=0;r<i.length;r++)n=i[r],t.indexOf(n)>=0||(o[n]=e[n]);return o}(t,["type"]),a=t.key+"-heading";return o.a.createElement(n,It({},f,i,{Heading:r,headingProps:{id:a},label:e.formatGroupLabel(t.data)}),t.options.map((function(e){return z(e)})))}if("option"===t.type)return z(t)}));else if(b){var F=y({inputValue:v});if(null===F)return null;M=o.a.createElement(u,f,F)}else{var j=_({inputValue:v});if(null===j)return null;M=o.a.createElement(c,f,j)}var D={minMenuHeight:w,maxMenuHeight:x,menuPlacement:k,menuPosition:S,menuShouldScrollIntoView:T},R=o.a.createElement(Ee,It({},f,D),(function(t){var n=t.ref,r=t.placerProps,l=r.placement,u=r.maxHeight;return o.a.createElement(i,It({},f,D,{innerRef:n,innerProps:{onMouseDown:e.onMenuMouseDown,onMouseMove:e.onMenuMouseMove},isLoading:b,placement:l}),o.a.createElement(Mt,{isEnabled:g,onTopArrive:P,onBottomArrive:A},o.a.createElement(Pt,{isEnabled:C},o.a.createElement(a,It({},f,{innerRef:e.getMenuListRef,isLoading:b,maxHeight:u}),M))))}));return O||"fixed"===S?o.a.createElement(l,It({},f,{appendTo:O,controlElement:this.controlRef,menuPlacement:k,menuPosition:S}),R):R},i.renderFormField=function(){var e=this,t=this.props,n=t.delimiter,r=t.isDisabled,i=t.isMulti,a=t.name,l=this.state.selectValue;if(a&&!r){if(i){if(n){var u=l.map((function(t){return e.getOptionValue(t)})).join(n);return o.a.createElement("input",{name:a,type:"hidden",value:u})}var c=l.length>0?l.map((function(t,n){return o.a.createElement("input",{key:"i-"+n,name:a,type:"hidden",value:e.getOptionValue(t)})})):o.a.createElement("input",{name:a,type:"hidden"});return o.a.createElement("div",null,c)}var s=l[0]?this.getOptionValue(l[0]):"";return o.a.createElement("input",{name:a,type:"hidden",value:s})}},i.renderLiveRegion=function(){return this.state.isFocused?o.a.createElement(mt,{"aria-live":"polite"},o.a.createElement("p",{id:"aria-selection-event"}," ",this.state.ariaLiveSelection),o.a.createElement("p",{id:"aria-context"}," ",this.constructAriaLiveMessage())):null},i.render=function(){var e=this.components,t=e.Control,n=e.IndicatorsContainer,r=e.SelectContainer,i=e.ValueContainer,a=this.props,l=a.className,u=a.id,c=a.isDisabled,s=a.menuIsOpen,f=this.state.isFocused,p=this.commonProps=this.getCommonProps();return 
o.a.createElement(r,It({},p,{className:l,innerProps:{id:u,onKeyDown:this.onKeyDown},isDisabled:c,isFocused:f}),this.renderLiveRegion(),o.a.createElement(t,It({},p,{innerRef:this.getControlRef,innerProps:{onMouseDown:this.onControlMouseDown,onTouchEnd:this.onControlTouchEnd},isDisabled:c,isFocused:f,menuIsOpen:s}),o.a.createElement(i,It({},p,{isDisabled:c}),this.renderPlaceholderOrValue(),this.renderInput()),o.a.createElement(n,It({},p,{isDisabled:c}),this.renderClearIndicator(),this.renderLoadingIndicator(),this.renderIndicatorSeparator(),this.renderDropdownIndicator())),this.renderMenu(),this.renderFormField())},r}(r.Component);function Wt(){return(Wt=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}).apply(this,arguments)}Bt.defaultProps=Vt;var Ut={defaultInputValue:"",defaultMenuIsOpen:!1,defaultValue:null},$t=n(18);r.Component,Yt=Bt,Xt=Qt=function(e){var t,n;function r(){for(var t,n=arguments.length,r=new Array(n),o=0;o<n;o++)r[o]=arguments[o];return(t=e.call.apply(e,[this].concat(r))||this).select=void 0,t.state={inputValue:void 0!==t.props.inputValue?t.props.inputValue:t.props.defaultInputValue,menuIsOpen:void 0!==t.props.menuIsOpen?t.props.menuIsOpen:t.props.defaultMenuIsOpen,value:void 0!==t.props.value?t.props.value:t.props.defaultValue},t.onChange=function(e,n){t.callProp("onChange",e,n),t.setState({value:e})},t.onInputChange=function(e,n){var r=t.callProp("onInputChange",e,n);t.setState({inputValue:void 0!==r?r:e})},t.onMenuOpen=function(){t.callProp("onMenuOpen"),t.setState({menuIsOpen:!0})},t.onMenuClose=function(){t.callProp("onMenuClose"),t.setState({menuIsOpen:!1})},t}n=e,(t=r).prototype=Object.create(n.prototype),t.prototype.constructor=t,t.__proto__=n;var i=r.prototype;return i.focus=function(){this.select.focus()},i.blur=function(){this.select.blur()},i.getProp=function(e){return void 0!==this.props[e]?this.props[e]:this.state[e]},i.callProp=function(e){if("function"==typeof this.props[e]){for(var t,n=arguments.length,r=new Array(n>1?n-1:0),o=1;o<n;o++)r[o-1]=arguments[o];return(t=this.props)[e].apply(t,r)}},i.render=function(){var e=this,t=this.props,n=(t.defaultInputValue,t.defaultMenuIsOpen,t.defaultValue,function(e,t){if(null==e)return{};var n,r,o={},i=Object.keys(e);for(r=0;r<i.length;r++)n=i[r],t.indexOf(n)>=0||(o[n]=e[n]);return o}(t,["defaultInputValue","defaultMenuIsOpen","defaultValue"]));return o.a.createElement(Yt,Wt({},n,{ref:function(t){e.select=t},inputValue:this.getProp("inputValue"),menuIsOpen:this.getProp("menuIsOpen"),onChange:this.onChange,onInputChange:this.onInputChange,onMenuClose:this.onMenuClose,onMenuOpen:this.onMenuOpen,value:this.getProp("value")}))},r}(r.Component),Qt.defaultProps=Ut;var Yt,Qt,Xt,Gt=n(14),qt=n.n(Gt),Kt=n(17);n(24),n(25);function Zt(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}var Jt=function(e,t){return"function"==typeof t?t(e):function(e){for(var t=1;t<arguments.length;t++){var n=null!=arguments[t]?arguments[t]:{};t%2?Zt(n,!0).forEach((function(t){qt()(e,t,n[t])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(n)):Zt(n).forEach((function(t){Object.defineProperty(e,t,Object.getOwnPropertyDescriptor(n,t))}))}return e}({},e,{},t)},en=Object(Kt.a)((function(e){return Object(Kt.a)((function(t){return 
Jt(e,t)}))})),tn=function(e){return Object(r.createElement)(ie.c.Consumer,null,(function(t){return e.theme!==t&&(t=en(t)(e.theme)),Object(r.createElement)(ie.c.Provider,{value:t},e.children)}))};n(5);var nn=function(){return(nn=Object.assign||function(e){for(var t,n=1,r=arguments.length;n<r;n++)for(var o in t=arguments[n])Object.prototype.hasOwnProperty.call(t,o)&&(e[o]=t[o]);return e}).apply(this,arguments)},rn=Object(k.default)(d)(Y.borderRadius);function on(e){return r.createElement(rn,nn({},e))}on.defaultProps={bg:"neutral_0",borderRadius:"normal",boxShadow:"card",p:3};var an=function(){return(an=Object.assign||function(e){for(var t,n=1,r=arguments.length;n<r;n++)for(var o in t=arguments[n])Object.prototype.hasOwnProperty.call(t,o)&&(e[o]=t[o]);return e}).apply(this,arguments)},ln=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&t.indexOf(r)<0&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var o=0;for(r=Object.getOwnPropertySymbols(e);o<r.length;o++)t.indexOf(r[o])<0&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]])}return n};var un=function(e){var t=e.sx,n=e.inheritColor,o=e.variant,i=void 0===o?"h3":o,a=e.loading,l=ln(e,["sx","inheritColor","variant","loading"]);return r.createElement(s,an({as:i,variant:"heading."+i},l,{sx:an(an(an({fontFamily:"Lato",fontWeight:1,letterSpacing:0,lineHeight:"normal",m:0,color:"neutral_8"},t||{}),n?{color:"inherit !important"}:{}),a?{letterSpacing:"0px",color:"neutral_2 !important"}:{})}))},cn=function(){return(cn=Object.assign||function(e){for(var t,n=1,r=arguments.length;n<r;n++)for(var o in t=arguments[n])Object.prototype.hasOwnProperty.call(t,o)&&(e[o]=t[o]);return e}).apply(this,arguments)},sn=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&t.indexOf(r)<0&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var o=0;for(r=Object.getOwnPropertySymbols(e);o<r.length;o++)t.indexOf(r[o])<0&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]])}return n};var fn=function(e){var t=e.reverseAlign,n=void 0!==t&&t,o=e.children,i=e.text,a=e.contents,u=e.username,c=sn(e,["reverseAlign","children","text","contents","username"]),s=r.useMemo((function(){return n?"primary":"secondary"}),[n]);return r.createElement(l.Flex,cn({flexDirection:n?"row-reverse":"row"},c),r.createElement(g,cn({variant:"medium",username:u},n?{ml:2}:{mr:2}),!u&&r.createElement(K,{icon:["fas","robot"]})),r.createElement(l.Flex,{flexDirection:"column",style:{maxWidth:"320px"}},a&&a.image&&r.createElement(te,{src:a.image,width:1,sx:{borderRadius:"16px 16px 0 0"}}),r.createElement(hn,{reverseAlign:n,variant:s,borderRadius:a&&a.image?"0 0 16px 16px":null,sx:i||o?{}:{color:"neutral_6",fontStyle:"italic"}},i||"There is no response text here"),r.createElement(mn,{buttons:a&&a.buttons})))},pn=function(){return(pn=Object.assign||function(e){for(var t,n=1,r=arguments.length;n<r;n++)for(var o in t=arguments[n])Object.prototype.hasOwnProperty.call(t,o)&&(e[o]=t[o]);return e}).apply(this,arguments)},dn=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&t.indexOf(r)<0&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var o=0;for(r=Object.getOwnPropertySymbols(e);o<r.length;o++)t.indexOf(r[o])<0&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]])}return n};var hn=function(e){var t=e.sx,n=e.reverseAlign,o=void 0!==n&&n,i=e.variant,a=void 
0===i?"primary":i,u=e.borderRadius,c=dn(e,["sx","reverseAlign","variant","borderRadius"]);return r.createElement(l.Box,pn({px:"20px",py:"10px",maxWidth:"408px",width:[0,"stretch"],fontSize:2,variant:"messageBubble."+a,sx:pn(pn(pn({letterSpacing:0,lineHeight:"normal",overflowWrap:"break-word"},t||{}),o?{alignSelf:"flex-end"}:{alignSelf:"flex-start"}),u?{borderRadius:u}:o?{borderRadius:"20px 4px 20px 20px"}:{borderRadius:"4px 20px 20px 20px"})},c))};var mn=function(e){var t=e.onClick,n=e.buttons,o=void 0===n?[]:n;return o&&0!==o.length?r.createElement(r.Fragment,null,Array.isArray(o)&&r.createElement(l.Flex,{mt:2,px:"12px",flexWrap:"wrap"},o.map((function(e,n){return r.createElement(E,{disabled:!t,key:e&&e.payload||"new-button-"+n,onClick:function(n){n.stopPropagation(),t&&t(e?e.payload:"")},mr:1,mb:1,variant:"primary"},e&&e.title||"New Button")})))):null};var gn=function(){return(gn=Object.assign||function(e){for(var t,n=1,r=arguments.length;n<r;n++)for(var o in t=arguments[n])Object.prototype.hasOwnProperty.call(t,o)&&(e[o]=t[o]);return e}).apply(this,arguments)},vn=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&t.indexOf(r)<0&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var o=0;for(r=Object.getOwnPropertySymbols(e);o<r.length;o++)t.indexOf(r[o])<0&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]])}return n};function bn(e){var t=e.actions,n=e.body,o=e.heading,i=(e.imegeAlt,e.imageWidth,vn(e,["actions","body","heading","imegeAlt","imageWidth"]));return r.createElement(l.Flex,gn({flex:"1 1 auto",color:"neutral_6",flexDirection:"column",justifyContent:"center",alignItems:"center",style:{height:"100%"}},i),r.createElement(un,{variant:"h4"},o),r.createElement(oe,{as:"div",inheritColor:!0,textAlign:"center",variant:"captionSecondary",lineHeight:"normal",sx:{maxWidth:"220px"},p:2},n),t&&r.createElement(l.Box,{mt:3},t))}bn.defaultProps={bg:"neutral_0"};var yn=bn;n(26);var wn=function(){return(wn=Object.assign||function(e){for(var t,n=1,r=arguments.length;n<r;n++)for(var o in t=arguments[n])Object.prototype.hasOwnProperty.call(t,o)&&(e[o]=t[o]);return e}).apply(this,arguments)},xn={bg:"primary_2",border:"none",borderRadius:"normal",boxShadow:"button",color:"neutral_0",fontSize:1,fontWeight:1,lineHeight:"normal",px:3,py:"6px","&.large":{fontSize:2,py:"10px"},"&:hover":{bg:"primary_1"},"&:disabled, &.disabled":{color:"neutral_6",bg:"neutral_3",boxShadow:"none"}},En={bg:"transparent",border:"none",borderRadius:"normal",color:"neutral_6",fontSize:"10px",lineHeight:0,p:2,"&:hover":{color:"neutral_7"},"&.active":{color:"primary_1"}},kn={primary:xn,primaryDark:wn(wn({},xn),{boxShadow:"none"}),login:wn(wn({},xn),{letterSpacing:"1.1px !important",lineHeight:"28px"}),secondary:{bg:"transparent",border:"1px solid",borderColor:"primary_2",borderRadius:"normal",color:"primary_2",fontSize:1,fontWeight:1,lineHeight:"normal",px:3,py:"5px","&.large":{fontSize:2,py:"9px"},"&:hover":{borderColor:"primary_1",color:"primary_1"},"&:disabled, &.disabled":{borderColor:"neutral_5",color:"neutral_6"}},dark:{bg:"neutral_8",borderRadius:"normal",color:"neutral_0",fontSize:1,fontWeight:1,lineHeight:"normal",px:3,py:"6px","&.large":{fontSize:2,py:"10px"}},inverse:{bg:"transparent",border:"1px 
solid",borderColor:"neutral_0",borderRadius:"normal",color:"neutral_0",fontSize:1,fontWeight:1,lineHeight:"normal",px:3,py:"6px","&.large":{fontSize:2,py:"10px"}},plain:{bg:"transparent",border:"none",color:"neutral_0",fontSize:1,fontWeight:1,lineHeight:"small",p:0,"&.large":{fontSize:2}},icon:{bg:"transparent",border:"none",borderRadius:"normal",color:"neutral_6",fontSize:"16px",lineHeight:0,p:"12px","&.active":{bg:"transparent",color:"primary_2"},"&:hover":{bg:"primary_2",color:"neutral_0"}},headerIcon:{bg:"neutral_0",border:"none",borderRadius:0,color:"neutral_6",fontSize:"16px",lineHeight:0,p:"28px","&.active":{bg:"primary_2",color:"neutral_0"},"&:hover":{"&.active":{bg:"primary_1",color:"neutral_0"},bg:"neutral_1",color:"neutral_6"}},plainIcon:En,plainIconDark:wn(wn({},En),{"&:hover":wn(wn({},En["&:hover"]),{color:"neutral_1"})}),barIcon:{bg:"transparent",border:"none",borderRadius:0,color:"primary_2",fontSize:"16px",lineHeight:0,p:"24px","&.active":{bg:"primary_2",color:"neutral_0"},"&:hover":{"&.active":{bg:"primary_1",color:"neutral_0"},bg:"neutral_1",color:"primary_1"}},onboardingIcon:wn(wn({},xn),{bg:"neutral_0",color:"primary_2",padding:0,border:"1px solid",borderColor:"primary_4",borderRadius:50,fontSize:"20px",width:"48px",height:"48px","&:hover":{bg:"neutral_1",color:"neutral_7"},"&.active":{color:"primary_1"}})},Sn={alert:{primary:{bg:"primary_2",color:"neutral_0"},error:{bg:"secondary_2",color:"neutral_0"},success:{bg:"cyan_3",color:"#56bdba",borderColor:"cyan_2",border:"solid 1px"},alertThatUsesColoursThatArentInTheStyleguide:{bg:"#e8fff7",color:"#56bdba",border:"solid 1px",borderColor:"#88deca"}},avatar:{small:{width:"28px",height:"28px"},medium:{width:"32px",height:"32px"},large:{width:"40px",height:"40px"},xl:{width:"56px",height:"56px"}},messageBubble:{primary:{bg:"primary_2",color:"neutral_0"},secondary:{bg:"neutral_3",color:"neutral_8"},flagged:{bg:"secondary_2",color:"neutral_0"},neutral:{bg:"neutral_0",color:"neutral_8"},custom:{bg:"neutral_3",color:"neutral_8",padding:0}},separator:{left:{"& > :first-of-type":{pr:"12px"},"&:before":{display:"none"}},center:{"& > :first-of-type":{px:"12px"}},right:{"& > :first-of-type":{pl:"12px"},"&:after":{display:"none"}}}},On={button:kn,option:{default:{color:"neutral_8","&:hover":{bg:"neutral_1"}},primary:{color:"primary_2","&:hover":{bg:"neutral_1",color:"primary_1","&.active":{bg:"primary_1",color:"neutral_0"}}}}},Cn={text:{bodyPrimary:{color:"neutral_8",fontSize:3,lineHeight:"normal"},bodySecondary:{color:"neutral_8",fontSize:2,lineHeight:"normal"},captionPrimary:{color:"neutral_6",fontSize:2,lineHeight:"small"},captionSecondary:{color:"neutral_6",fontSize:0,lineHeight:"tight"},metadata:{color:"neutral_7",fontSize:0,lineHeight:"tight"},code:{color:"neutral_8",fontFamily:"IBM Plex 
Mono",fontSize:1,lineHeight:"normal"}},heading:{h1:{fontSize:6},h2:{fontSize:4},h3:{fontSize:3},h4:{fontSize:2},h5:{fontSize:1}}},Tn=[400,700],_n={primary_1:"#4A3DD4",primary_2:"#574AE2",primary_3:"#6C5EF9",primary_4:"#DDDCF6",neutral_0:"#FFFFFF",neutral_1:"#F6F7FB",neutral_2:"#F2F4FA",neutral_3:"#ECEFF7",neutral_4:"#DDE2EF",neutral_5:"#CED5E7",neutral_6:"#979FB4",neutral_7:"#2C3951",neutral_8:"#252D40",secondary_1:"#972422",secondary_2:"#C6423F",secondary_3:"#E85653",secondary_4:"#F8CBCA",orange_1:"#EB9015",orange_2:"#FF9F1C",orange_3:"#F8DEBB",yellow_1:"#E4B912",yellow_2:"#FFD121",yellow_3:"#FBF2D1",green_1:"#48A9A6",green_2:"#4DD6D2",green_3:"#D6EBEA",blue_1:"#3158D6",blue_2:"#446FF8",blue_3:"#D0E3FC",cyan_1:"#4DD6D2",cyan_2:"#87E5E2",cyan_3:"#DEFBFA",dark_1:"#2C3951",dark_2:"#263146",grey:"#dde2ef",black:"#333333",white:"#ffffff",active:"#e6f7ff"},Pn={breakpoints:["0","500px","800px","1200px","1900px"],colors:_n,colours:_n,fontSizes:[11,12,13,14,16,20,24,32,48,64,72],fontWeights:Tn,gradients:{primary_1:"linear-gradient(56deg, rgba(113, 22, 208, 0.95), #4e61e1)"},heights:[32,40,48,56,64],letterSpacings:[.6,.7,.9,1.4],lineHeights:{large:"36px",big:"24px",normal:"20px",small:"16px",tight:"12px"},space:[0,4,8,16,24,32,40,48,56,64,72,80,128,256,512],fonts:{mono:'"IBM Plex Mono", "Courier New", Courier, monospace',sansSerif:'"Lato", -apple-system, BlinkMacSystemFont, "avenir next", avenir, "helvetica neue", helvetica, ubuntu, roboto, noto, "segoe ui", arial, sans-serif'},active:{background:_n.active},radii:{tiny:"2px",small:"4px",normal:"8px",large:"16px"},actions:{default:{color:_n.neutral_6},incorrect:{color:_n.neutral_6,textDecoration:"line-through"},correct:{color:_n.primary_2,fontWeight:Tn[1]}},shadows:{sm:"0 6px 21px 0 rgba(152, 152, 152, 0.06)",md:"0 3px 6px 0 rgba(96,97,149,0.20)",lg:"0 11px 43px 0 rgba(120, 119, 150, 0.1)",selectBox:"0 2px 8px 0 "+_n.neutral_5,card:"0 2px 4px 0 rgba(207, 207, 207, 0.21)",button:"0 2px 4px 0 rgba(207, 207, 207, 0.67)"},menuItem:{default:{color:_n.neutral_6,backgroundColor:"transparent","&:hover":{color:_n.neutral_0,backgroundColor:_n.dark_2}}},text:Cn,buttons:On,variants:Sn},An=function(e){var t=e.children;return r.createElement(tn,{theme:Pn},t)},Mn={prefix:"fas",iconName:"arrow-left",icon:[448,512,[],"f060","M257.5 445.1l-22.2 22.2c-9.4 9.4-24.6 9.4-33.9 0L7 273c-9.4-9.4-9.4-24.6 0-33.9L201.4 44.7c9.4-9.4 24.6-9.4 33.9 0l22.2 22.2c9.5 9.5 9.3 25-.4 34.3L136.6 216H424c13.3 0 24 10.7 24 24v32c0 13.3-10.7 24-24 24H136.6l120.5 114.8c9.8 9.3 10 24.8.4 34.3z"]},zn={prefix:"fas",iconName:"bookmark",icon:[384,512,[],"f02e","M0 512V48C0 21.49 21.49 0 48 0h288c26.51 0 48 21.49 48 48v464L192 400 0 512z"]},Fn={prefix:"fas",iconName:"check",icon:[512,512,[],"f00c","M173.898 439.404l-166.4-166.4c-9.997-9.997-9.997-26.206 0-36.204l36.203-36.204c9.997-9.998 26.207-9.998 36.204 0L192 312.69 432.095 72.596c9.997-9.997 26.207-9.997 36.204 0l36.203 36.204c9.997 9.997 9.997 26.206 0 36.204l-294.4 294.401c-9.998 9.997-26.207 9.997-36.204-.001z"]},jn={prefix:"fas",iconName:"check-circle",icon:[512,512,[],"f058","M504 256c0 136.967-111.033 248-248 248S8 392.967 8 256 119.033 8 256 8s248 111.033 248 248zM227.314 387.314l184-184c6.248-6.248 6.248-16.379 0-22.627l-22.627-22.627c-6.248-6.249-16.379-6.249-22.628 0L216 308.118l-70.059-70.059c-6.248-6.248-16.379-6.248-22.628 0l-22.627 22.627c-6.248 6.248-6.248 16.379 0 22.627l104 104c6.249 6.249 16.379 6.249 22.628.001z"]},Dn={prefix:"fas",iconName:"circle",icon:[512,512,[],"f111","M256 8C119 8 8 119 8 256s111 248 248 248 248-111 
248-248S393 8 256 8z"]},Rn={prefix:"fas",iconName:"exclamation-circle",icon:[512,512,[],"f06a","M504 256c0 136.997-111.043 248-248 248S8 392.997 8 256C8 119.083 119.043 8 256 8s248 111.083 248 248zm-248 50c-25.405 0-46 20.595-46 46s20.595 46 46 46 46-20.595 46-46-20.595-46-46-46zm-43.673-165.346l7.418 136c.347 6.364 5.609 11.346 11.982 11.346h48.546c6.373 0 11.635-4.982 11.982-11.346l7.418-136c.375-6.874-5.098-12.654-11.982-12.654h-63.383c-6.884 0-12.356 5.78-11.981 12.654z"]},In={prefix:"fas",iconName:"flag-alt",icon:[512,512,[],"f74c","M32 0C14.3 0 0 14.3 0 32v464c0 8.8 7.2 16 16 16h32c8.8 0 16-7.2 16-16V32C64 14.3 49.7 0 32 0zm430.6 4.2C291.3 91.5 305.4-62.2 96 32.4V384c185.7-92.2 221.7 53.3 397.5-23.1 11.4-5 18.5-16.5 18.5-28.8V30.8c0-25.1-26.8-38.1-49.4-26.6z"]},Ln={prefix:"fas",iconName:"minus",icon:[448,512,[],"f068","M416 208H32c-17.67 0-32 14.33-32 32v32c0 17.67 14.33 32 32 32h384c17.67 0 32-14.33 32-32v-32c0-17.67-14.33-32-32-32z"]},Nn={prefix:"fas",iconName:"robot",icon:[640,512,[],"f544","M32,224H64V416H32A31.96166,31.96166,0,0,1,0,384V256A31.96166,31.96166,0,0,1,32,224Zm512-48V448a64.06328,64.06328,0,0,1-64,64H160a64.06328,64.06328,0,0,1-64-64V176a79.974,79.974,0,0,1,80-80H288V32a32,32,0,0,1,64,0V96H464A79.974,79.974,0,0,1,544,176ZM264,256a40,40,0,1,0-40,40A39.997,39.997,0,0,0,264,256Zm-8,128H192v32h64Zm96,0H288v32h64ZM456,256a40,40,0,1,0-40,40A39.997,39.997,0,0,0,456,256Zm-8,128H384v32h64ZM640,256V384a31.96166,31.96166,0,0,1-32,32H576V224h32A31.96166,31.96166,0,0,1,640,256Z"]},Vn={prefix:"fas",iconName:"spinner",icon:[512,512,[],"f110","M304 48c0 26.51-21.49 48-48 48s-48-21.49-48-48 21.49-48 48-48 48 21.49 48 48zm-48 368c-26.51 0-48 21.49-48 48s21.49 48 48 48 48-21.49 48-48-21.49-48-48-48zm208-208c-26.51 0-48 21.49-48 48s21.49 48 48 48 48-21.49 48-48-21.49-48-48-48zM96 256c0-26.51-21.49-48-48-48S0 229.49 0 256s21.49 48 48 48 48-21.49 48-48zm12.922 99.078c-26.51 0-48 21.49-48 48s21.49 48 48 48 48-21.49 48-48c0-26.509-21.491-48-48-48zm294.156 0c-26.51 0-48 21.49-48 48s21.49 48 48 48 48-21.49 48-48c0-26.509-21.49-48-48-48zM108.922 60.922c-26.51 0-48 21.49-48 48s21.49 48 48 48 48-21.49 48-48-21.491-48-48-48z"]},Hn={prefix:"fas",iconName:"user",icon:[448,512,[],"f007","M224 256c70.7 0 128-57.3 128-128S294.7 0 224 0 96 57.3 96 128s57.3 128 128 128zm89.6 32h-16.7c-22.2 10.2-46.9 16-72.9 16s-50.6-5.8-72.9-16h-16.7C60.2 288 0 348.2 0 422.4V464c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48v-41.6c0-74.2-60.2-134.4-134.4-134.4z"]},Bn={prefix:"far",iconName:"check-circle",icon:[512,512,[],"f058","M256 8C119.033 8 8 119.033 8 256s111.033 248 248 248 248-111.033 248-248S392.967 8 256 8zm0 48c110.532 0 200 89.451 200 200 0 110.532-89.451 200-200 200-110.532 0-200-89.451-200-200 0-110.532 89.451-200 200-200m140.204 130.267l-22.536-22.718c-4.667-4.705-12.265-4.736-16.97-.068L215.346 303.697l-59.792-60.277c-4.667-4.705-12.265-4.736-16.97-.069l-22.719 22.536c-4.705 4.667-4.736 12.265-.068 16.971l90.781 91.516c4.667 4.705 12.265 4.736 16.97.068l172.589-171.204c4.704-4.668 4.734-12.266.067-16.971z"]},Wn={prefix:"far",iconName:"clone",icon:[512,512,[],"f24d","M464 0H144c-26.51 0-48 21.49-48 48v48H48c-26.51 0-48 21.49-48 48v320c0 26.51 21.49 48 48 48h320c26.51 0 48-21.49 48-48v-48h48c26.51 0 48-21.49 48-48V48c0-26.51-21.49-48-48-48zM362 464H54a6 6 0 0 1-6-6V150a6 6 0 0 1 6-6h42v224c0 26.51 21.49 48 48 48h224v42a6 6 0 0 1-6 6zm96-96H150a6 6 0 0 1-6-6V54a6 6 0 0 1 6-6h308a6 6 0 0 1 6 6v308a6 6 0 0 1-6 6z"]},Un={prefix:"far",iconName:"flag-alt",icon:[512,512,[],"f74c","M472.5 0c-7 0-14.3 1.5-21.2 
4.6-50.5 22.7-87.8 30.3-119.1 30.3C266.1 34.9 227.7.4 151.4.4c-28.4 0-62.2 4.9-104.5 18C44.3 7.9 35.3 0 24 0 10.7 0 0 10.7 0 24v476c0 6.6 5.4 12 12 12h24c6.6 0 12-5.4 12-12V398.1c37.3-11.8 69.6-16.5 98.5-16.5 81.2 0 137.8 34.4 219.1 34.4 35.3 0 75.1-6.5 123.7-25 14-5.4 22.8-17.9 22.8-31.2V33.4C512 13 493.4 0 472.5 0zM464 349.1c-35.3 12.7-67.6 18.9-98.5 18.9-75.5 0-128.5-34.4-219.1-34.4-31.9 0-64.5 4.7-98.5 14.2V68.5C87.7 55 121.7 48.4 151.4 48.4c66.3 0 105.2 34.5 180.8 34.5 40.3 0 82.3-10 131.8-31.5v297.7z"]},$n={prefix:"far",iconName:"star",icon:[576,512,[],"f005","M528.1 171.5L382 150.2 316.7 17.8c-11.7-23.6-45.6-23.9-57.4 0L194 150.2 47.9 171.5c-26.2 3.8-36.7 36.1-17.7 54.6l105.7 103-25 145.5c-4.5 26.3 23.2 46 46.4 33.7L288 439.6l130.7 68.7c23.2 12.2 50.9-7.4 46.4-33.7l-25-145.5 105.7-103c19-18.5 8.5-50.8-17.7-54.6zM388.6 312.3l23.7 138.4L288 385.4l-124.3 65.3 23.7-138.4-100.6-98 139-20.2 62.2-126 62.2 126 139 20.2-100.6 98z"]},Yn={prefix:"fal",iconName:"angle-right",icon:[192,512,[],"f105","M166.9 264.5l-117.8 116c-4.7 4.7-12.3 4.7-17 0l-7.1-7.1c-4.7-4.7-4.7-12.3 0-17L127.3 256 25.1 155.6c-4.7-4.7-4.7-12.3 0-17l7.1-7.1c4.7-4.7 12.3-4.7 17 0l117.8 116c4.6 4.7 4.6 12.3-.1 17z"]},Qn={prefix:"fal",iconName:"arrow-to-bottom",icon:[384,512,[],"f33d","M348.5 264l-148 148.5c-4.7 4.7-12.3 4.7-17 0L35.5 264c-4.7-4.7-4.7-12.3 0-17l7.1-7.1c4.7-4.7 12.3-4.7 17 0l115.4 116V44c0-6.6 5.4-12 12-12h10c6.6 0 12 5.4 12 12v311.9L324.4 240c4.7-4.7 12.3-4.7 17 0l7.1 7.1c4.7 4.6 4.7 12.2 0 16.9zM384 468v-8c0-6.6-5.4-12-12-12H12c-6.6 0-12 5.4-12 12v8c0 6.6 5.4 12 12 12h360c6.6 0 12-5.4 12-12z"]},Xn={prefix:"fal",iconName:"arrow-to-top",icon:[384,512,[],"f341","M35.5 248l148-148.5c4.7-4.7 12.3-4.7 17 0l148 148.5c4.7 4.7 4.7 12.3 0 17l-7.1 7.1c-4.7 4.7-12.3 4.7-17 0L209 156.1V468c0 6.6-5.4 12-12 12h-10c-6.6 0-12-5.4-12-12V156.1L59.6 272c-4.7 4.7-12.3 4.7-17 0l-7.1-7.1c-4.7-4.6-4.7-12.2 0-16.9zM0 44v8c0 6.6 5.4 12 12 12h360c6.6 0 12-5.4 12-12v-8c0-6.6-5.4-12-12-12H12C5.4 32 0 37.4 0 44z"]},Gn={prefix:"fal",iconName:"bookmark",icon:[384,512,[],"f02e","M336 0H48C21.49 0 0 21.49 0 48v464l192-112 192 112V48c0-26.51-21.49-48-48-48zm16 456.287l-160-93.333-160 93.333V48c0-8.822 7.178-16 16-16h288c8.822 0 16 7.178 16 16v408.287z"]},qn={prefix:"fal",iconName:"calendar-alt",icon:[448,512,[],"f073","M400 64h-48V12c0-6.6-5.4-12-12-12h-8c-6.6 0-12 5.4-12 12v52H128V12c0-6.6-5.4-12-12-12h-8c-6.6 0-12 5.4-12 12v52H48C21.5 64 0 85.5 0 112v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V112c0-26.5-21.5-48-48-48zM48 96h352c8.8 0 16 7.2 16 16v48H32v-48c0-8.8 7.2-16 16-16zm352 384H48c-8.8 0-16-7.2-16-16V192h384v272c0 8.8-7.2 16-16 16zM148 320h-40c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v40c0 6.6-5.4 12-12 12zm96 0h-40c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v40c0 6.6-5.4 12-12 12zm96 0h-40c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v40c0 6.6-5.4 12-12 12zm-96 96h-40c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v40c0 6.6-5.4 12-12 12zm-96 0h-40c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v40c0 6.6-5.4 12-12 12zm192 0h-40c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v40c0 6.6-5.4 12-12 12z"]},Kn={prefix:"fal",iconName:"chart-line",icon:[512,512,[],"f201","M504 416H32V72c0-4.42-3.58-8-8-8H8c-4.42 0-8 3.58-8 8v360c0 8.84 7.16 16 16 16h488c4.42 0 8-3.58 8-8v-16c0-4.42-3.58-8-8-8zM98.34 263.03c-3.12 3.12-3.12 8.19 0 11.31l11.31 11.31c3.12 3.12 8.19 3.12 11.31 0l72.69-72.01 84.69 84.69c6.25 6.25 16.38 6.25 22.63 
0l93.53-93.53 44.04 44.04c4.95 4.95 11.03 7.16 17 7.16 12.48 0 24.46-9.7 24.46-24.34V112.19c0-8.94-7.25-16.19-16.19-16.19H344.34c-21.64 0-32.47 26.16-17.17 41.46l44.71 44.71-82.22 82.22-84.63-84.63c-6.23-6.23-16.32-6.25-22.57-.05l-84.12 83.32zM362.96 128H448v85.04L362.96 128z"]},Zn={prefix:"fal",iconName:"check",icon:[448,512,[],"f00c","M413.505 91.951L133.49 371.966l-98.995-98.995c-4.686-4.686-12.284-4.686-16.971 0L6.211 284.284c-4.686 4.686-4.686 12.284 0 16.971l118.794 118.794c4.686 4.686 12.284 4.686 16.971 0l299.813-299.813c4.686-4.686 4.686-12.284 0-16.971l-11.314-11.314c-4.686-4.686-12.284-4.686-16.97 0z"]},Jn={prefix:"fal",iconName:"check-circle",icon:[512,512,[],"f058","M256 8C119.033 8 8 119.033 8 256s111.033 248 248 248 248-111.033 248-248S392.967 8 256 8zm0 464c-118.664 0-216-96.055-216-216 0-118.663 96.055-216 216-216 118.664 0 216 96.055 216 216 0 118.663-96.055 216-216 216zm141.63-274.961L217.15 376.071c-4.705 4.667-12.303 4.637-16.97-.068l-85.878-86.572c-4.667-4.705-4.637-12.303.068-16.97l8.52-8.451c4.705-4.667 12.303-4.637 16.97.068l68.976 69.533 163.441-162.13c4.705-4.667 12.303-4.637 16.97.068l8.451 8.52c4.668 4.705 4.637 12.303-.068 16.97z"]},er={prefix:"fal",iconName:"chevron-down",icon:[448,512,[],"f078","M443.5 162.6l-7.1-7.1c-4.7-4.7-12.3-4.7-17 0L224 351 28.5 155.5c-4.7-4.7-12.3-4.7-17 0l-7.1 7.1c-4.7 4.7-4.7 12.3 0 17l211 211.1c4.7 4.7 12.3 4.7 17 0l211-211.1c4.8-4.7 4.8-12.3.1-17z"]},tr={prefix:"fal",iconName:"circle-notch",icon:[512,512,[],"f1ce","M288 24.103v8.169a11.995 11.995 0 0 0 9.698 11.768C396.638 63.425 472 150.461 472 256c0 118.663-96.055 216-216 216-118.663 0-216-96.055-216-216 0-104.534 74.546-192.509 174.297-211.978A11.993 11.993 0 0 0 224 32.253v-8.147c0-7.523-6.845-13.193-14.237-11.798C94.472 34.048 7.364 135.575 8.004 257.332c.72 137.052 111.477 246.956 248.531 246.667C393.255 503.711 504 392.789 504 256c0-121.187-86.924-222.067-201.824-243.704C294.807 10.908 288 16.604 288 24.103z"]},nr={prefix:"fal",iconName:"clipboard",icon:[384,512,[],"f328","M336 64h-88.6c.4-2.6.6-5.3.6-8 0-30.9-25.1-56-56-56s-56 25.1-56 56c0 2.7.2 5.4.6 8H48C21.5 64 0 85.5 0 112v352c0 26.5 21.5 48 48 48h288c26.5 0 48-21.5 48-48V112c0-26.5-21.5-48-48-48zM192 32c13.3 0 24 10.7 24 24s-10.7 24-24 24-24-10.7-24-24 10.7-24 24-24zm160 432c0 8.8-7.2 16-16 16H48c-8.8 0-16-7.2-16-16V112c0-8.8 7.2-16 16-16h48v20c0 6.6 5.4 12 12 12h168c6.6 0 12-5.4 12-12V96h48c8.8 0 16 7.2 16 16z"]},rr={prefix:"fal",iconName:"code-merge",icon:[384,512,[],"f387","M304 192c-41.7 0-76 32-79.7 72.8-25.2-1.3-61.6-7.9-88.8-31.7-20.3-17.8-32.8-43-37.5-75.1 35.5-8.2 62-40 62-77.9 0-44.2-35.8-80-80-80S0 35.8 0 80c0 38.7 27.5 71 64 78.4v195.2C27.5 361 0 393.3 0 432c0 44.2 35.8 80 80 80s80-35.8 80-80c0-38.7-27.5-71-64-78.4V237.4c5.5 7.2 11.7 13.9 18.6 19.9C151 289 197.9 296.1 228 297c10.5 31.9 40.5 55 76 55 44.2 0 80-35.8 80-80s-35.8-80-80-80zM32 80c0-26.5 21.5-48 48-48s48 21.5 48 48-21.5 48-48 48-48-21.5-48-48zm96 352c0 26.5-21.5 48-48 48s-48-21.5-48-48 21.5-48 48-48 48 21.5 48 48zm176-112c-26.5 0-48-21.5-48-48s21.5-48 48-48 48 21.5 48 48-21.5 48-48 48z"]},or={prefix:"fal",iconName:"cog",icon:[512,512,[],"f013","M482.696 299.276l-32.61-18.827a195.168 195.168 0 0 0 0-48.899l32.61-18.827c9.576-5.528 14.195-16.902 11.046-27.501-11.214-37.749-31.175-71.728-57.535-99.595-7.634-8.07-19.817-9.836-29.437-4.282l-32.562 18.798a194.125 194.125 0 0 0-42.339-24.48V38.049c0-11.13-7.652-20.804-18.484-23.367-37.644-8.909-77.118-8.91-114.77 0-10.831 2.563-18.484 12.236-18.484 23.367v37.614a194.101 194.101 0 0 0-42.339 
24.48L105.23 81.345c-9.621-5.554-21.804-3.788-29.437 4.282-26.36 27.867-46.321 61.847-57.535 99.595-3.149 10.599 1.47 21.972 11.046 27.501l32.61 18.827a195.168 195.168 0 0 0 0 48.899l-32.61 18.827c-9.576 5.528-14.195 16.902-11.046 27.501 11.214 37.748 31.175 71.728 57.535 99.595 7.634 8.07 19.817 9.836 29.437 4.283l32.562-18.798a194.08 194.08 0 0 0 42.339 24.479v37.614c0 11.13 7.652 20.804 18.484 23.367 37.645 8.909 77.118 8.91 114.77 0 10.831-2.563 18.484-12.236 18.484-23.367v-37.614a194.138 194.138 0 0 0 42.339-24.479l32.562 18.798c9.62 5.554 21.803 3.788 29.437-4.283 26.36-27.867 46.321-61.847 57.535-99.595 3.149-10.599-1.47-21.972-11.046-27.501zm-65.479 100.461l-46.309-26.74c-26.988 23.071-36.559 28.876-71.039 41.059v53.479a217.145 217.145 0 0 1-87.738 0v-53.479c-33.621-11.879-43.355-17.395-71.039-41.059l-46.309 26.74c-19.71-22.09-34.689-47.989-43.929-75.958l46.329-26.74c-6.535-35.417-6.538-46.644 0-82.079l-46.329-26.74c9.24-27.969 24.22-53.869 43.929-75.969l46.309 26.76c27.377-23.434 37.063-29.065 71.039-41.069V44.464a216.79 216.79 0 0 1 87.738 0v53.479c33.978 12.005 43.665 17.637 71.039 41.069l46.309-26.76c19.709 22.099 34.689 47.999 43.929 75.969l-46.329 26.74c6.536 35.426 6.538 46.644 0 82.079l46.329 26.74c-9.24 27.968-24.219 53.868-43.929 75.957zM256 160c-52.935 0-96 43.065-96 96s43.065 96 96 96 96-43.065 96-96-43.065-96-96-96zm0 160c-35.29 0-64-28.71-64-64s28.71-64 64-64 64 28.71 64 64-28.71 64-64 64z"]},ir={prefix:"fal",iconName:"comment",icon:[512,512,[],"f075","M256 64c123.5 0 224 79 224 176S379.5 416 256 416c-28.3 0-56.3-4.3-83.2-12.8l-15.2-4.8-13 9.2c-23 16.3-58.5 35.3-102.6 39.6 12-15.1 29.8-40.4 40.8-69.6l7.1-18.7-13.7-14.6C47.3 313.7 32 277.6 32 240c0-97 100.5-176 224-176m0-32C114.6 32 0 125.1 0 240c0 47.6 19.9 91.2 52.9 126.3C38 405.7 7 439.1 6.5 439.5c-6.6 7-8.4 17.2-4.6 26 3.8 8.8 12.4 14.5 22 14.5 61.5 0 110-25.7 139.1-46.3 29 9.1 60.2 14.3 93 14.3 141.4 0 256-93.1 256-208S397.4 32 256 32z"]},ar={prefix:"fal",iconName:"comments",icon:[576,512,[],"f086","M569.9 441.1c-.5-.4-22.6-24.2-37.9-54.9 27.5-27.1 44-61.1 44-98.2 0-80-76.5-146.1-176.2-157.9C368.4 72.5 294.3 32 208 32 93.1 32 0 103.6 0 192c0 37 16.5 71 44 98.2-15.3 30.7-37.3 54.5-37.7 54.9-6.3 6.7-8.1 16.5-4.4 25 3.6 8.5 12 14 21.2 14 53.5 0 96.7-20.2 125.2-38.8 9.1 2.1 18.4 3.7 28 4.8 31.5 57.5 105.5 98 191.8 98 20.8 0 40.8-2.4 59.8-6.8 28.5 18.5 71.6 38.8 125.2 38.8 9.2 0 17.5-5.5 21.2-14 3.6-8.5 1.9-18.3-4.4-25zM155.4 314l-13.2-3-11.4 7.4c-20.1 13.1-50.5 28.2-87.7 32.5 8.8-11.3 20.2-27.6 29.5-46.4L83 283.7l-16.5-16.3C50.7 251.9 32 226.2 32 192c0-70.6 79-128 176-128s176 57.4 176 128-79 128-176 128c-17.7 0-35.4-2-52.6-6zm289.8 100.4l-11.4-7.4-13.2 3.1c-17.2 4-34.9 6-52.6 6-65.1 0-122-25.9-152.4-64.3C326.9 348.6 416 278.4 416 192c0-9.5-1.3-18.7-3.3-27.7C488.1 178.8 544 228.7 544 288c0 34.2-18.7 59.9-34.5 75.4L493 379.7l10.3 20.7c9.4 18.9 20.8 35.2 29.5 46.4-37.1-4.2-67.5-19.4-87.6-32.4z"]},lr={prefix:"fal",iconName:"edit",icon:[576,512,[],"f044","M417.8 315.5l20-20c3.8-3.8 10.2-1.1 10.2 4.2V464c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V112c0-26.5 21.5-48 48-48h292.3c5.3 0 8 6.5 4.2 10.2l-20 20c-1.1 1.1-2.7 1.8-4.2 1.8H48c-8.8 0-16 7.2-16 16v352c0 8.8 7.2 16 16 16h352c8.8 0 16-7.2 16-16V319.7c0-1.6.6-3.1 1.8-4.2zm145.9-191.2L251.2 436.8l-99.9 11.1c-13.4 1.5-24.7-9.8-23.2-23.2l11.1-99.9L451.7 12.3c16.4-16.4 43-16.4 59.4 0l52.6 52.6c16.4 16.4 16.4 43 0 59.4zm-93.6 48.4L403.4 106 169.8 339.5l-8.3 75.1 75.1-8.3 233.5-233.6zm71-85.2l-52.6-52.6c-3.8-3.8-10.2-4-14.1 0L426 83.3l66.7 66.7 48.4-48.4c3.9-3.8 3.9-10.2 
0-14.1z"]},ur={prefix:"fal",iconName:"ellipsis-h",icon:[320,512,[],"f141","M192 256c0 17.7-14.3 32-32 32s-32-14.3-32-32 14.3-32 32-32 32 14.3 32 32zm88-32c-17.7 0-32 14.3-32 32s14.3 32 32 32 32-14.3 32-32-14.3-32-32-32zm-240 0c-17.7 0-32 14.3-32 32s14.3 32 32 32 32-14.3 32-32-14.3-32-32-32z"]},cr={prefix:"fal",iconName:"ellipsis-v",icon:[64,512,[],"f142","M32 224c17.7 0 32 14.3 32 32s-14.3 32-32 32-32-14.3-32-32 14.3-32 32-32zM0 136c0 17.7 14.3 32 32 32s32-14.3 32-32-14.3-32-32-32-32 14.3-32 32zm0 240c0 17.7 14.3 32 32 32s32-14.3 32-32-14.3-32-32-32-32 14.3-32 32z"]},sr={prefix:"fal",iconName:"exclamation-circle",icon:[512,512,[],"f06a","M256 40c118.621 0 216 96.075 216 216 0 119.291-96.61 216-216 216-119.244 0-216-96.562-216-216 0-119.203 96.602-216 216-216m0-32C119.043 8 8 119.083 8 256c0 136.997 111.043 248 248 248s248-111.003 248-248C504 119.083 392.957 8 256 8zm-11.49 120h22.979c6.823 0 12.274 5.682 11.99 12.5l-7 168c-.268 6.428-5.556 11.5-11.99 11.5h-8.979c-6.433 0-11.722-5.073-11.99-11.5l-7-168c-.283-6.818 5.167-12.5 11.99-12.5zM256 340c-15.464 0-28 12.536-28 28s12.536 28 28 28 28-12.536 28-28-12.536-28-28-28z"]},fr={prefix:"fal",iconName:"file-check",icon:[384,512,[],"f316","M369.941 97.941l-83.882-83.882A48 48 0 0 0 252.118 0H48C21.49 0 0 21.49 0 48v416c0 26.51 21.49 48 48 48h288c26.51 0 48-21.49 48-48V131.882a48 48 0 0 0-14.059-33.941zm-22.627 22.628a15.89 15.89 0 0 1 4.195 7.431H256V32.491a15.88 15.88 0 0 1 7.431 4.195l83.883 83.883zM336 480H48c-8.837 0-16-7.163-16-16V48c0-8.837 7.163-16 16-16h176v104c0 13.255 10.745 24 24 24h104v304c0 8.837-7.163 16-16 16zm-34.467-210.949l-134.791 133.71c-4.7 4.663-12.288 4.642-16.963-.046l-67.358-67.552c-4.683-4.697-4.672-12.301.024-16.985l8.505-8.48c4.697-4.683 12.301-4.672 16.984.024l50.442 50.587 117.782-116.837c4.709-4.671 12.313-4.641 16.985.068l8.458 8.527c4.672 4.709 4.641 12.313-.068 16.984z"]},pr={prefix:"fal",iconName:"highlighter",icon:[544,512,[],"f591","M528.61 75.91l-60.49-60.52C457.91 5.16 444.45 0 430.98 0a52.38 52.38 0 0 0-34.75 13.15L110.59 261.8c-10.29 9.08-14.33 23.35-10.33 36.49l12.49 41.02-36.54 36.56c-6.74 6.75-6.74 17.68 0 24.43l.25.26L0 479.98 99.88 512l43.99-44.01.02.02c6.75 6.75 17.69 6.75 24.44 0l36.46-36.47 40.91 12.53c18.01 5.51 31.41-4.54 36.51-10.32l248.65-285.9c18.35-20.82 17.37-52.32-2.25-71.94zM91.05 475.55l-32.21-10.33 40.26-42.03 22.14 22.15-30.19 30.21zm167.16-62.99c-.63.72-1.4.94-2.32.94-.26 0-.54-.02-.83-.05l-40.91-12.53-18.39-5.63-39.65 39.67-46.85-46.88 39.71-39.72-5.6-18.38-12.49-41.02c-.34-1.13.01-2.36.73-3l44.97-39.15 120.74 120.8-39.11 44.95zm248.51-285.73L318.36 343.4l-117.6-117.66L417.4 37.15c4.5-3.97 17.55-9.68 28.1.88l60.48 60.52c7.65 7.65 8.04 20 .74 28.28z"]},dr={prefix:"fal",iconName:"image",icon:[512,512,[],"f03e","M464 64H48C21.49 64 0 85.49 0 112v288c0 26.51 21.49 48 48 48h416c26.51 0 48-21.49 48-48V112c0-26.51-21.49-48-48-48zm16 336c0 8.822-7.178 16-16 16H48c-8.822 0-16-7.178-16-16V112c0-8.822 7.178-16 16-16h416c8.822 0 16 7.178 16 16v288zM112 232c30.928 0 56-25.072 56-56s-25.072-56-56-56-56 25.072-56 56 25.072 56 56 56zm0-80c13.234 0 24 10.766 24 24s-10.766 24-24 24-24-10.766-24-24 10.766-24 24-24zm207.029 23.029L224 270.059l-31.029-31.029c-9.373-9.373-24.569-9.373-33.941 0l-88 88A23.998 23.998 0 0 0 64 344v28c0 6.627 5.373 12 12 12h360c6.627 0 12-5.373 12-12v-92c0-6.365-2.529-12.47-7.029-16.971l-88-88c-9.373-9.372-24.569-9.372-33.942 0zM416 352H96v-4.686l80-80 48 48 112-112 80 80V352z"]},hr={prefix:"fal",iconName:"inbox",icon:[576,512,[],"f01c","M566.819 227.377L462.377 83.768A48.001 
48.001 0 0 0 423.557 64H152.443a47.998 47.998 0 0 0-38.819 19.768L9.181 227.377A47.996 47.996 0 0 0 0 255.609V400c0 26.51 21.49 48 48 48h480c26.51 0 48-21.49 48-48V255.609a47.996 47.996 0 0 0-9.181-28.232zM139.503 102.589A16.048 16.048 0 0 1 152.443 96h271.115c5.102 0 9.939 2.463 12.94 6.589L524.796 224H388.223l-32 64H219.777l-32-64H51.204l88.299-121.411zM544 272v128c0 8.823-7.178 16-16 16H48c-8.822 0-16-7.177-16-16V272c0-8.837 7.163-16 16-16h120l32 64h176l32-64h120c8.837 0 16 7.163 16 16z"]},mr={prefix:"fal",iconName:"info-circle",icon:[512,512,[],"f05a","M256 40c118.621 0 216 96.075 216 216 0 119.291-96.61 216-216 216-119.244 0-216-96.562-216-216 0-119.203 96.602-216 216-216m0-32C119.043 8 8 119.083 8 256c0 136.997 111.043 248 248 248s248-111.003 248-248C504 119.083 392.957 8 256 8zm-36 344h12V232h-12c-6.627 0-12-5.373-12-12v-8c0-6.627 5.373-12 12-12h48c6.627 0 12 5.373 12 12v140h12c6.627 0 12 5.373 12 12v8c0 6.627-5.373 12-12 12h-72c-6.627 0-12-5.373-12-12v-8c0-6.627 5.373-12 12-12zm36-240c-17.673 0-32 14.327-32 32s14.327 32 32 32 32-14.327 32-32-14.327-32-32-32z"]},gr={prefix:"fal",iconName:"lightbulb",icon:[352,512,[],"f0eb","M176 0C73.05 0-.12 83.54 0 176.24c.06 44.28 16.5 84.67 43.56 115.54C69.21 321.03 93.85 368.68 96 384l.06 75.18c0 3.15.94 6.22 2.68 8.84l24.51 36.84c2.97 4.46 7.97 7.14 13.32 7.14h78.85c5.36 0 10.36-2.68 13.32-7.14l24.51-36.84c1.74-2.62 2.67-5.7 2.68-8.84L256 384c2.26-15.72 26.99-63.19 52.44-92.22C335.55 260.85 352 220.37 352 176 352 78.8 273.2 0 176 0zm47.94 454.31L206.85 480h-61.71l-17.09-25.69-.01-6.31h95.9v6.31zm.04-38.31h-95.97l-.07-32h96.08l-.04 32zm60.4-145.32c-13.99 15.96-36.33 48.1-50.58 81.31H118.21c-14.26-33.22-36.59-65.35-50.58-81.31C44.5 244.3 32.13 210.85 32.05 176 31.87 99.01 92.43 32 176 32c79.4 0 144 64.6 144 144 0 34.85-12.65 68.48-35.62 94.68zM176 64c-61.75 0-112 50.25-112 112 0 8.84 7.16 16 16 16s16-7.16 16-16c0-44.11 35.88-80 80-80 8.84 0 16-7.16 16-16s-7.16-16-16-16z"]},vr={prefix:"fal",iconName:"lightbulb-on",icon:[640,512,[],"f672","M320,64A112.14,112.14,0,0,0,208,176a16,16,0,0,0,32,0,80.09,80.09,0,0,1,80-80,16,16,0,0,0,0-32Zm0-64C217.06,0,143.88,83.55,144,176.23a175,175,0,0,0,43.56,115.55C213.22,321,237.84,368.69,240,384l.06,75.19a15.88,15.88,0,0,0,2.69,8.83l24.5,36.84A16,16,0,0,0,280.56,512h78.85a16,16,0,0,0,13.34-7.14L397.25,468a16.17,16.17,0,0,0,2.69-8.83L400,384c2.25-15.72,27-63.19,52.44-92.22A175.9,175.9,0,0,0,320,0Zm47.94,454.31L350.84,480H289.12l-17.06-25.69,0-6.31h95.91ZM368,416H272l-.06-32H368Zm60.41-145.31c-14,15.95-36.32,48.09-50.57,81.29H262.22c-14.28-33.21-36.6-65.34-50.6-81.29A143.47,143.47,0,0,1,176.06,176C175.88,99,236.44,32,320,32c79.41,0,144,64.59,144,144A143.69,143.69,0,0,1,428.38,270.69ZM96,176a16,16,0,0,0-16-16H16a16,16,0,0,0,0,32H80A16,16,0,0,0,96,176ZM528,64a16.17,16.17,0,0,0,7.16-1.69l64-32A16,16,0,0,0,584.84,1.69l-64,32A16,16,0,0,0,528,64Zm96,96H560a16,16,0,0,0,0,32h64a16,16,0,0,0,0-32ZM119.16,33.69l-64-32A16,16,0,0,0,40.84,30.31l64,32A16.17,16.17,0,0,0,112,64a16,16,0,0,0,7.16-30.31Zm480,288-64-32a16,16,0,0,0-14.32,28.63l64,32a16,16,0,0,0,14.32-28.63ZM112,288a16.17,16.17,0,0,0-7.16,1.69l-64,32a16,16,0,0,0,14.32,28.63l64-32A16,16,0,0,0,112,288Z"]},br={prefix:"fal",iconName:"location",icon:[512,512,[],"f601","M504 240h-56.81C439.48 146.76 365.24 72.52 272 64.81V8c0-4.42-3.58-8-8-8h-16c-4.42 0-8 3.58-8 8v56.81C146.76 72.52 72.52 146.76 64.81 240H8c-4.42 0-8 3.58-8 8v16c0 4.42 3.58 8 8 8h56.81c7.71 93.24 81.95 167.48 175.19 175.19V504c0 4.42 3.58 8 8 8h16c4.42 0 8-3.58 8-8v-56.81c93.24-7.71 167.48-81.95 
175.19-175.19H504c4.42 0 8-3.58 8-8v-16c0-4.42-3.58-8-8-8zM256 416c-88.22 0-160-71.78-160-160S167.78 96 256 96s160 71.78 160 160-71.78 160-160 160zm0-256c-53.02 0-96 42.98-96 96s42.98 96 96 96 96-42.98 96-96-42.98-96-96-96zm0 160c-35.29 0-64-28.71-64-64s28.71-64 64-64 64 28.71 64 64-28.71 64-64 64z"]},yr={prefix:"fal",iconName:"long-arrow-right",icon:[448,512,[],"f178","M311.03 131.515l-7.071 7.07c-4.686 4.686-4.686 12.284 0 16.971L387.887 239H12c-6.627 0-12 5.373-12 12v10c0 6.627 5.373 12 12 12h375.887l-83.928 83.444c-4.686 4.686-4.686 12.284 0 16.971l7.071 7.07c4.686 4.686 12.284 4.686 16.97 0l116.485-116c4.686-4.686 4.686-12.284 0-16.971L328 131.515c-4.686-4.687-12.284-4.687-16.97 0z"]},wr={prefix:"fal",iconName:"minus",icon:[384,512,[],"f068","M376 232H8c-4.42 0-8 3.58-8 8v32c0 4.42 3.58 8 8 8h368c4.42 0 8-3.58 8-8v-32c0-4.42-3.58-8-8-8z"]},xr={prefix:"fal",iconName:"paper-plane",icon:[512,512,[],"f1d8","M464 4.3L16 262.7C-7 276-4.7 309.9 19.8 320L160 378v102c0 30.2 37.8 43.3 56.7 20.3l60.7-73.8 126.4 52.2c19.1 7.9 40.7-4.2 43.8-24.7l64-417.1C515.7 10.2 487-9 464 4.3zM192 480v-88.8l54.5 22.5L192 480zm224-30.9l-206.2-85.2 199.5-235.8c4.8-5.6-2.9-13.2-8.5-8.4L145.5 337.3 32 290.5 480 32l-64 417.1z"]},Er={prefix:"fal",iconName:"paperclip",icon:[512,512,[],"f0c6","M149.106 512c-33.076 0-66.153-12.59-91.333-37.771-50.364-50.361-50.364-132.305-.002-182.665L319.842 29.498c39.331-39.331 103.328-39.331 142.66 0 39.331 39.332 39.331 103.327 0 142.657l-222.63 222.626c-28.297 28.301-74.347 28.303-102.65 0-28.3-28.301-28.3-74.349 0-102.649l170.301-170.298c4.686-4.686 12.284-4.686 16.97 0l5.661 5.661c4.686 4.686 4.686 12.284 0 16.971l-170.3 170.297c-15.821 15.821-15.821 41.563.001 57.385 15.821 15.82 41.564 15.82 57.385 0l222.63-222.626c26.851-26.851 26.851-70.541 0-97.394-26.855-26.851-70.544-26.849-97.395 0L80.404 314.196c-37.882 37.882-37.882 99.519 0 137.401 37.884 37.881 99.523 37.882 137.404.001l217.743-217.739c4.686-4.686 12.284-4.686 16.97 0l5.661 5.661c4.686 4.686 4.686 12.284 0 16.971L240.44 474.229C215.26 499.41 182.183 512 149.106 512z"]},kr={prefix:"fal",iconName:"pen",icon:[512,512,[],"f304","M493.25 56.26l-37.51-37.51C443.25 6.25 426.87 0 410.49 0s-32.76 6.25-45.26 18.74L12.85 371.12.15 485.34C-1.45 499.72 9.88 512 23.95 512c.89 0 1.78-.05 2.69-.15l114.14-12.61 352.48-352.48c24.99-24.99 24.99-65.51-.01-90.5zM126.09 468.68l-93.03 10.31 10.36-93.17 263.89-263.89 82.77 82.77-263.99 263.98zm344.54-344.54l-57.93 57.93-82.77-82.77 57.93-57.93c6.04-6.04 14.08-9.37 22.63-9.37 8.55 0 16.58 3.33 22.63 9.37l37.51 37.51c12.47 12.48 12.47 32.78 0 45.26z"]},Sr={prefix:"fal",iconName:"plus",icon:[384,512,[],"f067","M376 232H216V72c0-4.42-3.58-8-8-8h-32c-4.42 0-8 3.58-8 8v160H8c-4.42 0-8 3.58-8 8v32c0 4.42 3.58 8 8 8h160v160c0 4.42 3.58 8 8 8h32c4.42 0 8-3.58 8-8V280h160c4.42 0 8-3.58 8-8v-32c0-4.42-3.58-8-8-8z"]},Or={prefix:"fal",iconName:"plus-circle",icon:[512,512,[],"f055","M384 250v12c0 6.6-5.4 12-12 12h-98v98c0 6.6-5.4 12-12 12h-12c-6.6 0-12-5.4-12-12v-98h-98c-6.6 0-12-5.4-12-12v-12c0-6.6 5.4-12 12-12h98v-98c0-6.6 5.4-12 12-12h12c6.6 0 12 5.4 12 12v98h98c6.6 0 12 5.4 12 12zm120 6c0 137-111 248-248 248S8 393 8 256 119 8 256 8s248 111 248 248zm-32 0c0-119.9-97.3-216-216-216-119.9 0-216 97.3-216 216 0 119.9 97.3 216 216 216 119.9 0 216-97.3 216-216z"]},Cr={prefix:"fal",iconName:"save",icon:[448,512,[],"f0c7","M433.941 129.941l-83.882-83.882A48 48 0 0 0 316.118 32H48C21.49 32 0 53.49 0 80v352c0 26.51 21.49 48 48 48h352c26.51 0 48-21.49 48-48V163.882a48 48 0 0 0-14.059-33.941zM288 
64v96H96V64h192zm128 368c0 8.822-7.178 16-16 16H48c-8.822 0-16-7.178-16-16V80c0-8.822 7.178-16 16-16h16v104c0 13.255 10.745 24 24 24h208c13.255 0 24-10.745 24-24V64.491a15.888 15.888 0 0 1 7.432 4.195l83.882 83.882A15.895 15.895 0 0 1 416 163.882V432zM224 232c-48.523 0-88 39.477-88 88s39.477 88 88 88 88-39.477 88-88-39.477-88-88-88zm0 144c-30.879 0-56-25.121-56-56s25.121-56 56-56 56 25.121 56 56-25.121 56-56 56z"]},Tr={prefix:"fal",iconName:"search",icon:[512,512,[],"f002","M508.5 481.6l-129-129c-2.3-2.3-5.3-3.5-8.5-3.5h-10.3C395 312 416 262.5 416 208 416 93.1 322.9 0 208 0S0 93.1 0 208s93.1 208 208 208c54.5 0 104-21 141.1-55.2V371c0 3.2 1.3 6.2 3.5 8.5l129 129c4.7 4.7 12.3 4.7 17 0l9.9-9.9c4.7-4.7 4.7-12.3 0-17zM208 384c-97.3 0-176-78.7-176-176S110.7 32 208 32s176 78.7 176 176-78.7 176-176 176z"]},_r={prefix:"fal",iconName:"search-minus",icon:[512,512,[],"f010","M307.8 223.8h-200c-6.6 0-12-5.4-12-12v-8c0-6.6 5.4-12 12-12h200c6.6 0 12 5.4 12 12v8c0 6.6-5.4 12-12 12zM508.3 497L497 508.3c-4.7 4.7-12.3 4.7-17 0l-129-129c-2.3-2.3-3.5-5.3-3.5-8.5v-8.5C310.6 395.7 261.7 416 208 416 93.8 416 1.5 324.9 0 210.7-1.5 93.7 93.7-1.5 210.7 0 324.9 1.5 416 93.8 416 208c0 53.7-20.3 102.6-53.7 139.5h8.5c3.2 0 6.2 1.3 8.5 3.5l129 129c4.7 4.7 4.7 12.3 0 17zM384 208c0-97.3-78.7-176-176-176S32 110.7 32 208s78.7 176 176 176 176-78.7 176-176z"]},Pr={prefix:"fal",iconName:"share-alt",icon:[448,512,[],"f1e0","M352 320c-28.6 0-54.2 12.5-71.8 32.3l-95.5-59.7c9.6-23.4 9.7-49.8 0-73.2l95.5-59.7c17.6 19.8 43.2 32.3 71.8 32.3 53 0 96-43 96-96S405 0 352 0s-96 43-96 96c0 13 2.6 25.3 7.2 36.6l-95.5 59.7C150.2 172.5 124.6 160 96 160c-53 0-96 43-96 96s43 96 96 96c28.6 0 54.2-12.5 71.8-32.3l95.5 59.7c-4.7 11.3-7.2 23.6-7.2 36.6 0 53 43 96 96 96s96-43 96-96c-.1-53-43.1-96-96.1-96zm0-288c35.3 0 64 28.7 64 64s-28.7 64-64 64-64-28.7-64-64 28.7-64 64-64zM96 320c-35.3 0-64-28.7-64-64s28.7-64 64-64 64 28.7 64 64-28.7 64-64 64zm256 160c-35.3 0-64-28.7-64-64s28.7-64 64-64 64 28.7 64 64-28.7 64-64 64z"]},Ar={prefix:"fal",iconName:"sitemap",icon:[640,512,[],"f0e8","M608 352h-32v-97.59c0-16.77-13.62-30.41-30.41-30.41H336v-64h48c17.67 0 32-14.33 32-32V32c0-17.67-14.33-32-32-32H256c-17.67 0-32 14.33-32 32v96c0 17.67 14.33 32 32 32h48v64H94.41C77.62 224 64 237.64 64 254.41V352H32c-17.67 0-32 14.33-32 32v96c0 17.67 14.33 32 32 32h96c17.67 0 32-14.33 32-32v-96c0-17.67-14.33-32-32-32H96v-96h208v96h-32c-17.67 0-32 14.33-32 32v96c0 17.67 14.33 32 32 32h96c17.67 0 32-14.33 32-32v-96c0-17.67-14.33-32-32-32h-32v-96h208v96h-32c-17.67 0-32 14.33-32 32v96c0 17.67 14.33 32 32 32h96c17.67 0 32-14.33 32-32v-96c0-17.67-14.33-32-32-32zm-480 32v96H32v-96h96zm240 0v96h-96v-96h96zM256 128V32h128v96H256zm352 352h-96v-96h96v96z"]},Mr={prefix:"fal",iconName:"sliders-h",icon:[512,512,[],"f1de","M504 384H192v-40c0-13.3-10.7-24-24-24h-48c-13.3 0-24 10.7-24 24v40H8c-4.4 0-8 3.6-8 8v16c0 4.4 3.6 8 8 8h88v40c0 13.3 10.7 24 24 24h48c13.3 0 24-10.7 24-24v-40h312c4.4 0 8-3.6 8-8v-16c0-4.4-3.6-8-8-8zm-344 64h-32v-96h32v96zM504 96H256V56c0-13.3-10.7-24-24-24h-48c-13.3 0-24 10.7-24 24v40H8c-4.4 0-8 3.6-8 8v16c0 4.4 3.6 8 8 8h152v40c0 13.3 10.7 24 24 24h48c13.3 0 24-10.7 24-24v-40h248c4.4 0 8-3.6 8-8v-16c0-4.4-3.6-8-8-8zm-280 64h-32V64h32v96zm280 80h-88v-40c0-13.3-10.7-24-24-24h-48c-13.3 0-24 10.7-24 24v40H8c-4.4 0-8 3.6-8 8v16c0 4.4 3.6 8 8 8h312v40c0 13.3 10.7 24 24 24h48c13.3 0 24-10.7 24-24v-40h88c4.4 0 8-3.6 8-8v-16c0-4.4-3.6-8-8-8zm-120 64h-32v-96h32v96z"]},zr={prefix:"fal",iconName:"spinner-third",icon:[512,512,[],"f3f4","M460.115 
373.846l-6.941-4.008c-5.546-3.202-7.564-10.177-4.661-15.886 32.971-64.838 31.167-142.731-5.415-205.954-36.504-63.356-103.118-103.876-175.8-107.701C260.952 39.963 256 34.676 256 28.321v-8.012c0-6.904 5.808-12.337 12.703-11.982 83.552 4.306 160.157 50.861 202.106 123.67 42.069 72.703 44.083 162.322 6.034 236.838-3.14 6.149-10.75 8.462-16.728 5.011z"]},Fr={prefix:"fal",iconName:"sync",icon:[512,512,[],"f021","M492 8h-10c-6.627 0-12 5.373-12 12v110.627C426.929 57.261 347.224 8 256 8 123.228 8 14.824 112.338 8.31 243.493 7.971 250.311 13.475 256 20.301 256h10.016c6.353 0 11.646-4.949 11.977-11.293C48.157 132.216 141.097 42 256 42c82.862 0 154.737 47.077 190.289 116H332c-6.627 0-12 5.373-12 12v10c0 6.627 5.373 12 12 12h160c6.627 0 12-5.373 12-12V20c0-6.627-5.373-12-12-12zm-.301 248h-10.015c-6.352 0-11.647 4.949-11.977 11.293C463.841 380.158 370.546 470 256 470c-82.608 0-154.672-46.952-190.299-116H180c6.627 0 12-5.373 12-12v-10c0-6.627-5.373-12-12-12H20c-6.627 0-12 5.373-12 12v160c0 6.627 5.373 12 12 12h10c6.627 0 12-5.373 12-12V381.373C85.071 454.739 164.777 504 256 504c132.773 0 241.176-104.338 247.69-235.493.339-6.818-5.165-12.507-11.991-12.507z"]},jr={prefix:"fal",iconName:"tachometer-alt",icon:[576,512,[],"f3fd","M288 152c13.26 0 24-10.74 24-24s-10.74-24-24-24-24 10.74-24 24 10.74 24 24 24zm-136 8c-13.26 0-24 10.74-24 24s10.74 24 24 24 24-10.74 24-24-10.74-24-24-24zm272 0c-13.26 0-24 10.74-24 24s10.74 24 24 24 24-10.74 24-24-10.74-24-24-24zm56 136c-13.26 0-24 10.74-24 24s10.74 24 24 24 24-10.74 24-24-10.74-24-24-24zM288 32C128.94 32 0 160.94 0 320c0 52.8 14.25 102.26 39.06 144.8 5.61 9.62 16.3 15.2 27.44 15.2h443c11.14 0 21.83-5.58 27.44-15.2C561.75 422.26 576 372.8 576 320c0-159.06-128.94-288-288-288zm221.5 416l-442.8.68C44 409.75 32 365.26 32 320 32 178.84 146.84 64 288 64s256 114.84 256 256c0 45.26-12 89.75-34.5 128zM96 296c-13.26 0-24 10.74-24 24s10.74 24 24 24 24-10.74 24-24-10.74-24-24-24zm269.22-167.12c-8.19-2.78-17.44 1.55-20.34 9.89l-51.83 149.74c-1.69-.13-3.31-.51-5.04-.51-35.35 0-64 28.65-64 64s28.65 64 64 64 64-28.65 64-64c0-22.25-11.38-41.82-28.62-53.29l51.74-149.48c2.87-8.34-1.54-17.46-9.91-20.35zM288 384c-17.64 0-32-14.36-32-32s14.36-32 32-32 32 14.36 32 32-14.36 32-32 32z"]},Dr={prefix:"fal",iconName:"times",icon:[320,512,[],"f00d","M193.94 256L296.5 153.44l21.15-21.15c3.12-3.12 3.12-8.19 0-11.31l-22.63-22.63c-3.12-3.12-8.19-3.12-11.31 0L160 222.06 36.29 98.34c-3.12-3.12-8.19-3.12-11.31 0L2.34 120.97c-3.12 3.12-3.12 8.19 0 11.31L126.06 256 2.34 379.71c-3.12 3.12-3.12 8.19 0 11.31l22.63 22.63c3.12 3.12 8.19 3.12 11.31 0L160 289.94 262.56 392.5l21.15 21.15c3.12 3.12 8.19 3.12 11.31 0l22.63-22.63c3.12-3.12 3.12-8.19 0-11.31L193.94 256z"]},Rr={prefix:"fal",iconName:"times-circle",icon:[512,512,[],"f057","M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm0 464c-118.7 0-216-96.1-216-216 0-118.7 96.1-216 216-216 118.7 0 216 96.1 216 216 0 118.7-96.1 216-216 216zm94.8-285.3L281.5 256l69.3 69.3c4.7 4.7 4.7 12.3 0 17l-8.5 8.5c-4.7 4.7-12.3 4.7-17 0L256 281.5l-69.3 69.3c-4.7 4.7-12.3 4.7-17 0l-8.5-8.5c-4.7-4.7-4.7-12.3 0-17l69.3-69.3-69.3-69.3c-4.7-4.7-4.7-12.3 0-17l8.5-8.5c4.7-4.7 12.3-4.7 17 0l69.3 69.3 69.3-69.3c4.7-4.7 12.3-4.7 17 0l8.5 8.5c4.6 4.7 4.6 12.3 0 17z"]},Ir={prefix:"fal",iconName:"trash-alt",icon:[448,512,[],"f2ed","M296 432h16a8 8 0 0 0 8-8V152a8 8 0 0 0-8-8h-16a8 8 0 0 0-8 8v272a8 8 0 0 0 8 8zm-160 0h16a8 8 0 0 0 8-8V152a8 8 0 0 0-8-8h-16a8 8 0 0 0-8 8v272a8 8 0 0 0 8 8zM440 64H336l-33.6-44.8A48 48 0 0 0 264 0h-80a48 48 0 0 0-38.4 19.2L112 64H8a8 8 
0 0 0-8 8v16a8 8 0 0 0 8 8h24v368a48 48 0 0 0 48 48h288a48 48 0 0 0 48-48V96h24a8 8 0 0 0 8-8V72a8 8 0 0 0-8-8zM171.2 38.4A16.1 16.1 0 0 1 184 32h80a16.1 16.1 0 0 1 12.8 6.4L296 64H152zM384 464a16 16 0 0 1-16 16H80a16 16 0 0 1-16-16V96h320zm-168-32h16a8 8 0 0 0 8-8V152a8 8 0 0 0-8-8h-16a8 8 0 0 0-8 8v272a8 8 0 0 0 8 8z"]},Lr={prefix:"fal",iconName:"undo",icon:[512,512,[],"f0e2","M20 8h10c6.627 0 12 5.373 12 12v110.625C85.196 57.047 165.239 7.715 256.793 8.001 393.18 8.428 504.213 120.009 504 256.396 503.786 393.181 392.834 504 256 504c-63.926 0-122.202-24.187-166.178-63.908-5.113-4.618-5.354-12.561-.482-17.433l7.069-7.069c4.503-4.503 11.749-4.714 16.482-.454C150.782 449.238 200.935 470 256 470c117.744 0 214-95.331 214-214 0-117.744-95.331-214-214-214-82.862 0-154.737 47.077-190.289 116H180c6.627 0 12 5.373 12 12v10c0 6.627-5.373 12-12 12H20c-6.627 0-12-5.373-12-12V20c0-6.627 5.373-12 12-12z"]},Nr={prefix:"fal",iconName:"user",icon:[448,512,[],"f007","M313.6 288c-28.7 0-42.5 16-89.6 16-47.1 0-60.8-16-89.6-16C60.2 288 0 348.2 0 422.4V464c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48v-41.6c0-74.2-60.2-134.4-134.4-134.4zM416 464c0 8.8-7.2 16-16 16H48c-8.8 0-16-7.2-16-16v-41.6C32 365.9 77.9 320 134.4 320c19.6 0 39.1 16 89.6 16 50.4 0 70-16 89.6-16 56.5 0 102.4 45.9 102.4 102.4V464zM224 256c70.7 0 128-57.3 128-128S294.7 0 224 0 96 57.3 96 128s57.3 128 128 128zm0-224c52.9 0 96 43.1 96 96s-43.1 96-96 96-96-43.1-96-96 43.1-96 96-96z"]},Vr=function(e,t){var n="function"==typeof Symbol&&e[Symbol.iterator];if(!n)return e;var r,o,i=n.call(e),a=[];try{for(;(void 0===t||t-- >0)&&!(r=i.next()).done;)a.push(r.value)}catch(e){o={error:e}}finally{try{r&&!r.done&&(n=i.return)&&n.call(i)}finally{if(o)throw o.error}}return a},Hr=function(){for(var e=[],t=0;t<arguments.length;t++)e=e.concat(Vr(arguments[t]));return e},Br=[Yn,Mn,Qn,Xn,Gn,zn,qn,Kn,Zn,Jn,Bn,jn,Fn,er,Dn,tr,nr,Wn,rr,or,ir,ar,lr,ur,cr,sr,Rn,fr,In,Un,pr,dr,hr,mr,gr,vr,br,yr,wr,Ln,Er,xr,kr,Sr,Or,Nn,Cr,Tr,_r,Pr,Ar,Mr,Vn,zr,$n,Fr,jr,Dr,Rr,Ir,Lr,Nr,Hn];function Wr(e){var t=e.tracker,n=e.onSendMessage,o=e.username,i=r.useMemo((function(){return t&&t.events&&t.events.slice(0).reverse().filter((function(e){return"user"===e.event||"bot"===e.event}))}),[t]);return r.createElement(l.Flex,{flexDirection:"column",sx:{height:"100%"}},r.createElement(l.Flex,{flexDirection:"column-reverse",sx:{height:"100%",overflow:"auto"},px:4,py:3},i?i.map((function(e,t){return r.createElement(l.Box,{my:2,key:t},r.createElement(fn,{username:"user"===e.event?o:null,contents:e.data,reverseAlign:"user"===e.event,text:e.text}))})):r.createElement(l.Flex,{justifyContent:"center",flex:1},r.createElement(yn,{heading:"Talk to your assistant",body:"Your assistant will be ready to talk to you once you train a model"}))),r.createElement(l.Flex,{sx:{borderTop:"solid 1px",borderColor:"neutral_4",marginTop:"auto"}},r.createElement(P,{onSendMessage:n})))}var Ur={init:function(e){var t=e.username,n=e.onSendMessage,r=e.tracker,i=e.selector;A.b.add.apply(A.b,Hr(Br)),a.a.render(o.a.createElement(An,{theme:Pn},o.a.createElement(Wr,{onSendMessage:n,tracker:r,username:t})),document.querySelector(i))}}}])})); diff --git a/docs/_static/spec/action-server.yml b/docs/static/spec/action-server.yml similarity index 100% rename from docs/_static/spec/action-server.yml rename to docs/static/spec/action-server.yml diff --git a/docs/_static/spec/rasa.yml b/docs/static/spec/rasa.yml similarity index 90% rename from docs/_static/spec/rasa.yml rename to docs/static/spec/rasa.yml index 4472963ec281..02fa0933674f 
100644 --- a/docs/_static/spec/rasa.yml +++ b/docs/static/spec/rasa.yml @@ -69,9 +69,9 @@ paths: operationId: getStatus tags: - Server Information - summary: Status of the currently loaded Rasa model + summary: Status of the Rasa server description: >- - Information about the currently loaded Rasa model. + Information about the server and the currently loaded Rasa model. responses: 200: description: Success @@ -98,6 +98,10 @@ paths: type: string description: Path of the loaded model example: 20190429-103105.tar.gz + num_active_training_jobs: + type: integer + description: Number of running training processes + example: 2 401: $ref: '#/components/responses/401NotAuthenticated' 403: @@ -251,8 +255,9 @@ paths: tags: - Tracker summary: Run an action in a conversation + deprecated: true description: >- - Runs the action, calling the action server if necessary. + DEPRECATED. Runs the action, calling the action server if necessary. Any responses sent by the executed action will be forwarded to the channel specified in the output_channel parameter. If no output channel is specified, any messages that should be @@ -292,6 +297,57 @@ paths: 500: $ref: '#/components/responses/500ServerError' + /conversations/{conversation_id}/trigger_intent: + post: + security: + - TokenAuth: [] + - JWT: [] + operationId: triggerConversationIntent + tags: + - Tracker + summary: Inject an intent into a conversation + description: >- + Sends a specified intent and list of entities in place of a + user message. The bot then predicts and executes a response action. + Any responses sent by the executed action will be forwarded + to the channel specified in the ``output_channel`` parameter. + If no output channel is specified, any messages that should be + sent to the user will be included in the response of this endpoint. + parameters: + - $ref: '#/components/parameters/conversation_id' + - $ref: '#/components/parameters/include_events' + - $ref: '#/components/parameters/output_channel' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/IntentTriggerRequest' + responses: + 200: + description: Success + content: + application/json: + schema: + type: object + properties: + tracker: + $ref: '#/components/schemas/Tracker' + messages: + type: array + items: + $ref: '#/components/schemas/BotMessage' + 400: + $ref: '#/components/responses/400BadRequest' + 401: + $ref: '#/components/responses/401NotAuthenticated' + 403: + $ref: '#/components/responses/403NotAuthorized' + 409: + $ref: '#/components/responses/409Conflict' + 500: + $ref: '#/components/responses/500ServerError' + /conversations/{conversation_id}/predict: post: security: @@ -379,12 +435,34 @@ paths: Trains a new Rasa model. Depending on the data given only a dialogue model, only a NLU model, or a model combining a trained dialogue model with an NLU model will be trained. The new model is not loaded by default. + parameters: + - in: query + name: save_to_default_model_directory + schema: + type: boolean + default: True + description: >- + If `true` (default) the trained model will be saved in the default model + directory, if `false` it will be saved in a temporary directory + - in: query + name: force_training + schema: + type: boolean + default: False + description: Force a model training even if the data has not changed requestBody: required: true + description: >- + The training data can either be in YAML format or a JSON + which contains Rasa training data in Markdown format for each required key. 
content: application/json: schema: - $ref: '#/components/schemas/TrainingRequest' + $ref: '#/components/schemas/JSONTrainingRequest' + application/x-yaml: + schema: + type: string + responses: 200: description: Zipped Rasa model @@ -778,6 +856,7 @@ components: - telegram - twilio - webexteams + - socketio responses: @@ -901,21 +980,34 @@ components: type: object properties: name: - description: >- - Name of the action to be executed. + description: Name of the action to be executed. type: string example: utter_greet policy: - description: >- - Name of the policy that predicted the action (optional). + description: Name of the policy that predicted the action. type: string + nullable: true confidence: - description: >- - Confidence of the prediction (optional). + description: Confidence of the prediction. type: number + nullable: true example: 0.987232 required: ["name"] + IntentTriggerRequest: + type: object + properties: + name: + description: Name of the intent to be executed. + type: string + example: greet + entities: + description: Entities to be passed on. + type: object + nullable: true + example: {"temperature": "high"} + required: ["name"] + Message: type: object properties: @@ -1040,7 +1132,7 @@ components: type: object additionalProperties: $ref: '#/components/schemas/SlotDescription' - templates: + responses: description: Bot response templates type: object additionalProperties: @@ -1050,7 +1142,7 @@ components: type: array items: type: string - example: ['utter_greet', 'utter_goodbye', 'action_listen'] + example: ['action_greet', 'action_goodbye', 'action_listen'] BotMessage: type: object @@ -1095,7 +1187,7 @@ components: properties: conversation_id: type: string - description: Id of the conversation + description: ID of the conversation example: default slots: type: array @@ -1113,7 +1205,7 @@ components: description: Deterministic scheduled next action paused: type: boolean - description: Bot is pasued + description: Bot is paused example: false events: type: array @@ -1128,13 +1220,13 @@ components: type: string description: Name of last bot action example: action_listen - active_form: + active_loop: type: object - description: Name of the active form + description: Name of the active loop properties: name: type: string - description: Name of the acive form + description: Name of the active loop example: restaurant_form Error: @@ -1184,7 +1276,7 @@ components: type: string description: >- Policy which predicted the most likely action - example: policy_2_KerasPolicy + example: policy_2_TEDPolicy tracker: $ref: '#/components/schemas/Tracker' @@ -1213,7 +1305,7 @@ components: type: integer description: Time to wait between pulls from model server - TrainingRequest: + JSONTrainingRequest: type: object properties: domain: @@ -1222,17 +1314,22 @@ components: $ref: '#/components/schemas/ConfigFile' nlu: $ref: '#/components/schemas/NLUTrainingData' + responses: + $ref: '#/components/schemas/RetrievalIntentsTrainingData' stories: $ref: '#/components/schemas/StoriesTrainingData' - out: - type: string - description: Output directory - example: models force: type: boolean description: >- Force a model training even if the data has not changed example: false + deprecated: True + save_to_default_model_directory: + type: boolean + description: >- + If `true` (default) the trained model will be saved in the default model + directory, if `false` it will be saved in a temporary directory + deprecated: True required: ["config"] NLUTrainingData: @@ -1285,6 +1382,17 @@ components: - unhappy + 
RetrievalIntentsTrainingData: + type: string + description: Rasa response texts for retrieval intents in markdown format + example: >- + ## ask name + * chitchat/ask_name + - my name is Sara, Rasa's documentation bot! + + ## ask weather + * chitchat/ask_weather + - it's always sunny where I live StoriesTrainingData: type: string @@ -1350,14 +1458,7 @@ components: - mood_great - mood_unhappy - actions: - - utter_greet - - utter_cheer_up - - utter_did_that_help - - utter_happy - - utter_goodbye - - templates: + responses: utter_greet: - text: "Hey! How are you?" @@ -1384,7 +1485,7 @@ components: policies: - name: MemoizationPolicy - - name: KerasPolicy + - name: TEDPolicy TrainingResult: type: string @@ -1587,4 +1688,3 @@ components: properties: use_entities: type: boolean - diff --git a/docs/themes/theme-live-codeblock/index.js b/docs/themes/theme-live-codeblock/index.js new file mode 100644 index 000000000000..da243034ce27 --- /dev/null +++ b/docs/themes/theme-live-codeblock/index.js @@ -0,0 +1,26 @@ +const path = require('path'); + + +// FIXME: this package is copied from +// https://github.com/facebook/docusaurus/tree/afe9ff91a4247316f0081c9b080655d575298416/packages/docusaurus-theme-live-codeblock/src +module.exports = function() { + return { + name: 'theme-live-codeblock', + + getThemePath() { + return path.resolve(__dirname, './theme'); + }, + + configureWebpack() { + return { + resolve: { + alias: { + // fork of Buble which removes Buble's large dependency and weighs in at a smaller size of ~51kB + // https://github.com/FormidableLabs/react-live#what-bundle-size-can-i-expect + buble: '@philpl/buble', + }, + }, + }; + }, + }; +}; diff --git a/docs/themes/theme-live-codeblock/theme/CodeBlock/index.jsx b/docs/themes/theme-live-codeblock/theme/CodeBlock/index.jsx new file mode 100755 index 000000000000..4a0b9143bd78 --- /dev/null +++ b/docs/themes/theme-live-codeblock/theme/CodeBlock/index.jsx @@ -0,0 +1,30 @@ +import React from 'react'; +import useDocusaurusContext from '@docusaurus/useDocusaurusContext'; +import usePrismTheme from '@theme/hooks/usePrismTheme'; +import Playground from '@theme/Playground'; +import ReactLiveScope from '@theme/ReactLiveScope'; +import CodeBlock from '@theme-init/CodeBlock'; + +const withLiveEditor = (Component) => { + const WrappedComponent = (props) => { + const {isClient} = useDocusaurusContext(); + const prismTheme = usePrismTheme(); + + if (props.live) { + return ( + <Playground + key={isClient} + scope={ReactLiveScope} + theme={prismTheme} + {...props} + /> + ); + } + + return <Component {...props} />; + }; + + return WrappedComponent; +}; + +export default withLiveEditor(CodeBlock); diff --git a/docs/themes/theme-live-codeblock/theme/Playground/index.jsx b/docs/themes/theme-live-codeblock/theme/Playground/index.jsx new file mode 100755 index 000000000000..b6fafe9ce1d8 --- /dev/null +++ b/docs/themes/theme-live-codeblock/theme/Playground/index.jsx @@ -0,0 +1,47 @@ +import * as React from 'react'; +import {LiveProvider, LiveEditor, LiveError, LivePreview} from 'react-live'; +import clsx from 'clsx'; +import ThemeContext from '@theme/theme-context'; + +import styles from './styles.module.css'; + + +function Playground({children, theme, transformCode, noResult, name, ...props}) { + const code = children.replace(/\n$/, ''); + const themeContext = React.useContext(ThemeContext); + + // only run this when mounting + React.useEffect(() => { + themeContext.onLiveCodeStart(name, code); + }, []); + + return ( + <LiveProvider + code={code} + 
transformCode={transformCode || ((code) => `${code};`)} + theme={theme} + {...props}> + <div + className={clsx( + styles.playgroundHeader, + styles.playgroundEditorHeader, + )}> + Live Editor + </div> + <LiveEditor className={styles.playgroundEditor} onChange={value => themeContext.onLiveCodeChange(name, value)} /> + {noResult ? undefined : <div + className={clsx( + styles.playgroundHeader, + styles.playgroundPreviewHeader, + )}> + Result + </div>} + {noResult ? undefined : <div className={styles.playgroundPreview}> + <LivePreview /> + <LiveError /> + </div>} + </LiveProvider> + ); +} + +export default Playground; diff --git a/docs/themes/theme-live-codeblock/theme/Playground/styles.module.css b/docs/themes/theme-live-codeblock/theme/Playground/styles.module.css new file mode 100755 index 000000000000..871ff46be145 --- /dev/null +++ b/docs/themes/theme-live-codeblock/theme/Playground/styles.module.css @@ -0,0 +1,24 @@ +.playgroundHeader { + letter-spacing: 0.08rem; + padding: 0.75rem; + text-transform: uppercase; + font-weight: bold; +} + +.playgroundEditorHeader { + background: rgb(32, 35, 42); + color: #999; +} + +.playgroundPreviewHeader { + background: rgb(236, 236, 236); + color: rgb(109, 109, 109); +} + +.playgroundPreview { + border: 1px solid #f0f0f0; + border-bottom-left-radius: var(--ifm-global-radius); + border-bottom-right-radius: var(--ifm-global-radius); + position: relative; + padding: 1rem; +} diff --git a/docs/themes/theme-live-codeblock/theme/ReactLiveScope/index.js b/docs/themes/theme-live-codeblock/theme/ReactLiveScope/index.js new file mode 100644 index 000000000000..e0e07d4e31eb --- /dev/null +++ b/docs/themes/theme-live-codeblock/theme/ReactLiveScope/index.js @@ -0,0 +1,9 @@ +import React from 'react'; + +// Add react-live imports you need here +const ReactLiveScope = { + React, + ...React, +}; + +export default ReactLiveScope; diff --git a/docs/themes/theme-live-codeblock/theme/theme-context.js b/docs/themes/theme-live-codeblock/theme/theme-context.js new file mode 100644 index 000000000000..6faa4fd4b5cf --- /dev/null +++ b/docs/themes/theme-live-codeblock/theme/theme-context.js @@ -0,0 +1,5 @@ +import React from "react"; + +const ThemeContext = React.createContext(); + +export default ThemeContext; diff --git a/docs/user-guide/architecture.rst b/docs/user-guide/architecture.rst deleted file mode 100644 index 73d12e29ade8..000000000000 --- a/docs/user-guide/architecture.rst +++ /dev/null @@ -1,37 +0,0 @@ -:desc: Check the architecture to understand how Rasa uses machine - learning, context and state of the conversation to predict the - next action of the AI Assistant. - -.. _architecture: - -Architecture -============ - -.. edit-link:: - - -Message Handling -^^^^^^^^^^^^^^^^ - -This diagram shows the basic steps of how an assistant built with Rasa -responds to a message: - -.. image:: ../_static/images/rasa-message-processing.png - -The steps are: - -1. The message is received and passed to an ``Interpreter``, which - converts it into a dictionary including the original text, the intent, - and any entities that were found. This part is handled by NLU. -2. The ``Tracker`` is the object which keeps track of conversation state. - It receives the info that a new message has come in. -3. The policy receives the current state of the tracker. -4. The policy chooses which action to take next. -5. The chosen action is logged by the tracker. -6. A response is sent to the user. - - -.. note:: - - Messages can be text typed by a human, or structured input - like a button press. 
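Reviewer note: the ``trigger_intent`` endpoint added to ``docs/static/spec/rasa.yml`` earlier in this patch is easiest to sanity-check with a concrete request. The sketch below is illustrative only and reuses the spec's own examples (``conversation_id: default``, intent ``greet``, entities ``{"temperature": "high"}``); the host and port are assumed defaults and are not defined anywhere in this patch.

.. code:: python

   import requests

   # Minimal sketch of the new POST /conversations/{conversation_id}/trigger_intent call.
   # "localhost:5005" is an assumed local server address, not something this patch specifies.
   response = requests.post(
       "http://localhost:5005/conversations/default/trigger_intent",
       json={"name": "greet", "entities": {"temperature": "high"}},
   )
   response.raise_for_status()

   # Per the 200 response schema in this patch: the updated tracker plus any bot
   # messages that were not forwarded to an output channel.
   payload = response.json()
   print(payload["messages"])

As with the deprecated ``execute`` endpoint it supersedes, passing ``output_channel`` as a query parameter would forward the bot's responses to that channel instead of returning them in ``messages``.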
diff --git a/docs/user-guide/cloud-storage.rst b/docs/user-guide/cloud-storage.rst deleted file mode 100644 index b983c25372dd..000000000000 --- a/docs/user-guide/cloud-storage.rst +++ /dev/null @@ -1,64 +0,0 @@ -:desc: Handle Rasa models on premise or in your private cloud for - GDPR-compliant intent recognition and entity extraction. - -.. _cloud-storage: - -Cloud Storage -============= - -.. edit-link:: - -Rasa supports using `S3 <https://aws.amazon.com/s3/>`_ , -`GCS <https://cloud.google.com/storage/>`_ and `Azure Storage <https://azure.microsoft.com/services/storage/>` to save your models. - -* Amazon S3 Storage - S3 is supported using the ``boto3`` module which you can - install with ``pip install boto3``. - - Start the Rasa server with ``remote-storage`` option set to - ``aws``. Get your S3 credentials and set the following - environment variables: - - - ``AWS_SECRET_ACCESS_KEY`` - - ``AWS_ACCESS_KEY_ID`` - - ``AWS_DEFAULT_REGION`` - - ``BUCKET_NAME`` - - ``AWS_ENDPOINT_URL`` - - If there is no bucket with the name ``BUCKET_NAME``, Rasa will create it. - -* Google Cloud Storage - GCS is supported using the ``google-cloud-storage`` package, - which you can install with ``pip install google-cloud-storage``. - - Start the Rasa server with ``remote-storage`` option set to ``gcs``. - - When running on google app engine and compute engine, the auth - credentials are already set up. For running locally or elsewhere, - checkout their - `client repo <https://github.com/GoogleCloudPlatform/python-docs-samples/tree/master/storage/cloud-client#authentication>`_ - for details on setting up authentication. It involves creating - a service account key file from google cloud console, - and setting the ``GOOGLE_APPLICATION_CREDENTIALS`` environment - variable to the path of that key file. - -* Azure Storage - Azure is supported using the ``azure-storage-blob`` package, - which you can install with ``pip install azure-storage-blob``. - - Start the Rasa server with ``remote-storage`` option set to ``azure``. - - The following environment variables must be set: - - - ``AZURE_CONTAINER`` - - ``AZURE_ACCOUNT_NAME`` - - ``AZURE_ACCOUNT_KEY`` - - If there is no container with the name ``AZURE_CONTAINER``, Rasa will create it. - -Models are gzipped before they are saved in the cloud. The gzipped file naming convention -is `{MODEL_NAME}.tar.gz` and it is stored in the root folder of the storage service. -Currently, you are not able to manually specify the path on the cloud storage. - -If storing trained models, Rasa will gzip the new model and upload it to the container. If retrieving/loading models -from the cloud storage, Rasa will download the gzipped model locally and extract the contents to a temporary directory. diff --git a/docs/user-guide/command-line-interface.rst b/docs/user-guide/command-line-interface.rst deleted file mode 100644 index f01b89834795..000000000000 --- a/docs/user-guide/command-line-interface.rst +++ /dev/null @@ -1,283 +0,0 @@ -:desc: Command line interface for open source chatbot framework Rasa. Learn how to train, test and run your machine learning-based conversational AI assistants - -.. _command-line-interface: - -Command Line Interface -====================== - -.. edit-link:: - - -.. contents:: - :local: - -Cheat Sheet -~~~~~~~~~~~ - -The command line interface (CLI) gives you easy-to-remember commands for common tasks. 
- -========================= ============================================================================================= -Command Effect -========================= ============================================================================================= -``rasa init`` Creates a new project with example training data, actions, and config files. -``rasa train`` Trains a model using your NLU data and stories, saves trained model in ``./models``. -``rasa interactive`` Starts an interactive learning session to create new training data by chatting. -``rasa shell`` Loads your trained model and lets you talk to your assistant on the command line. -``rasa run`` Starts a Rasa server with your trained model. See the :ref:`running-the-server` docs for details. -``rasa run actions`` Starts an action server using the Rasa SDK. -``rasa visualize`` Visualizes stories. -``rasa test`` Tests a trained Rasa model using your test NLU data and stories. -``rasa data split nlu`` Performs a split of your NLU data according to the specified percentages. -``rasa data convert nlu`` Converts NLU training data between different formats. -``rasa x`` Launch Rasa X locally. -``rasa -h`` Shows all available commands. -========================= ============================================================================================= - - -Create a new project -~~~~~~~~~~~~~~~~~~~~ - -A single command sets up a complete project for you with some example training data. - -.. code:: bash - - rasa init - - -This creates the following files: - -.. code:: bash - - . - ├── __init__.py - ├── actions.py - ├── config.yml - ├── credentials.yml - ├── data - │   ├── nlu.md - │   └── stories.md - ├── domain.yml - ├── endpoints.yml - └── models - └── <timestamp>.tar.gz - -The ``rasa init`` command will ask you if you want to train an initial model using this data. -If you answer no, the ``models`` directory will be empty. - -With this project setup, common commands are very easy to remember. -To train a model, type ``rasa train``, to talk to your model on the command line, ``rasa shell``, -to test your model type ``rasa test``. - - -Train a Model -~~~~~~~~~~~~~ - -The main command is: - -.. code:: bash - - rasa train - - -This command trains a Rasa model that combines a Rasa NLU and a Rasa Core model. -If you only want to train an NLU or a Core model, you can run ``rasa train nlu`` or ``rasa train core``. -However, Rasa will automatically skip training Core or NLU if the training data and config haven't changed. - -``rasa train`` will store the trained model in the directory defined by ``--out``. The name of the model -is per default ``<timestamp>.tar.gz``. If you want to name your model differently, you can specify the name -using ``--fixed-model-name``. - -The following arguments can be used to configure the training process: - -.. program-output:: rasa train --help - - -.. note:: - - Make sure training data for Core and NLU are present when training a model using ``rasa train``. - If training data for only one model type is present, the command automatically falls back to - ``rasa train nlu`` or ``rasa train core`` depending on the provided training files. - - -Interactive Learning -~~~~~~~~~~~~~~~~~~~~ - -To start an interactive learning session with your assistant, run - -.. code:: bash - - rasa interactive - - -If you provide a trained model using the ``--model`` argument, the interactive learning process -is started with the provided model. 
If no model is specified, ``rasa interactive`` will -train a new Rasa model with the data located in ``data/`` if no other directory was passed to the -``--data`` flag. After training the initial model, the interactive learning session starts. -Training will be skipped if the training data and config haven't changed. - -The full list of arguments that can be set for ``rasa interactive`` is: - -.. program-output:: rasa interactive --help - -Talk to your Assistant -~~~~~~~~~~~~~~~~~~~~~~ - -To start a chat session with your assistant on the command line, run: - -.. code:: bash - - rasa shell - -The model that should be used to interact with your bot can be specified by ``--model``. -If you start the shell with an NLU-only model, ``rasa shell`` allows -you to obtain the intent and entities of any text you type on the command line. -If your model includes a trained Core model, you can chat with your bot and see -what the bot predicts as a next action. -If you have trained a combined Rasa model but nevertheless want to see what your model -extracts as intents and entities from text, you can use the command ``rasa shell nlu``. - -To increase the logging level for debugging, run: - -.. code:: bash - - rasa shell --debug - - -The full list of options for ``rasa shell`` is - -.. program-output:: rasa shell --help - - -Start a Server -~~~~~~~~~~~~~~ - -To start a server running your Rasa model, run: - -.. code:: bash - - rasa run - -The following arguments can be used to configure your Rasa server: - -.. program-output:: rasa run --help - -For more information on the additional parameters, see :ref:`running-the-server`. -See the Rasa :ref:`http-api` docs for detailed documentation of all the endpoints. - -.. _run-action-server: - -Start an Action Server -~~~~~~~~~~~~~~~~~~~~~~ - -To run your action server run - -.. code:: bash - - rasa run actions - -The following arguments can be used to adapt the server settings: - -.. program-output:: rasa run actions --help - - -Visualize your Stories -~~~~~~~~~~~~~~~~~~~~~~ - -To open a browser tab with a graph showing your stories: - -.. code:: bash - - rasa visualize - -Normally, training stories in the directory ``data`` are visualized. If your stories are located -somewhere else, you can specify their location with ``--stories``. - -Additional arguments are: - -.. program-output:: rasa visualize --help - - -Evaluate a Model on Test Data -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To evaluate your model on test data, run: - -.. code:: bash - - rasa test - - -Specify the model to test using ``--model``. -Check out more details in :ref:`nlu-evaluation` and :ref:`core-evaluation`. - -The following arguments are available for ``rasa test``: - -.. program-output:: rasa test --help - - -.. _train-test-split: - -Create a Train-Test Split -~~~~~~~~~~~~~~~~~~~~~~~~~ - -To create a split of your NLU data, run: - -.. code:: bash - - rasa data split nlu - - -You can specify the training data, the fraction, and the output directory using the following arguments: - -.. program-output:: rasa data split nlu --help - - -This command will attempt to keep the proportions of intents the same in train and test. - - -Convert Data Between Markdown and JSON -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To convert NLU data from LUIS data format, WIT data format, Dialogflow data format, JSON, or Markdown -to JSON or Markdown, run: - -.. code:: bash - - rasa data convert nlu - -You can specify the input file, output file, and the output format with the following arguments: - -.. 
program-output:: rasa data convert nlu --help - - -.. _section_evaluation: - - -Start Rasa X -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. raw:: html - - Rasa X is a tool that helps you build, improve, and deploy AI Assistants that are powered by the Rasa framework. - You can find more information about it <a class="reference external" href="https://rasa.com/docs/rasa-x/" target="_blank">here</a>. - -You can start Rasa X locally by executing - -.. code:: bash - - rasa x - -.. raw:: html - - To be able to start Rasa X you need to have Rasa X installed (instruction can be found - <a class="reference external" href="https://rasa.com/docs/rasa-x/installation-and-setup/" target="_blank">here</a>) - and you need to be in a Rasa project. - -.. note:: - - By default Rasa X runs on the port 5002. Using the argument ``--rasa-x-port`` allows you to change it to - any other port. - -The following arguments are available for ``rasa x``: - -.. program-output:: rasa x --help diff --git a/docs/user-guide/connectors/cisco-webex-teams.rst b/docs/user-guide/connectors/cisco-webex-teams.rst deleted file mode 100644 index ceb74f0cfb69..000000000000 --- a/docs/user-guide/connectors/cisco-webex-teams.rst +++ /dev/null @@ -1,59 +0,0 @@ -:desc: Build a Rasa Chat Bot on Cisco Webex - -.. _cisco-webex-teams: - -Cisco Webex Teams -================= - -.. edit-link:: - -You first have to create a cisco webex app to get credentials. -Once you have them you can add these to your ``credentials.yml``. - -Getting Credentials -^^^^^^^^^^^^^^^^^^^ - -**How to get the Cisco Webex Teams credentials:** - -You need to set up a bot. Please visit below link to create a bot -`Webex Authentication <https://developer.webex.com/authentication.html>`_. - -After you have created the bot through Cisco Webex Teams, you need to create a -room in Cisco Webex Teams. Then add the bot in the room the same way you would -add a person in the room. - -You need to note down the room ID for the room you created. This room ID will -be used in ``room`` variable in the ``credentials.yml`` file. - -Please follow this link below to find the room ID -``https://developer.webex.com/endpoint-rooms-get.html`` - -Running on Cisco Webex -^^^^^^^^^^^^^^^^^^^^^^ - -If you want to connect to the ``webexteams`` input channel using the run -script, e.g. using: - -.. code-block:: bash - - rasa run - -you need to supply a ``credentials.yml`` with the following content: - -.. code-block:: yaml - - webexteams: - access_token: "YOUR-BOT-ACCESS-TOKEN" - room: "YOUR-CISCOWEBEXTEAMS-ROOM-ID" - - -The endpoint for receiving Cisco Webex Teams messages is -``http://localhost:5005/webhooks/webexteams/webhook``, replacing -the host and port with the appropriate values. This is the URL -you should add in the OAuth & Permissions section. - -.. note:: - - If you do not set the ``room`` keyword - argument, messages will by delivered back to - the user who sent them. diff --git a/docs/user-guide/connectors/custom-connectors.rst b/docs/user-guide/connectors/custom-connectors.rst deleted file mode 100644 index afe08442e2d1..000000000000 --- a/docs/user-guide/connectors/custom-connectors.rst +++ /dev/null @@ -1,80 +0,0 @@ -:desc: Deploy and Run a Rasa Chat Bot on a custom chat interface - -.. _custom-connectors: - -Custom Connectors -================= - -.. edit-link:: - -You can also implement your own custom channel. You can -use the ``rasa.core.channels.channel.RestInput`` class as a template. -The methods you need to implement are ``blueprint`` and ``name``. 
The method -needs to create a sanic blueprint that can be attached to a sanic server. - -This allows you to add REST endpoints to the server that the external -messaging service can call to deliver messages. - -Your blueprint should have at least the two routes: ``health`` on ``/``, -and ``receive`` on the HTTP route ``/webhook``. - -The ``name`` method defines the url prefix. E.g. if your component is -named ``myio``, the webhook you can use to attach the external service is: -``http://localhost:5005/webhooks/myio/webhook`` (replacing the hostname -and port with your values). - -To send a message, you would run a command like: - -.. code-block:: bash - - curl -XPOST http://localhost:5000/webhooks/myio/webhook \ - -d '{"sender": "user1", "message": "hello"}' \ - -H "Content-type: application/json" - -where ``myio`` is the name of your component. - -If you need to use extra information from your front end in your custom -actions, you can add this information in the ``metadata`` dict of your user -message. This information will accompany the user message through the rasa -server into the action server when applicable, where you can find it stored in -the ``tracker``. Message metadata will not directly affect NLU classification -or action prediction. - -Here are all the attributes of ``UserMessage``: - -.. autoclass:: rasa.core.channels.UserMessage - - .. automethod:: __init__ - - -In your implementation of the ``receive`` endpoint, you need to make -sure to call ``on_new_message(UserMessage(text, output, sender_id))``. -This will tell Rasa Core to handle this user message. The ``output`` -is an output channel implementing the ``OutputChannel`` class. You can -either implement the methods for your particular chat channel (e.g. there -are methods to send text and images) or you can use the -``CollectingOutputChannel`` to collect the bot responses Core -creates while the bot is processing your messages and return -them as part of your endpoint response. This is the way the ``RestInput`` -channel is implemented. For examples on how to create and use your own output -channel, take a look at the implementations of the other -output channels, e.g. the ``SlackBot`` in ``rasa.core.channels.slack``. - -To use a custom channel, you need to supply a credentials configuration file -``credentials.yml`` with the command line argument ``--credentials``. -This credentials file has to contain the module path of your custom channel and -any required configuration parameters. For example, this could look like: - -.. code-block:: yaml - - mypackage.MyIO: - username: "user_name" - another_parameter: "some value" - -Here is an example implementation for an input channel that receives the messages, -hands them over to Rasa Core, collects the bot utterances, and returns -these bot utterances as the json response to the webhook call that -posted the message to the channel: - -.. literalinclude:: ../../../rasa/core/channels/channel.py - :pyobject: RestInput diff --git a/docs/user-guide/connectors/facebook-messenger.rst b/docs/user-guide/connectors/facebook-messenger.rst deleted file mode 100644 index b0feba886f1a..000000000000 --- a/docs/user-guide/connectors/facebook-messenger.rst +++ /dev/null @@ -1,71 +0,0 @@ -:desc: Build a Rasa Chat Bot on Facebook Messenger - -.. _facebook-messenger: - -Facebook Messenger -================== - -.. edit-link:: - -Facebook Setup --------------- - -You first need to set up a facebook page and app to get credentials to connect to -Facebook Messenger. 
Once you have them you can add these to your ``credentials.yml``. - - -Getting Credentials -^^^^^^^^^^^^^^^^^^^ - -**How to get the Facebook credentials:** -You need to set up a Facebook app and a page. - - 1. To create the app head over to - `Facebook for Developers <https://developers.facebook.com/>`_ - and click on **My Apps** → **Add New App**. - 2. Go onto the dashboard for the app and under **Products**, - find the **Messenger** section and click **Set Up**. Scroll down to - **Token Generation** and click on the link to create a new page for your - app. - 3. Create your page and select it in the dropdown menu for the - **Token Generation**. The shown **Page Access Token** is the - ``page-access-token`` needed later on. - 4. Locate the **App Secret** in the app dashboard under **Settings** → **Basic**. - This will be your ``secret``. - 5. Use the collected ``secret`` and ``page-access-token`` in your - ``credentials.yml``, and add a field called ``verify`` containing - a string of your choice. Start ``rasa run`` with the - ``--credentials credentials.yml`` option. - 6. Set up a **Webhook** and select at least the **messaging** and - **messaging_postback** subscriptions. Insert your callback URL which will - look like ``https://<YOUR_HOST>/webhooks/facebook/webhook``. Insert the - **Verify Token** which has to match the ``verify`` - entry in your ``credentials.yml``. - - -For more detailed steps, visit the -`Messenger docs <https://developers.facebook.com/docs/graph-api/webhooks>`_. - - -Running On Facebook Messenger -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you want to connect to Facebook using the run script, e.g. using: - -.. code-block:: bash - - rasa run - -you need to supply a ``credentials.yml`` with the following content: - -.. code-block:: yaml - - facebook: - verify: "rasa-bot" - secret: "3e34709d01ea89032asdebfe5a74518" - page-access-token: "EAAbHPa7H9rEBAAuFk4Q3gPKbDedQnx4djJJ1JmQ7CAqO4iJKrQcNT0wtD" - -The endpoint for receiving Facebook messenger messages is -``http://localhost:5005/webhooks/facebook/webhook``, replacing -the host and port with the appropriate values. This is the URL -you should add in the configuration of the webhook. diff --git a/docs/user-guide/connectors/mattermost.rst b/docs/user-guide/connectors/mattermost.rst deleted file mode 100644 index cbbab6007bf9..000000000000 --- a/docs/user-guide/connectors/mattermost.rst +++ /dev/null @@ -1,57 +0,0 @@ -:desc: Build a Rasa Chat Bot on Mattermost - -.. _mattermost: - -Mattermost ----------- - -.. edit-link:: - -You first have to create a mattermost app to get credentials. -Once you have them you can add these to your ``credentials.yml``. - -Getting Credentials -^^^^^^^^^^^^^^^^^^^ - -**How to set up the outgoing webhook:** - - 1. To create the Mattermost outgoing webhook, login to your Mattermost - team site and go to **Main Menu > Integrations > Outgoing Webhooks**. - 2. Click **Add outgoing webhook**. - 3. Fill out the details including the channel you want the bot in. - You will need to ensure the **trigger words** section is set up - with ``@yourbotname`` so that the bot doesn't trigger on everything - that is said. - 4. Make sure **trigger when** is set to value - **first word matches a trigger word exactly**. - 5. The callback url needs to be your ngrok url where you - have your webhook running in Core or your public address, e.g. - ``http://test.example.com/webhooks/mattermost/webhook``. - - -For more detailed steps, visit the -`Mattermost docs <https://docs.mattermost.com/guides/developer.html>`_. 
- -Running on Mattermost -^^^^^^^^^^^^^^^^^^^^^ - -If you want to connect to the Mattermost input channel using the -run script, e.g. using: - -.. code-block:: bash - - rasa run - -you need to supply a ``credentials.yml`` with the following content: - -.. code-block:: yaml - - mattermost: - url: "https://chat.example.com/api/v4" - team: "community" - user: "user@user.com" - pw: "password" - -The endpoint for receiving Mattermost channel messages -is ``/webhooks/mattermost/webhook``. This is the url you should -add in the Mattermost outgoing webhook. diff --git a/docs/user-guide/connectors/microsoft-bot-framework.rst b/docs/user-guide/connectors/microsoft-bot-framework.rst deleted file mode 100644 index 7c9dd31713bc..000000000000 --- a/docs/user-guide/connectors/microsoft-bot-framework.rst +++ /dev/null @@ -1,29 +0,0 @@ -:desc: Build a Rasa Chat Bot on Microsoft Bot Framework - -.. _microsoft-bot-framework: - -Microsoft Bot Framework -======================= - -.. edit-link:: - -You first have to create a Microsoft app to get credentials. -Once you have them you can add these to your ``credentials.yml``. - -Running on Microsoft Bot Framework -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you want to connect to the botframework input channel using the -run script, e.g. using: - -.. code-block:: bash - - rasa run - -you need to supply a ``credentials.yml`` with the following content: - -.. code-block:: yaml - - botframework: - app_id: "MICROSOFT_APP_ID" - app_password: "MICROSOFT_APP_PASSWORD" diff --git a/docs/user-guide/connectors/rocketchat.rst b/docs/user-guide/connectors/rocketchat.rst deleted file mode 100644 index 4a63e7ea88d2..000000000000 --- a/docs/user-guide/connectors/rocketchat.rst +++ /dev/null @@ -1,52 +0,0 @@ -:desc: Build a Rasa Chat Bot on Rocketchat - -.. _rocketchat: - -RocketChat -========== - -.. edit-link:: - -Getting Credentials -^^^^^^^^^^^^^^^^^^^ - -**How to set up Rocket.Chat:** - - 1. Create a user that will be used to post messages, and set its - credentials at credentials file. - 2. Create a Rocket.Chat outgoing webhook by logging in as admin to - Rocket.Chat and going to - **Administration > Integrations > New Integration**. - 3. Select **Outgoing Webhook**. - 4. Set **Event Trigger** section to value **Message Sent**. - 5. Fill out the details, including the channel you want the bot - listen to. Optionally, it is possible to set the - **Trigger Words** section with ``@yourbotname`` so that the bot - doesn't trigger on everything that is said. - 6. Set your **URLs** section to the Rasa URL where you have your - webhook running in Core or your public address with - ``/webhooks/rocketchat/webhook``, e.g. - ``http://test.example.com/webhooks/rocketchat/webhook``. - -For more information on the Rocket.Chat Webhooks, see the -`Rocket.Chat Guide <https://rocket.chat/docs/administrator-guides/integrations/>`_. - - -Running on RocketChat -^^^^^^^^^^^^^^^^^^^^^ - -If you want to connect to the Rocket.Chat input channel using the run -script, e.g. using: - -.. code-block:: bash - - rasa run - -you need to supply a ``credentials.yml`` with the following content: - -.. code-block:: yaml - - rocketchat: - user: "yourbotname" - password: "YOUR_PASSWORD" - server_url: "https://demo.rocket.chat" diff --git a/docs/user-guide/connectors/slack.rst b/docs/user-guide/connectors/slack.rst deleted file mode 100644 index fca29a2587ed..000000000000 --- a/docs/user-guide/connectors/slack.rst +++ /dev/null @@ -1,59 +0,0 @@ -:desc: Build a Rasa Chat Bot on Slack - -.. 
_slack: - -Slack -===== - -.. edit-link:: - -You first have to create a Slack app to get credentials. -Once you have them you can add these to your ``credentials.yml``. - -Getting Credentials -^^^^^^^^^^^^^^^^^^^ - -**How to get the Slack credentials:** You need to set up a Slack app. - - 1. To create the app go to: https://api.slack.com/apps and click - on *"Create New App"*. - 2. Activate the following features: interactive components, event - subscriptions, bot users, permissions (for basic functionality - you should subscribe to the ``message.channel``, - ``message.groups``, ``message.im`` and ``message.mpim`` events) - 3. The ``slack_channel`` is the target your bot posts to. - This can be a channel or an individual person. You can leave out - the argument to post DMs to the bot. - 4. Use the entry for ``Bot User OAuth Access Token`` in the - "OAuth & Permissions" tab as your ``slack_token``. It should start - with ``xoxob``. - - -For more detailed steps, visit the -`Slack API docs <https://api.slack.com/incoming-webhooks>`_. - -Running on Slack -^^^^^^^^^^^^^^^^ - -If you want to connect to the slack input channel using the run -script, e.g. using: - -.. code-block:: bash - - rasa run - -you need to supply a ``credentials.yml`` with the following content: - -.. code-block:: yaml - - slack: - slack_token: "xoxb-286425452756-safjasdf7sl38KLls" - slack_channel: "#my_channel" - slack_retry_reason_header: "x-slack-retry-reason" #Slack HTTP header name indicating reason that slack send retry request. This configuration is optional. - slack_retry_number_header: "x-slack-retry-num" #Slack HTTP header name indicating the attempt number. This configuration is optional. - errors_ignore_retry: None #Any error codes given by Slack included in this list will be ignored. Error codes are listed `here <https://api.slack.com/events-api#errors>`_. - -The endpoint for receiving slack messages is -``http://localhost:5005/webhooks/slack/webhook``, replacing -the host and port with the appropriate values. This is the URL -you should add in the OAuth & Permissions section. diff --git a/docs/user-guide/connectors/telegram.rst b/docs/user-guide/connectors/telegram.rst deleted file mode 100644 index b333201342e2..000000000000 --- a/docs/user-guide/connectors/telegram.rst +++ /dev/null @@ -1,46 +0,0 @@ -:desc: Build a Rasa Chat Bot on Telegram - -.. _telegram: - -Telegram -======== - -.. edit-link:: - -You first have to create a Telegram bot to get credentials. -Once you have them you can add these to your ``credentials.yml``. - -Getting Credentials -^^^^^^^^^^^^^^^^^^^ - -**How to get the Telegram credentials:** -You need to set up a Telegram bot. - - 1. To create the bot, go to `Bot Father <https://web.telegram.org/#/im?p=@BotFather>`_, - enter ``/newbot`` and follow the instructions. - 2. At the end you should get your ``access_token`` and the username you - set will be your ``verify``. - 3. If you want to use your bot in a group setting, it's advisable to - turn on group privacy mode by entering ``/setprivacy``. Then the bot - will only listen when a user's message starts with ``/bot``. - -For more information, check out the `Telegram HTTP API -<https://core.telegram.org/bots/api>`_. - -Running on Telegram -^^^^^^^^^^^^^^^^^^^ - -If you want to connect to telegram using the run script, e.g. using: - -.. code-block:: bash - - rasa run - -you need to supply a ``credentials.yml`` with the following content: - -.. 
code-block:: yaml - - telegram: - access_token: "490161424:AAGlRxinBRtKGb21_rlOEMtDFZMXBl6EC0o" - verify: "your_bot" - webhook_url: "https://your_url.com/webhooks/telegram/webhook" diff --git a/docs/user-guide/connectors/twilio.rst b/docs/user-guide/connectors/twilio.rst deleted file mode 100644 index ea47c01af1ec..000000000000 --- a/docs/user-guide/connectors/twilio.rst +++ /dev/null @@ -1,48 +0,0 @@ -:desc: Build a Rasa Chat Bot on Twilio - -.. _twilio: - -Twilio -====== - -.. edit-link:: - -You first have to create a Twilio app to get credentials. -Once you have them you can add these to your ``credentials.yml``. - -Getting Credentials -^^^^^^^^^^^^^^^^^^^ - -**How to get the Twilio credentials:** -You need to set up a Twilio account. - - 1. Once you have created a Twilio account, you need to create a new - project. The basic important product to select here - is ``Programmable SMS``. - 2. Once you have created the project, navigate to the Dashboard of - ``Programmable SMS`` and click on ``Get Started``. Follow the - steps to connect a phone number to the project. - 3. Now you can use the ``Account SID``, ``Auth Token``, and the phone - number you purchased in your ``credentials.yml``. - -For more information, see the `Twilio REST API -<https://www.twilio.com/docs/iam/api>`_. - -Using run script -^^^^^^^^^^^^^^^^ - -If you want to connect to the Twilio input channel using the run -script, e.g. using: - -.. code-block:: bash - - rasa run - -you need to supply a ``credentials.yml`` with the following content: - -.. code-block:: yaml - - twilio: - account_sid: "ACbc2dxxxxxxxxxxxx19d54bdcd6e41186" - auth_token: "e231c197493a7122d475b4xxxxxxxxxx" - twilio_number: "+440123456789" diff --git a/docs/user-guide/connectors/your-own-website.rst b/docs/user-guide/connectors/your-own-website.rst deleted file mode 100644 index 92dd169c75eb..000000000000 --- a/docs/user-guide/connectors/your-own-website.rst +++ /dev/null @@ -1,149 +0,0 @@ -:desc: Deploy and Run a Rasa Chat Bot on a Website - -.. _your-own-website: - -Your Own Website -================ - -.. edit-link:: - -If you just want an easy way for users to test your bot, the best option -is usually the chat interface that ships with Rasa X, where you can `invite users -to test your bot <../../rasa-x/docs/get-feedback-from-test-users>`_. - -If you already have an existing website and want to add a Rasa assistant to it, -you have a couple of options: - -- `Rasa Webchat <https://github.com/mrbot-ai/rasa-webchat>`_, which - uses websockets. -- `Chatroom <https://github.com/scalableminds/chatroom>`_, which - uses regular HTTP requests. -- `rasa-bot <https://github.com/assister-ai/assister/tree/master/packages/rasa>`_, a - Web Component which uses regular HTTP requests. - -.. note:: - - These projects are developed by external developers, if there are any issues with - them, please open tickets in the respective repositories. - -Websocket Channel -~~~~~~~~~~~~~~~~~ - -The SocketIO channel uses websockets and is real-time. You need to supply -a ``credentials.yml`` with the following content: - -.. code-block:: yaml - - socketio: - user_message_evt: user_uttered - bot_message_evt: bot_uttered - session_persistence: true/false - -The first two configuration values define the event names used by Rasa Core -when sending or receiving messages over socket.io. - -By default, the socketio channel uses the socket id as ``sender_id``, which causes -the session to restart at every page reload. ``session_persistence`` can be -set to ``true`` to avoid that. 
In that case, the frontend is responsible -for generating a session id and sending it to the Rasa Core server by -emitting the event ``session_request`` with ``{session_id: [session_id]}`` -immediately after the ``connect`` event. - -The example `Webchat <https://github.com/mrbot-ai/rasa-webchat>`_ -implements this session creation mechanism (version >= 0.5.0). - - -.. _rest_channels: - -REST Channels -~~~~~~~~~~~~~ - - -The ``RestInput`` and ``CallbackInput`` channels can be used for custom integrations. -They provide a URL where you can post messages and either receive response messages -directly, or asynchronously via a webhook. - - -RestInput -^^^^^^^^^ - -The ``rest`` channel will provide you with a REST endpoint to post messages -to and in response to that request will send back the bots messages. -Here is an example on how to connect the ``rest`` input channel -using the run script: - -.. code-block:: bash - - rasa run - -you need to ensure your ``credentials.yml`` has the following content: - -.. code-block:: yaml - - rest: - # you don't need to provide anything here - this channel doesn't - # require any credentials - -After connecting the ``rest`` input channel, you can post messages to -``POST /webhooks/rest/webhook`` with the following format: - -.. code-block:: json - - { - "sender": "Rasa", - "message": "Hi there!" - } - -The response to this request will include the bot responses, e.g. - -.. code-block:: json - - [ - {"text": "Hey Rasa!"}, {"image": "http://example.com/image.jpg"} - ] - - -.. _callbackInput: - -CallbackInput -^^^^^^^^^^^^^ - -The ``callback`` channel behaves very much like the ``rest`` input, -but instead of directly returning the bot messages to the HTTP -request that sends the message, it will call a URL you can specify -to send bot messages. - -Here is an example on how to connect the -``callback`` input channel using the run script: - -.. code-block:: bash - - rasa run - -you need to supply a ``credentials.yml`` with the following content: - -.. code-block:: yaml - - callback: - # URL to which Core will send the bot responses - url: "http://localhost:5034/bot" - -After connecting the ``callback`` input channel, you can post messages to -``POST /webhooks/callback/webhook`` with the following format: - -.. code-block:: json - - { - "sender": "Rasa", - "message": "Hi there!" - } - -The response will simply be ``success``. Once Core wants to send a -message to the user, it will call the URL you specified with a ``POST`` -and the following ``JSON`` body: - -.. code-block:: json - - [ - {"text": "Hey Rasa!"}, {"image": "http://example.com/image.jpg"} - ] diff --git a/docs/user-guide/evaluating-models.rst b/docs/user-guide/evaluating-models.rst deleted file mode 100644 index 51398e5b1cfb..000000000000 --- a/docs/user-guide/evaluating-models.rst +++ /dev/null @@ -1,270 +0,0 @@ -:desc: Evaluate and validate your machine learning models for open source - library Rasa Core to improve the dialogue management of your contextual - AI Assistant. - -.. _evaluating-models: - -Evaluating Models -================= - -.. edit-link:: - -.. contents:: - :local: - -.. note:: - If you are looking to tune the hyperparameters of your NLU model, - check out this `tutorial <https://blog.rasa.com/rasa-nlu-in-depth-part-3-hyperparameters/>`_. - - -.. _nlu-evaluation: - -Evaluating an NLU Model ------------------------ - -A standard technique in machine learning is to keep some data separate as a *test set*. 
-You can :ref:`split your NLU training data <train-test-split>` -into train and test sets using: - -.. code-block:: bash - - rasa data split nlu - - -If you've done this, you can see how well your NLU model predicts the test cases using this command: - -.. code-block:: bash - - rasa test nlu -u test_set.md --model models/nlu-20180323-145833.tar.gz - - -If you don't want to create a separate test set, you can -still estimate how well your model generalises using cross-validation. -To do this, add the flag ``--cross-validation``: - -.. code-block:: bash - - rasa test nlu -u data/nlu.md --config config.yml --cross-validation - -The full list of options for the script is: - -.. program-output:: rasa test nlu --help - -.. _comparing-nlu-pipelines: - -Comparing NLU Pipelines -^^^^^^^^^^^^^^^^^^^^^^^ - -By passing multiple pipeline configurations (or a folder containing them) to the CLI, Rasa will run -a comparative examination between the pipelines. - -.. code-block:: bash - - $ rasa test nlu --config pretrained_embeddings_spacy.yml supervised_embeddings.yml - --nlu data/nlu.md --runs 3 --percentages 0 25 50 70 90 - - -The command in the example above will create a train/test split from your data, -then train each pipeline multiple times with 0, 25, 50, 70 and 90% of your intent data excluded from the training set. -The models are then evaluated on the test set and the f1-score for each exclusion percentage is recorded. This process -runs three times (i.e. with 3 test sets in total) and then a graph is plotted using the means and standard deviations of -the f1-scores. - -The f1-score graph - along with all train/test sets, the trained models, classification and error reports - will be saved into a folder -called ``nlu_comparison_results``. - - -Intent Classification -^^^^^^^^^^^^^^^^^^^^^ - -The evaluation script will produce a report, confusion matrix, -and confidence histogram for your model. - -The report logs precision, recall and f1 measure for -each intent and entity, as well as providing an overall average. -You can save these reports as JSON files using the ``--report`` argument. - -The confusion matrix shows you which -intents are mistaken for others; any samples which have been -incorrectly predicted are logged and saved to a file -called ``errors.json`` for easier debugging. - -The histogram that the script produces allows you to visualise the -confidence distribution for all predictions, -with the volume of correct and incorrect predictions being displayed by -blue and red bars respectively. -Improving the quality of your training data will move the blue -histogram bars to the right and the red histogram bars -to the left of the plot. - - -.. note:: - A confusion matrix will **only** be created if you are evaluating a model on a test set. - In cross-validation mode, the confusion matrix will not be generated. - -.. warning:: - If any of your entities are incorrectly annotated, your evaluation may fail. One common problem - is that an entity cannot stop or start inside a token. - For example, if you have an example for a ``name`` entity - like ``[Brian](name)'s house``, this is only valid if your tokenizer splits ``Brian's`` into - multiple tokens. A whitespace tokenizer would not work in this case. - - -Response Selection -^^^^^^^^^^^^^^^^^^^^^ - -The evaluation script will produce a combined report for all response selector models in your pipeline. - -The report logs precision, recall and f1 measure for -each response, as well as providing an overall average. 
-You can save these reports as JSON files using the ``--report`` argument. - - -Entity Extraction -^^^^^^^^^^^^^^^^^ - -The ``CRFEntityExtractor`` is the only entity extractor which you train using your own data, -and so is the only one that will be evaluated. If you use the spaCy or duckling -pre-trained entity extractors, Rasa NLU will not include these in the evaluation. - -Rasa NLU will report recall, precision, and f1 measure for each entity type that -``CRFEntityExtractor`` is trained to recognize. - - -Entity Scoring -^^^^^^^^^^^^^^ - -To evaluate entity extraction we apply a simple tag-based approach. We don't consider BILOU tags, but only the -entity type tags on a per token basis. For location entity like "near Alexanderplatz" we -expect the labels ``LOC LOC`` instead of the BILOU-based ``B-LOC L-LOC``. Our approach is more lenient -when it comes to evaluation, as it rewards partial extraction and does not punish the splitting of entities. -For example, given the aforementioned entity "near Alexanderplatz" and a system that extracts -"Alexanderplatz", our approach rewards the extraction of "Alexanderplatz" and punishes the missed out word "near". -The BILOU-based approach, however, would label this as a complete failure since it expects Alexanderplatz -to be labeled as a last token in an entity (``L-LOC``) instead of a single token entity (``U-LOC``). Note also that -a split extraction of "near" and "Alexanderplatz" would get full scores on our approach and zero on the -BILOU-based one. - -Here's a comparison between the two scoring mechanisms for the phrase "near Alexanderplatz tonight": - -================================================== ======================== =========================== -extracted Simple tags (score) BILOU tags (score) -================================================== ======================== =========================== -[near Alexanderplatz](loc) [tonight](time) loc loc time (3) B-loc L-loc U-time (3) -[near](loc) [Alexanderplatz](loc) [tonight](time) loc loc time (3) U-loc U-loc U-time (1) -near [Alexanderplatz](loc) [tonight](time) O loc time (2) O U-loc U-time (1) -[near](loc) Alexanderplatz [tonight](time) loc O time (2) U-loc O U-time (1) -[near Alexanderplatz tonight](loc) loc loc loc (2) B-loc I-loc L-loc (1) -================================================== ======================== =========================== - - -.. _core-evaluation: - -Evaluating a Core Model ------------------------ - -You can evaluate your trained model on a set of test stories -by using the evaluate script: - -.. code-block:: bash - - rasa test core --stories test_stories.md --out results - - -This will print the failed stories to ``results/failed_stories.md``. -We count any story as `failed` if at least one of the actions -was predicted incorrectly. - -In addition, this will save a confusion matrix to a file called -``results/story_confmat.pdf``. For each action in your domain, the confusion -matrix shows how often the action was correctly predicted and how often an -incorrect action was predicted instead. - -The full list of options for the script is: - -.. program-output:: rasa test core --help - - -Comparing Core Configurations ------------------------------ - -To choose a configuration for your core model, or to choose hyperparameters for a -specific policy, you want to measure how well Rasa Core will `generalise` -to conversations which it hasn't seen before. 
Especially in the beginning -of a project, you do not have a lot of real conversations to use to train -your bot, so you don't just want to throw some away to use as a test set. - -Rasa Core has some scripts to help you choose and fine-tune your policy configuration. -Once you are happy with it, you can then train your final configuration on your -full data set. To do this, you first have to train models for your different -configurations. Create two (or more) config files including the policies you want to -compare, and then use the ``compare`` mode of the train script to train your models: - -.. code-block:: bash - - $ rasa train core -c config_1.yml config_2.yml \ - -d domain.yml -s stories_folder --out comparison_models --runs 3 \ - --percentages 0 5 25 50 70 95 - -For each policy configuration provided, Rasa Core will be trained multiple times -with 0, 5, 25, 50, 70 and 95% of your training stories excluded from the training -data. This is done for multiple runs to ensure consistent results. - -Once this script has finished, you can use the evaluate script in ``compare`` -mode to evaluate the models you just trained: - -.. code-block:: bash - - $ rasa test core -m comparison_models --stories stories_folder - --out comparison_results --evaluate-model-directory - -This will evaluate each of the models on the provided stories -(can be either training or test set) and plot some graphs -to show you which policy performs best. By evaluating on the full set of stories, you -can measure how well Rasa Core is predicting the held-out stories. - -To compare single policies create config files containing only one policy each. -If you're not sure which policies to compare, we'd recommend trying out the -``EmbeddingPolicy`` and the ``KerasPolicy`` to see which one works better for -you. - -.. note:: - This training process can take a long time, so we'd suggest letting it run - somewhere in the background where it can't be interrupted. - - -.. _end_to_end_evaluation: - -End-to-End Evaluation ---------------------- - -Rasa lets you evaluate dialogues end-to-end, running through -test conversations and making sure that both NLU and Core make correct predictions. - -To do this, you need some stories in the end-to-end format, -which includes both the NLU output and the original text. -Here is an example: - -.. code-block:: story - - ## end-to-end story 1 - * greet: hello - - utter_ask_howcanhelp - * inform: show me [chinese](cuisine) restaurants - - utter_ask_location - * inform: in [Paris](location) - - utter_ask_price - - -If you've saved end-to-end stories as a file called ``e2e_stories.md``, -you can evaluate your model against them by running: - -.. code-block:: bash - - $ rasa test --stories e2e_stories.md --e2e - -.. note:: - - Make sure your model file in ``models`` is a combined ``core`` - and ``nlu`` model. If it does not contain an NLU model, Core will use - the default ``RegexInterpreter``. diff --git a/docs/user-guide/installation.rst b/docs/user-guide/installation.rst deleted file mode 100644 index 9bb73855f300..000000000000 --- a/docs/user-guide/installation.rst +++ /dev/null @@ -1,265 +0,0 @@ -:desc: Manage our open source NLU on premise to allow local intent recognition, - entity extraction and customisation of the language models. -:meta_image: https://i.imgur.com/nGF1K8f.jpg - -.. _installation: - -============ -Installation -============ - -.. edit-link:: - -Quick Installation -~~~~~~~~~~~~~~~~~~ - -You can install both Rasa and Rasa X using pip (requires Python 3.5.4 or higher). 
- -.. code-block:: bash - - $ pip3 install rasa-x --extra-index-url https://pypi.rasa.com/simple - -- Having trouble installing? Read our :ref:`step-by-step installation guide <installation_guide>`. -- You can also :ref:`build Rasa from source <build_from_source>`. -- For advanced installation options such as building from source and installation instructions for - custom pipelines, head over :ref:`here <pipeline_dependencies>`. - - -When you're done installing, you can head over to the tutorial! - -.. button:: - :text: Next Step: Tutorial - :link: ../rasa-tutorial/ - - - -| - -------------------------------------------- - -.. _installation_guide: - -Step-by-step Installation Guide -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -1. Install the Python development environment ---------------------------------------------- - -Check if your Python environment is already configured: - -.. code-block:: bash - - $ python3 --version - $ pip3 --version - -If these packages are already installed, these commands should display version -numbers for each step, and you can skip to the next step. - -Otherwise, proceed with the instructions below to install them. - -.. tabs:: - - .. tab:: Ubuntu - - Fetch the relevant packages using ``apt``, and install virtualenv using ``pip``. - - .. code-block:: bash - - $ sudo apt update - $ sudo apt install python3-dev python3-pip - - .. tab:: macOS - - Install the `Homebrew <https://brew.sh>`_ package manager if you haven't already. - - Once you're done, you can install Python3. - - .. code-block:: bash - - $ brew update - $ brew install python - - .. tab:: Windows - - .. raw:: html - - Make sure the Microsoft VC++ Compiler is installed, so python can compile - any dependencies. You can get the compiler from <a class="reference external" - href="https://visualstudio.microsoft.com/visual-cpp-build-tools/" - target="_blank">Visual Studio</a>. Download the installer and select - VC++ Build tools in the list. - - Install `Python 3 <https://www.python.org/downloads/windows/>`_ (64-bit version) for Windows. - - .. code-block:: bat - - C:\> pip3 install -U pip - -.. note:: - Note that `pip` in this refers to `pip3` as Rasa requires python3. To see which version the `pip` - command on your machine calls use `pip --version`. - - -2. Create a virtual environment (strongly recommended) ------------------------------------------------------- - -Tools like `virtualenv <https://virtualenv.pypa.io/en/latest/>`_ and `virtualenvwrapper <https://virtualenvwrapper.readthedocs.io/en/latest/>`_ provide isolated Python environments, which are cleaner than installing packages systemwide (as they prevent dependency conflicts). They also let you install packages without root privileges. - -.. tabs:: - - .. tab:: Ubuntu / macOS - - Create a new virtual environment by choosing a Python interpreter and making a ``./venv`` directory to hold it: - - .. code-block:: bash - - $ python3 -m venv --system-site-packages ./venv - - Activate the virtual environment: - - .. code-block:: bash - - $ source ./venv/bin/activate - - .. tab:: Windows - - Create a new virtual environment by choosing a Python interpreter and making a ``.\venv`` directory to hold it: - - .. code-block:: bat - - C:\> python3 -m venv --system-site-packages ./venv - - Activate the virtual environment: - - .. code-block:: bat - - C:\> .\venv\Scripts\activate - - -3. Install Rasa and Rasa X --------------------------- - -.. tabs:: - - .. tab:: Rasa and Rasa X - - To install both Rasa and Rasa X in one go: - - .. 
code-block:: bash - - $ pip install rasa-x --extra-index-url https://pypi.rasa.com/simple - - .. tab:: Rasa only - - If you just want to install Rasa without Rasa X: - - .. code-block:: bash - - $ pip install rasa - -**Congratulations! You have successfully installed Rasa!** - -You can now head over to the tutorial. - -.. button:: - :text: Next Step: Tutorial - :link: ../rasa-tutorial/ - -| - -------------------------------------------- - - -.. _build_from_source: - -Building from Source -~~~~~~~~~~~~~~~~~~~~ - -If you want to use the development version of Rasa, you can get it from GitHub: - -.. code-block:: bash - - $ git clone https://github.com/RasaHQ/rasa.git - $ cd rasa - $ pip install -r requirements.txt - $ pip install -e . - --------------------------------- - -.. _pipeline_dependencies: - -NLU Pipeline Dependencies -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Several NLU components have additional dependencies that need to -be installed separately. - -Here, you will find installation instructions for each of them below. - -How do I choose a pipeline? ---------------------------- - -The page on :ref:`choosing-a-pipeline` will help you pick the right pipeline -for your assistant. - -I have decided on a pipeline. How do I install the dependencies for it? ------------------------------------------------------------------------ - -When you install Rasa, the dependencies for the ``supervised_embeddings`` - TensorFlow -and sklearn_crfsuite get automatically installed. However, spaCy and MITIE need to be separately installed if you want to use pipelines containing components from those libraries. - -.. admonition:: Just give me everything! - - If you don't mind the additional dependencies lying around, you can use - this to install everything. - - You'll first need to clone the repository and then run the following - command to install all the packages: - - .. code-block:: bash - - $ pip install -r alt_requirements/requirements_full.txt - - -Dependencies for spaCy -###################### - - -For more information on spaCy, check out the `spaCy docs <https://spacy.io/usage/models>`_. - -You can install it with the following commands: - -.. code-block:: bash - - $ pip install rasa[spacy] - $ python -m spacy download en_core_web_md - $ python -m spacy link en_core_web_md en - -This will install Rasa NLU as well as spacy and its language model -for the English language. We recommend using at least the -"medium" sized models (``_md``) instead of the spacy's -default small ``en_core_web_sm`` model. Small models require less -memory to run, but will somewhat reduce intent classification performance. - -.. _install-mitie: - -Dependencies for MITIE -###################### - -First, run - -.. code-block:: bash - - $ pip install git+https://github.com/mit-nlp/MITIE.git - $ pip install rasa[mitie] - -and then download the -`MITIE models <https://github.com/mit-nlp/MITIE/releases/download/v0.4/MITIE-models-v0.2.tar.bz2>`_. -The file you need is ``total_word_feature_extractor.dat``. Save this -anywhere. If you want to use MITIE, you need to -tell it where to find this file (in this example it was saved in the -``data`` folder of the project directory). - -.. warning:: - - Mitie support is likely to be deprecated in a future release. 
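Taken together, the installation steps above boil down to a short sequence. This is a sketch under stated assumptions rather than an official quick-start: it presumes Python 3 and ``pip`` are already installed, a Linux/macOS shell, a pipeline that uses spaCy, and an arbitrary ``./venv`` directory for the virtual environment.

.. code-block:: bash

    # Create and activate an isolated Python environment
    python3 -m venv ./venv
    source ./venv/bin/activate

    # Install Rasa with the spaCy extra, then fetch and link the medium English model
    pip install rasa[spacy]
    python -m spacy download en_core_web_md
    python -m spacy link en_core_web_md en

    # Confirm the install by printing the installed Rasa version
    rasa --version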
diff --git a/docs/user-guide/messaging-and-voice-channels.rst b/docs/user-guide/messaging-and-voice-channels.rst deleted file mode 100644 index 77576e32daf3..000000000000 --- a/docs/user-guide/messaging-and-voice-channels.rst +++ /dev/null @@ -1,46 +0,0 @@ -:desc: Check out how to make your Rasa assistant available on platforms like - Facebook Messenger, Slack, Telegram or even your very own website. - -.. _messaging-and-voice-channels: - -Messaging and Voice Channels -============================ - -.. edit-link:: - -If you're testing this on your local computer (i.e. not a server), you -will need to use `ngrok <https://rasa.com/docs/rasa-x/get-feedback-from-test-users/#use-ngrok-for-local-testing>`_. -This gives your machine a domain name so that Facebook, Slack, etc. know where to send messages to -reach your local machine. - - -To make your assistant available on a messaging platform you need to provide credentials -in a ``credentials.yml`` file. -An example file is created when you run ``rasa init``, so it's easiest to edit that file -and add your credentials there. Here is an example with Facebook credentials: - - -.. code-block:: yaml - - facebook: - verify: "rasa-bot" - secret: "3e34709d01ea89032asdebfe5a74518" - page-access-token: "EAAbHPa7H9rEBAAuFk4Q3gPKbDedQnx4djJJ1JmQ7CAqO4iJKrQcNT0wtD" - - -Learn how to make your assistant available on: - -.. toctree:: - :titlesonly: - :maxdepth: 1 - - connectors/your-own-website - connectors/facebook-messenger - connectors/slack - connectors/telegram - connectors/twilio - connectors/microsoft-bot-framework - connectors/cisco-webex-teams - connectors/rocketchat - connectors/mattermost - connectors/custom-connectors diff --git a/docs/user-guide/rasa-tutorial.rst b/docs/user-guide/rasa-tutorial.rst deleted file mode 100644 index e851779ba61c..000000000000 --- a/docs/user-guide/rasa-tutorial.rst +++ /dev/null @@ -1,262 +0,0 @@ -:desc: This tutorial will show you the different parts needed to build a - chatbot or AI assistant using open source Rasa. - -.. _rasa-tutorial: - -Rasa Tutorial -============= - -.. edit-link:: - -This page explains the basics of building an assistant with Rasa and -shows the structure of a Rasa project. You can test it out right here without -installing anything. -You can also :ref:`install Rasa <installation>` and follow along in your command line. - -.. raw:: html - - The <a style="text-decoration: none" href="https://rasa.com/docs/rasa/glossary">glossary</a> contains an overview of the most common terms you’ll see in the Rasa documentation. - - - -.. contents:: - :local: - - -In this tutorial, you will build a simple, friendly assistant which will ask how you're doing -and send you a fun picture to cheer you up if you are sad. - -.. image:: /_static/images/mood_bot.png - - -1. Create a New Project -^^^^^^^^^^^^^^^^^^^^^^^ - -The first step is to create a new Rasa project. To do this, run: - - - -.. runnable:: - - rasa init --no-prompt - - -The ``rasa init`` command creates all the files that a Rasa project needs and -trains a simple bot on some sample data. -If you leave out the ``--no-prompt`` flag you will be asked some questions about -how you want your project to be set up. 
- -This creates the following files: - - -+-------------------------------+--------------------------------------------------------+ -| ``__init__.py`` | an empty file that helps python find your actions | -+-------------------------------+--------------------------------------------------------+ -| ``actions.py`` | code for your custom actions | -+-------------------------------+--------------------------------------------------------+ -| ``config.yml`` '*' | configuration of your NLU and Core models | -+-------------------------------+--------------------------------------------------------+ -| ``credentials.yml`` | details for connecting to other services | -+-------------------------------+--------------------------------------------------------+ -| ``data/nlu.md`` '*' | your NLU training data | -+-------------------------------+--------------------------------------------------------+ -| ``data/stories.md`` '*' | your stories | -+-------------------------------+--------------------------------------------------------+ -| ``domain.yml`` '*' | your assistant's domain | -+-------------------------------+--------------------------------------------------------+ -| ``endpoints.yml`` | details for connecting to channels like fb messenger | -+-------------------------------+--------------------------------------------------------+ -| ``models/<timestamp>.tar.gz`` | your initial model | -+-------------------------------+--------------------------------------------------------+ - - - -The most important files are marked with a '*'. -You will learn about all of these in this tutorial. - - -2. View Your NLU Training Data -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The first piece of a Rasa assistant is an NLU model. -NLU stands for Natural Language Understanding, which means turning -user messages into structured data. To do this with Rasa, -you provide training examples that show how Rasa should understand -user messages, and then train a model by showing it those examples. - -Run the code cell below to see the NLU training data created by -the ``rasa init`` command: - - -.. runnable:: - - cat data/nlu.md - - - - -The lines starting with ``##`` define the names of your ``intents``, which -are groups of messages with the same meaning. Rasa's job will be to -predict the correct intent when your users send new, unseen messages to -your assistant. You can find all the details of the data format in :ref:`training-data-format`. - -.. _model-configuration: - -3. Define Your Model Configuration -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The configuration file defines the NLU and Core components that your model -will use. In this example, your NLU model will use the -``supervised_embeddings`` pipeline. You can learn about the different NLU pipelines -:ref:`here <choosing-a-pipeline>`. - -Let's take a look at your model configuration file. - -.. runnable:: - - cat config.yml - - - -The ``language`` and ``pipeline`` keys specify how the NLU model should be built. -The ``policies`` key defines the :ref:`policies <policies>` that the Core model will use. - - - -4. Write Your First Stories -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -At this stage, you will teach your assistant how to respond to your messages. -This is called dialogue management, and is handled by your Core model. - -Core models learn from real conversational data in the form of training "stories". -A story is a real conversation between a user and an assistant. 
-Lines with intents and entities reflect the user's input and action names show what the -assistant should do in response. - -Below is an example of a simple conversation. -The user says hello, and the assistant says hello back. -This is how it looks as a story: - -.. code-block:: story - - ## story1 - * greet - - utter_greet - - -You can see the full details in :ref:`stories`. - -Lines that start with ``-`` are actions taken by the assistant. -In this tutorial, all of our actions are messages sent back to the user, -like ``utter_greet``, but in general, an action can do anything, -including calling an API and interacting with the outside world. - -Run the command below to view the example stories inside the file ``data/stories.md``: - - -.. runnable:: - - cat data/stories.md - - - -5. Define a Domain -^^^^^^^^^^^^^^^^^^ - -The next thing we need to do is define a :ref:`Domain <domains>`. -The domain defines the universe your assistant lives in: what user inputs it -should expect to get, what actions it should be able to predict, how to -respond, and what information to store. -The domain for our assistant is saved in a -file called ``domain.yml``: - - - -.. runnable:: - - cat domain.yml - - - -So what do the different parts mean? - - -+---------------+-------------------------------------------------------------+ -| ``intents`` | things you expect users to say | -+---------------+-------------------------------------------------------------+ -| ``actions`` | things your assistant can do and say | -+---------------+-------------------------------------------------------------+ -| ``templates`` | template strings for the things your assistant can say | -+---------------+-------------------------------------------------------------+ - - -**How does this fit together?** -Rasa Core's job is to choose the right action to execute at each step -of the conversation. In this case, our actions simply send a message to the user. -These simple utterance actions are the ``actions`` in the domain that start -with ``utter_``. The assistant will respond with a message based on a template -from the ``templates`` section. See :ref:`custom-actions` -to build actions that do more than just send a message. - - - -6. Train a Model -^^^^^^^^^^^^^^^^ - -Anytime we add new NLU or Core data, or update the domain or configuration, we -need to re-train a neural network on our example stories and NLU data. -To do this, run the command below. This command will call the Rasa Core and NLU train -functions and store the trained model -into the ``models/`` directory. The command will automatically only retrain the -different model parts if something has changed in their data or configuration. - - - -.. runnable:: - - rasa train - - echo "Finished training." - - - -The ``rasa train`` command will look for both NLU and Core data and will train a combined model. - - -7. Talk to Your Assistant -^^^^^^^^^^^^^^^^^^^^^^^^^ - -Congratulations! 🚀 You just built an assistant -powered entirely by machine learning. - -The next step is to try it out! -If you're following this tutorial on your local machine, start talking to your -assistant by running: - -.. code-block:: bash - - rasa shell - - -Next Steps -^^^^^^^^^^ - -Now that you've built your first Rasa bot it's time to learn about -some more advanced Rasa features. 
- -- Learn how to implement business logic using :ref:`forms <forms>` -- Learn how to integrate other APIs using :ref:`custom actions <actions>` -- Learn how to connect your bot to different :ref:`messaging apps <messaging-and-voice-channels>` -- Learn about customising the :ref:`components <components>` in your NLU pipeline -- Read about custom and built-in :ref:`entities <entity-extraction>` - -You can also use Rasa X to collect more conversations -and improve your assistant: - -.. button:: - :text: Try Rasa X - :link: ../../../rasa-x/ - -.. juniper:: - :language: bash diff --git a/docs/user-guide/running-rasa-with-docker.rst b/docs/user-guide/running-rasa-with-docker.rst deleted file mode 100644 index 6fa84d4d6307..000000000000 --- a/docs/user-guide/running-rasa-with-docker.rst +++ /dev/null @@ -1,467 +0,0 @@ -:desc: Run and ship your Rasa assistant with Docker containers on any - Docker-compatible machine or cluster. - -.. _running-rasa-with-docker: - -Running Rasa with Docker -======================== - -.. edit-link:: - -This is a guide on how to build a Rasa assistant with Docker. -If you haven't used Rasa before, we'd recommend that you start with the :ref:`rasa-tutorial`. - -.. contents:: - :local: - -Installing Docker ------------------ - -If you're not sure if you have Docker installed, you can check by running: - - .. code-block:: bash - - docker -v && docker-compose -v - # Docker version 18.09.2, build 6247962 - # docker-compose version 1.23.2, build 1110ad01 - -If Docker is installed on your machine, the output should show you your installed -versions of Docker and Docker Compose. If the command doesn't work, you'll have to -install Docker. -See `Docker Installation <https://docs.docker.com/install/>`_ for details. - -Building an Assistant with Rasa and Docker ------------------------------------------- - -This section will cover the following: - - - Setting up your Rasa project and training an initial model - - Talking to your AI assistant via Docker - - Choosing a Docker image tag - - Training your Rasa models using Docker - - Talking to your assistant using Docker - - Running a Rasa server with Docker - - -Setup -~~~~~ - -Just like in the :ref:`tutorial <rasa-tutorial>`, you'll use the ``rasa init`` command to create a project. -The only difference is that you'll be running Rasa inside a Docker container, using -the image ``rasa/rasa``. To initialize your project, run: - -.. code-block:: bash - - docker run -v $(pwd):/app rasa/rasa init --no-prompt - -What does this command mean? - -- ``-v $(pwd):/app`` mounts your current working directory to the working directory - in the Docker container. This means that files you create on your computer will be - visible inside the container, and files created in the container will - get synced back to your computer. -- ``rasa/rasa`` is the name of the docker image to run. -- the Docker image has the ``rasa`` command as its entrypoint, which means you don't - have to type ``rasa init``, just ``init`` is enough. - -Running this command will produce a lot of output. What happens is: - -- a Rasa project is created -- an initial model is trained using the project's training data. - -To check that the command completed correctly, look at the contents of your working directory: - -.. code-block:: bash - - ls -1 - -The initial project files should all be there, as well as a ``models`` directory that contains your trained model. - - -.. note:: - - By default Docker runs containers as ``root`` user. 
Hence, all files created by - these containers will be owned by ``root``. See the `documentation of docker - <https://docs.docker.com/v17.12/edge/engine/reference/commandline/run/>`_ - and `docker-compose <https://docs.docker.com/compose/compose-file/>`_ if you want to - run the containers with a different user. - -Talking to Your Assistant -~~~~~~~~~~~~~~~~~~~~~~~~~ - -To talk to your newly-trained assistant, run this command: - - -.. code-block:: bash - - docker run -it -v $(pwd):/app rasa/rasa shell - -This will start a shell where you can chat to your assistant. -Note that this command includes the flags ``-it``, which means that you are running -Docker interactively, and you are able to give input via the command line. -For commands which require interactive input, like ``rasa shell`` and ``rasa interactive``, -you need to pass the ``-it`` flags. - - -Customizing your Model ----------------------- - -Choosing a Tag -~~~~~~~~~~~~~~ - -To keep images as small as possible, we publish different tags of the ``rasa/rasa`` image -with different dependencies installed. See :ref:`choosing-a-pipeline` for more information -about dependencies. - -All tags start with a version -- the ``latest`` tag corresponds to the current master build. -The tags are: - -- ``{version}`` -- ``{version}-spacy-en`` -- ``{version}-spacy-de`` -- ``{version}-mitie-en`` -- ``{version}-full`` - -The plain ``{version}`` tag includes all the dependencies you need to run the ``supervised_embeddings`` pipeline. -If you are using components with pre-trained word vectors, you need to choose the corresponding tag. -Alternatively, you can use the ``-full`` tag, which includes all pipeline dependencies. - -.. note:: - - You can see a list of all the versions and tags of the Rasa Docker image - `here <https://hub.docker.com/r/rasa/rasa/>`_. - - -.. _model_training_docker: - -Training a Custom Rasa Model with Docker -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Edit the ``config.yml`` file to use the pipeline you want, and place -your NLU and Core data into the ``data/`` directory. -Now you can train your Rasa model by running: - -.. code-block:: bash - - docker run \ - -v $(pwd):/app \ - rasa/rasa:latest-full \ - train \ - --domain domain.yml \ - --data data \ - --out models - -Here's what's happening in that command: - - - ``-v $(pwd):/app``: Mounts your project directory into the Docker - container so that Rasa can train a model on your training data - - ``rasa/rasa:latest-full``: Use the Rasa image with the tag ``latest-full`` - - ``train``: Execute the ``rasa train`` command within the container. For more - information see :ref:`command-line-interface`. - -In this case, we've also passed values for the location of the domain file, training -data, and the models output directory to show how these can be customized. -You can also leave these out since we are passing the default values. - -.. note:: - - If you are using a custom NLU component or policy, you have to add the module file to your - Docker container. You can do this by either mounting the file or by including it in your - own custom image (e.g. if the custom component or policy has extra dependencies). Make sure - that your module is in the Python module search path by setting the - environment variable ``PYTHONPATH=$PYTHONPATH:<directory of your module>``. - - -Running the Rasa Server ------------------------ - -To run your AI assistant in production, configure your required -:ref:`messaging-and-voice-channels` in ``credentials.yml``.
If this file does not -exist, create it using: - -.. code-block:: bash - - touch credentials.yml - -Then edit it according to your connected channels. -Afterwards, run the trained model with: - -.. code-block:: bash - - docker run \ - -v $(pwd)/models:/app/models \ - rasa/rasa:latest-full \ - run - -Command Description: - - - ``-v $(pwd)/models:/app/models``: Mounts the directory with the trained Rasa model - in the container - - ``rasa/rasa:latest-full``: Use the Rasa image with the tag ``latest-full`` - - ``run``: Executes the ``rasa run`` command. For more information see - :ref:`command-line-interface`. - - -Using Docker Compose to Run Multiple Services ---------------------------------------------- - -To run Rasa together with other services, such as a server for custom actions, it is -recommended to use `Docker Compose <https://docs.docker.com/compose/>`_. -Docker Compose provides an easy way to run multiple containers together without -having to run multiple commands. - -Start by creating a file called ``docker-compose.yml``: - -.. code-block:: bash - - touch docker-compose.yml - -Add the following content to the file: - -.. code-block:: yaml - - version: '3.0' - services: - rasa: - image: rasa/rasa:latest-full - ports: - - 5005:5005 - volumes: - - ./:/app - command: - - run - - -The file starts with the version of the Docker Compose specification that you -want to use. -Each container is declared as a ``service`` within the docker-compose file. -The first service is the ``rasa`` service. - -The command is similar to the ``docker run`` command. -The ``ports`` part defines a port mapping between the container and your host -system. In this case it makes ``5005`` of the ``rasa`` service available on -port ``5005`` of your host. -This is the port of the :ref:`REST Channel <rest_channels>` interface of Rasa. - -.. note:: - - Since Docker Compose starts a set of Docker containers, it is no longer - possible to connect to the command line of a single container after executing the - ``run`` command. - -To run the services configured in your ``docker-compose.yml`` execute: - -.. code-block:: bash - - docker-compose up - - -Adding Custom Actions ---------------------- - -To create more sophisticated assistants, you will want to use :ref:`custom-actions`. -Continuing the example from above, you might want to add an action which tells -the user a joke to cheer them up. - -Creating a Custom Action -~~~~~~~~~~~~~~~~~~~~~~~~ - -Start by creating the custom actions in a directory ``actions``: - -.. code-block:: bash - - mkdir actions - # Rasa SDK expects a python module. - # Therefore, make sure that you have this file in the directory. - touch actions/__init__.py - touch actions/actions.py - -Then build a custom action using the Rasa SDK, e.g.: - -.. code-block:: python - - import requests - import json - from rasa_sdk import Action - - - class ActionJoke(Action): - def name(self): - return "action_joke" - - def run(self, dispatcher, tracker, domain): - request = requests.get('http://api.icndb.com/jokes/random').json() # make an api call - joke = request['value']['joke'] # extract a joke from returned json response - dispatcher.utter_message(joke) # send the message back to the user - return [] - -Next, add the custom action in your stories and your domain file. -Continuing with the example bot from ``rasa init``, replace ``utter_cheer_up`` in -``data/stories.md`` with the custom action ``action_joke``, and add -``action_joke`` to the actions in the domain file.
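As a rough sketch (the exact intent, story, and action names depend on the project ``rasa init`` generated for you, so treat the names below as placeholders): in ``data/stories.md`` the line ``- utter_cheer_up`` in the sad-path story simply becomes ``- action_joke``, and the ``actions`` section of ``domain.yml`` might then look something like this:

.. code-block:: yaml

   actions:
   - utter_greet
   - utter_cheer_up        # still declared; only the story line is replaced
   - utter_did_that_help
   - utter_happy
   - utter_goodbye
   - action_joke           # the custom action defined in actions/actions.py

After updating the stories and the domain, retrain the model (see :ref:`model_training_docker`) so the new action is picked up before you start the containers.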
- -Adding the Action Server -~~~~~~~~~~~~~~~~~~~~~~~~ - -The custom actions are run by the action server. -To spin it up together with the Rasa instance, add a service -``action_server`` to the ``docker-compose.yml``: - -.. code-block:: yaml - :emphasize-lines: 11-14 - - version: '3.0' - services: - rasa: - image: rasa/rasa:latest-full - ports: - - 5005:5005 - volumes: - - ./:/app - command: - - run - action_server: - image: rasa/rasa-sdk:latest - volumes: - - ./actions:/app/actions - -This pulls the image for the Rasa SDK which includes the action server, -mounts your custom actions into it, and starts the server. - -To instruct Rasa to use the action server you have to tell Rasa its location. -Add this to your ``endpoints.yml`` (if it does not exist, create it): - -.. code-block:: yaml - - action_endpoint: - url: http://action_server:5055/webhook - -Run ``docker-compose up`` to start the action server together -with Rasa. - -Adding Custom Dependencies -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If your custom action has additional dependencies of systems or Python libraries, -you can add these by extending the official image. - -To do so, create a file named ``Dockerfile`` in which you extend the official -image and add your custom dependencies. For example: - -.. code-block:: docker - - # Extend the official Rasa SDK image - FROM rasa/rasa-sdk:latest - - # Add a custom system library (e.g. git) - RUN apt-get update && \ - apt-get install -y git - - # Add a custom python library (e.g. jupyter) - RUN pip install --no-cache-dir jupyter - -You can then build the image via the following command, and use it in your -``docker-compose.yml`` instead of the ``rasa/rasa-sdk`` image. - -.. code-block:: bash - - docker build . -t <name of your custom image>:<tag of your custom image> - -Adding a Custom Tracker Store ------------------------------ - -By default, all conversations are saved in memory. This means that all -conversations are lost as soon as you restart the Rasa server. -If you want to persist your conversations, you can use a different -:ref:`Tracker Store <tracker-stores>`. - -Using PostgreSQL as Tracker Store -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Start by adding PostgreSQL to your docker-compose file: - -.. code-block:: yaml - - postgres: - image: postgres:latest - -Then add PostgreSQL to the ``tracker_store`` section of your endpoint -configuration ``config/endpoints.yml``: - -.. code-block:: yaml - - tracker_store: - type: sql - dialect: "postgresql" - url: postgres - db: rasa - -Using MongoDB as Tracker Store -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Start by adding MongoDB to your docker-compose file. The following example -adds the MongoDB as well as a UI (you can skip this), which will be available -at ``localhost:8081``. Username and password for the MongoDB instance are -specified as ``rasa`` and ``example``. - -.. code-block:: yaml - - mongo: - image: mongo - environment: - MONGO_INITDB_ROOT_USERNAME: rasa - MONGO_INITDB_ROOT_PASSWORD: example - mongo-express: - image: mongo-express - ports: - - 8081:8081 - environment: - ME_CONFIG_MONGODB_ADMINUSERNAME: rasa - ME_CONFIG_MONGODB_ADMINPASSWORD: example - -Then add the MongoDB to the ``tracker_store`` section of your endpoints -configuration ``endpoints.yml``: - -.. code-block:: yaml - - tracker_store: - type: mongod - url: mongodb://mongo:27017 - username: rasa - password: example - -Then start all components with ``docker-compose up``. 
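For reference, the fragments from the previous sections could be combined into a single ``docker-compose.yml`` roughly like the sketch below (the service names, image tags, and credentials are the ones used above; adjust them to your setup). Note that the ``tracker_store`` settings still live in ``endpoints.yml``; the compose file only declares the containers:

.. code-block:: yaml

   version: '3.0'
   services:
     rasa:
       image: rasa/rasa:latest-full
       ports:
       - 5005:5005
       volumes:
       - ./:/app
       command:
       - run
     action_server:
       image: rasa/rasa-sdk:latest
       volumes:
       - ./actions:/app/actions
     mongo:
       image: mongo
       environment:
         MONGO_INITDB_ROOT_USERNAME: rasa
         MONGO_INITDB_ROOT_PASSWORD: example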
- -Using Redis as Tracker Store -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Start by adding Redis to your docker-compose file: - -.. code-block:: yaml - - redis: - image: redis:latest - -Then add Redis to the ``tracker_store`` section of your endpoint -configuration ``endpoints.yml``: - -.. code-block:: yaml - - tracker_store: - type: redis - url: redis - -Using a Custom Tracker Store Implementation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you have a custom implementation of a tracker store you have two options -to add this store to Rasa: - - - extending the Rasa image - - mounting it as volume - -Then add the required configuration to your endpoint configuration -``endpoints.yml`` as it is described in :ref:`tracker-stores`. -If you want the tracker store component (e.g. a certain database) to be part -of your Docker Compose file, add a corresponding service and configuration -there. diff --git a/docs/user-guide/running-the-server.rst b/docs/user-guide/running-the-server.rst deleted file mode 100644 index 97c0c9229ba1..000000000000 --- a/docs/user-guide/running-the-server.rst +++ /dev/null @@ -1,238 +0,0 @@ -:desc: Find out how to use Rasa's HTTP API to integrate Rasa - with your backend components. - -.. _running-the-server: - -Running the Server -================== - -.. edit-link:: - -.. contents:: - :local: - -Running the HTTP server ------------------------ - -You can run a simple HTTP server that handles requests using your -trained Rasa model with: - -.. code-block:: bash - - rasa run -m models --enable-api --log-file out.log - -All the endpoints this API exposes are documented in :ref:`http-api`. - -The different parameters are: - -- ``-m``: the path to the folder containing your Rasa model, -- ``--enable-api``: enable this additional API, and -- ``--log-file``: the path to the log file. - -Rasa can load your model in three different ways: - -1. Fetch the model from a server (see :ref:`server_fetch_from_server`), or -2. Fetch the model from a remote storage (see :ref:`cloud-storage`). -3. Load the model specified via ``-m`` from your local storage system, - -Rasa tries to load a model in the above mentioned order, i.e. it only tries to load your model from your local -storage system if no model server and no remote storage were configured. - -.. warning:: - - Make sure to secure your server, either by restricting access to the server (e.g. using firewalls), or - by enabling an authentication method: :ref:`server_security`. - - -.. note:: - - If you are using custom actions, make sure your action server is - running (see :ref:`run-action-server`). If your actions are running - on a different machine, or you aren't using the Rasa SDK, make sure - to update your ``endpoints.yml`` file. - - -.. note:: - - If you start the server with an NLU-only model, not all the available endpoints - can be called. Be aware that some endpoints will return a 409 status code, as a trained - Core model is needed to process the request. - - -.. _server_fetch_from_server: - -Fetching Models from a Server -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can configure the HTTP server to fetch models from another URL: - -.. code-block:: bash - - rasa run --enable-api --log-file out.log --endpoints my_endpoints.yml - -The model server is specified in the endpoint configuration -(``my_endpoints.yml``), where you specify the server URL Rasa -regularly queries for zipped Rasa models: - -.. 
code-block:: yaml - - models: - url: http://my-server.com/models/default@latest - wait_time_between_pulls: 10 # [optional](default: 100) - -.. note:: - - If you want to pull the model just once from the server, set - ``wait_time_between_pulls`` to ``None``. - -.. note:: - - Your model server must provide zipped Rasa models, and have - ``{"ETag": <model_hash_string>}`` as one of its headers. Rasa will - only download a new model if this model hash has changed. - -Rasa sends requests to your model server with an ``If-None-Match`` -header that contains the current model hash. If your model server can -provide a model with a different hash from the one you sent, it should send it -in as a zip file with an ``ETag`` header containing the new hash. If not, Rasa -expects an empty response with a ``204`` or ``304`` status code. - -An example request Rasa might make to your model server looks like this: - -.. code-block:: bash - - $ curl --header "If-None-Match: d41d8cd98f00b204e9800998ecf8427e" http://my-server.com/models/default@latest - - -.. _server_fetch_from_remote_storage: - -Fetching Models from a Remote Storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can also configure the Rasa server to fetch your model from a remote storage: - -.. code-block:: bash - - rasa run -m 20190506-100418.tar.gz --enable-api --log-file out.log --remote-storage aws - -The model will be downloaded and stored in a temporary directory on your local storage system. -For more information see :ref:`cloud-storage`. - -.. _server_ssl: - -Configuring SSL / HTTPS ------------------------ - -By default the Rasa server is using HTTP for its communication. To secure the -communication with SSL, you need to provide a valid certificate and the corresponding -private key file. - -You can specify these files as part of the ``rasa run`` command: - -.. code-block:: bash - - rasa run --ssl-certificate myssl.crt --ssl-keyfile myssl.key - -If you encrypted your keyfile with a password during creation, you need to add -this password to the command: - -.. code-block:: bash - - rasa run --ssl-certificate myssl.crt --ssl-keyfile myssl.key --ssl-password mypassword - - -.. _server_security: - -Security Considerations ------------------------ - -We recommend to not expose the Rasa Server to the outside world, but -rather connect to it from your backend over a private connection (e.g. -between docker containers). - -Nevertheless, there are two authentication methods built in: - -**Token Based Auth:** - -Pass in the token using ``--auth-token thisismysecret`` when starting -the server: - -.. code-block:: bash - - rasa run \ - -m models \ - --enable-api \ - --log-file out.log \ - --auth-token thisismysecret - -Your requests should pass the token, in our case ``thisismysecret``, -as a parameter: - -.. code-block:: bash - - $ curl -XGET localhost:5005/conversations/default/tracker?token=thisismysecret - -**JWT Based Auth:** - -Enable JWT based authentication using ``--jwt-secret thisismysecret``. -Requests to the server need to contain a valid JWT token in -the ``Authorization`` header that is signed using this secret -and the ``HS256`` algorithm. - -The user must have ``username`` and ``role`` attributes. -If the ``role`` is ``admin``, all endpoints are accessible. -If the ``role`` is ``user``, endpoints with a ``sender_id`` parameter are only accessible -if the ``sender_id`` matches the user's ``username``. - -.. 
code-block:: bash - - rasa run \ - -m models \ - --enable-api \ - --log-file out.log \ - --jwt-secret thisismysecret - - -Your requests should include a proper JWT header: - -.. code-block:: text - - "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ" - "zdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIi" - "wiaWF0IjoxNTE2MjM5MDIyfQ.qdrr2_a7Sd80gmCWjnDomO" - "Gl8eZFVfKXA6jhncgRn-I" - - - - -Endpoint Configuration ----------------------- - -To connect Rasa to other endpoints, you can specify an endpoint -configuration within a YAML file. -Then run Rasa with the flag -``--endpoints <path to endpoint configuration.yml>``. - -For example: - -.. code-block:: bash - - rasa run \ - -m <Rasa model> \ - --endpoints <path to endpoint configuration>.yml - -.. note:: - You can use environment variables within configuration files by specifying them with ``${name of environment variable}``. - These placeholders are then replaced by the value of the environment variable. - -Connecting a Tracker Store -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To configure a tracker store within your endpoint configuration, -see :ref:`tracker-stores`. - -Connecting an Event Broker -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To configure an event broker within your endpoint configuration, -see :ref:`event-brokers`. diff --git a/docs/user-guide/validate-files.rst b/docs/user-guide/validate-files.rst deleted file mode 100644 index 2a9d2017abae..000000000000 --- a/docs/user-guide/validate-files.rst +++ /dev/null @@ -1,58 +0,0 @@ -:desc: Check your domain, stories and intent files for possible errors. - -.. _validate-files: - -Validate Data -============= - -.. edit-link:: - - -Test Domain and Data Files for Mistakes ---------------------------------------- - -To verify if there are any mistakes in your domain file, NLU data, or story data, run the validate script. -You can run it with the following command: - -.. code-block:: bash - - rasa data validate - -The script above runs all the validations on your files. Here is the list of options for -the script: - -.. program-output:: rasa data validate --help - -You can also run these validations through the Python API by importing the `Validator` class, -which has the following methods: - -**from_files():** Creates the instance from string paths to the necessary files. - -**verify_intents():** Checks if intents listed in domain file are consistent with the NLU data. - -**verify_intents_in_stories():** Verification for intents in the stories, to check if they are valid. - -**verify_utterances():** Checks domain file for consistency between utterance templates and utterances listed under -actions. - -**verify_utterances_in_stories():** Verification for utterances in stories, to check if they are valid. - -**verify_all():** Runs all verifications above. - -To use these functions it is necessary to create a `Validator` object and initialize the logger. See the following code: - -..
code-block:: python - - import logging - from rasa import utils - from rasa.core.validator import Validator - - logger = logging.getLogger(__name__) - - utils.configure_colored_logging('DEBUG') - - validator = Validator.from_files(domain_file='domain.yml', - nlu_data='data/nlu_data.md', - stories='data/stories.md') - - validator.verify_all() diff --git a/docs/utils/StoryLexer.py b/docs/utils/StoryLexer.py deleted file mode 100644 index 949a68e571ae..000000000000 --- a/docs/utils/StoryLexer.py +++ /dev/null @@ -1,64 +0,0 @@ -from pygments.lexer import RegexLexer, bygroups, using, default, include -from pygments.lexers.data import JsonLexer -from pygments.token import Keyword, Comment, Token, Text, Generic, Name - - -class StoryLexer(RegexLexer): - """Lexer for the Rasa Core story file format. - Used for syntax highlighting of story snippets in the docs.""" - - name = "Story" - aliases = ["story"] - filenames = ["*.md"] - - tokens = { - "comment": [ - ( - r"(\s*<!--)((?:.*?\n?)*)(-->)", - bygroups(Keyword, Comment.MultiLine, Keyword), - ) - ], - "root": [ - include("comment"), - (r"\s*-\s*(slot)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(restart)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(rewind)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(reset_slots)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(reminder)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(undo)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(export)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(pause)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(resume)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(utter_[^\s]*)", Token.Text, ("event", "event_rx")), - ( - r"(\s*-(?:\s*)(?:.*?))(\s*)(?:(?:(<!--)" r"((?:.*?\n?)*)(-->))|(\n|$))", - bygroups(Text, Text, Keyword, Comment.MultiLine, Keyword, Text), - ), - (r"\s*\>\s*[^\s]*", Name.Constant), - ( - r"(#+(?:\s*)(?:.*?))(\s*)(?:(?:(<!--)((?:.*?\n?)*)(-->))|(\n|$))", - bygroups( - Generic.Heading, Text, Keyword, Comment.MultiLine, Keyword, Text - ), - ), - (r"\s*\*\s*", Name.Variable.Magic, ("intent", "intent_rx")), - (r".*\n", Text), - ], - "event": [include("comment"), (r"\s*(\n|$)", Text, "#pop")], - "event_rx": [(r"({.*?})?", bygroups(using(JsonLexer)), "#pop")], - "intent": [ - (r"\s*OR\s*", Keyword, "intent_rx"), - include("comment"), - (r"\s*(?:\n|$)", Text, "#pop"), - default("#pop"), - ], - "intent_rx": [ - (r'["\'].*["\']', Name.Variable.Magic, "#pop"), - ( - r"([^\s\{]*\s*)({.*?})?", - bygroups(Name.Variable.Magic, using(JsonLexer)), - "#pop", - ), - (r"\s*(\n|$)", Text, "#pop:2"), - ], - } diff --git a/docs/yarn.lock b/docs/yarn.lock new file mode 100644 index 000000000000..a44e349f0a18 --- /dev/null +++ b/docs/yarn.lock @@ -0,0 +1,14668 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
+# yarn lockfile v1 + + +"@analytics/cookie-utils@^0.2.3": + version "0.2.3" + resolved "https://registry.yarnpkg.com/@analytics/cookie-utils/-/cookie-utils-0.2.3.tgz#e6ab923f88d89f7b02da0cfab585ff193977052f" + integrity sha512-RiMAVpSluRbWb2hlT9wMJ0r2l+MUZzScYjY+w2iWRzjOr9Zzzs4tYzJT6Sd94PDz3LzCuf4aGOwS6pkKXTEBLw== + +"@analytics/core@^0.6.2": + version "0.6.2" + resolved "https://registry.yarnpkg.com/@analytics/core/-/core-0.6.2.tgz#bfb67f115df278c8edc94e116afbd3f67e6e082b" + integrity sha512-qXBfef5/HK5RZkGlkVqRKnjFQuPZejU6NLqnzx/DH3EU28w7a7IgUN+qZ2VSWFr3aMtNJ0qVybfHDxcJBtrLtQ== + dependencies: + analytics-utils "^0.2.2" + +"@analytics/storage-utils@^0.2.4": + version "0.2.4" + resolved "https://registry.yarnpkg.com/@analytics/storage-utils/-/storage-utils-0.2.4.tgz#b373fc1c3910c201c3020f014c8bffd41cfc3f94" + integrity sha512-VHRggJbRY8vHIADWVwbq9cZux0L9LdmlN31XA3daVAI4gMkKdQEocxB7KqGDt6SfIJ3NYi/qh1nRJGooYmTBiA== + dependencies: + "@analytics/cookie-utils" "^0.2.3" + +"@babel/code-frame@7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.8.3.tgz#33e25903d7481181534e12ec0a25f16b6fcf419e" + integrity sha512-a9gxpmdXtZEInkCSHUJDLHZVBgb1QS0jhss4cPP93EW7s+uC5bikET2twEF3KV+7rDblJcmNvTR7VJejqd2C2g== + dependencies: + "@babel/highlight" "^7.8.3" + +"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.10.1.tgz#d5481c5095daa1c57e16e54c6f9198443afb49ff" + integrity sha512-IGhtTmpjGbYzcEDOw7DcQtbQSXcG9ftmAXtWTu9V936vDye4xjjekktFAtgZsWpzTj/X01jocB46mTywm/4SZw== + dependencies: + "@babel/highlight" "^7.10.1" + +"@babel/code-frame@^7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.10.4.tgz#168da1a36e90da68ae8d49c0f1b48c7c6249213a" + integrity sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg== + dependencies: + "@babel/highlight" "^7.10.4" + +"@babel/compat-data@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.10.1.tgz#b1085ffe72cd17bf2c0ee790fc09f9626011b2db" + integrity sha512-CHvCj7So7iCkGKPRFUfryXIkU2gSBw7VSZFYLsqVhrS47269VK2Hfi9S/YcublPMW8k1u2bQBlbDruoQEm4fgw== + dependencies: + browserslist "^4.12.0" + invariant "^2.2.4" + semver "^5.5.0" + +"@babel/core@7.10.5": + version "7.10.5" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.10.5.tgz#1f15e2cca8ad9a1d78a38ddba612f5e7cdbbd330" + integrity sha512-O34LQooYVDXPl7QWCdW9p4NR+QlzOr7xShPPJz8GsuCU3/8ua/wqTr7gmnxXv+WBESiGU/G5s16i6tUvHkNb+w== + dependencies: + "@babel/code-frame" "^7.10.4" + "@babel/generator" "^7.10.5" + "@babel/helper-module-transforms" "^7.10.5" + "@babel/helpers" "^7.10.4" + "@babel/parser" "^7.10.5" + "@babel/template" "^7.10.4" + "@babel/traverse" "^7.10.5" + "@babel/types" "^7.10.5" + convert-source-map "^1.7.0" + debug "^4.1.0" + gensync "^1.0.0-beta.1" + json5 "^2.1.2" + lodash "^4.17.19" + resolve "^1.3.2" + semver "^5.4.1" + source-map "^0.5.0" + +"@babel/core@^7.7.5", "@babel/core@^7.9.0": + version "7.10.2" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.10.2.tgz#bd6786046668a925ac2bd2fd95b579b92a23b36a" + integrity sha512-KQmV9yguEjQsXqyOUGKjS4+3K8/DlOCE2pZcq4augdQmtTy5iv5EHtmMSJ7V4c1BIPjuwtZYqYLCq9Ga+hGBRQ== + dependencies: + "@babel/code-frame" "^7.10.1" + "@babel/generator" "^7.10.2" + "@babel/helper-module-transforms" "^7.10.1" + "@babel/helpers" "^7.10.1" + "@babel/parser" "^7.10.2" + 
"@babel/template" "^7.10.1" + "@babel/traverse" "^7.10.1" + "@babel/types" "^7.10.2" + convert-source-map "^1.7.0" + debug "^4.1.0" + gensync "^1.0.0-beta.1" + json5 "^2.1.2" + lodash "^4.17.13" + resolve "^1.3.2" + semver "^5.4.1" + source-map "^0.5.0" + +"@babel/generator@^7.10.1", "@babel/generator@^7.10.2": + version "7.10.2" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.10.2.tgz#0fa5b5b2389db8bfdfcc3492b551ee20f5dd69a9" + integrity sha512-AxfBNHNu99DTMvlUPlt1h2+Hn7knPpH5ayJ8OqDWSeLld+Fi2AYBTC/IejWDM9Edcii4UzZRCsbUt0WlSDsDsA== + dependencies: + "@babel/types" "^7.10.2" + jsesc "^2.5.1" + lodash "^4.17.13" + source-map "^0.5.0" + +"@babel/generator@^7.10.5": + version "7.10.5" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.10.5.tgz#1b903554bc8c583ee8d25f1e8969732e6b829a69" + integrity sha512-3vXxr3FEW7E7lJZiWQ3bM4+v/Vyr9C+hpolQ8BGFr9Y8Ri2tFLWTixmwKBafDujO1WVah4fhZBeU1bieKdghig== + dependencies: + "@babel/types" "^7.10.5" + jsesc "^2.5.1" + source-map "^0.5.0" + +"@babel/generator@^7.11.0": + version "7.11.0" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.11.0.tgz#4b90c78d8c12825024568cbe83ee6c9af193585c" + integrity sha512-fEm3Uzw7Mc9Xi//qU20cBKatTfs2aOtKqmvy/Vm7RkJEGFQ4xc9myCfbXxqK//ZS8MR/ciOHw6meGASJuKmDfQ== + dependencies: + "@babel/types" "^7.11.0" + jsesc "^2.5.1" + source-map "^0.5.0" + +"@babel/helper-annotate-as-pure@^7.0.0": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.10.4.tgz#5bf0d495a3f757ac3bda48b5bf3b3ba309c72ba3" + integrity sha512-XQlqKQP4vXFB7BN8fEEerrmYvHp3fK/rBkRFz9jaJbzK0B1DSfej9Kc7ZzE8Z/OnId1jpJdNAZ3BFQjWG68rcA== + dependencies: + "@babel/types" "^7.10.4" + +"@babel/helper-annotate-as-pure@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.10.1.tgz#f6d08acc6f70bbd59b436262553fb2e259a1a268" + integrity sha512-ewp3rvJEwLaHgyWGe4wQssC2vjks3E80WiUe2BpMb0KhreTjMROCbxXcEovTrbeGVdQct5VjQfrv9EgC+xMzCw== + dependencies: + "@babel/types" "^7.10.1" + +"@babel/helper-builder-binary-assignment-operator-visitor@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.10.1.tgz#0ec7d9be8174934532661f87783eb18d72290059" + integrity sha512-cQpVq48EkYxUU0xozpGCLla3wlkdRRqLWu1ksFMXA9CM5KQmyyRpSEsYXbao7JUkOw/tAaYKCaYyZq6HOFYtyw== + dependencies: + "@babel/helper-explode-assignable-expression" "^7.10.1" + "@babel/types" "^7.10.1" + +"@babel/helper-builder-react-jsx-experimental@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-builder-react-jsx-experimental/-/helper-builder-react-jsx-experimental-7.10.1.tgz#9a7d58ad184d3ac3bafb1a452cec2bad7e4a0bc8" + integrity sha512-irQJ8kpQUV3JasXPSFQ+LCCtJSc5ceZrPFVj6TElR6XCHssi3jV8ch3odIrNtjJFRZZVbrOEfJMI79TPU/h1pQ== + dependencies: + "@babel/helper-annotate-as-pure" "^7.10.1" + "@babel/helper-module-imports" "^7.10.1" + "@babel/types" "^7.10.1" + +"@babel/helper-builder-react-jsx@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-builder-react-jsx/-/helper-builder-react-jsx-7.10.1.tgz#a327f0cf983af5554701b1215de54a019f09b532" + integrity sha512-KXzzpyWhXgzjXIlJU1ZjIXzUPdej1suE6vzqgImZ/cpAsR/CC8gUcX4EWRmDfWz/cs6HOCPMBIJ3nKoXt3BFuw== + dependencies: + "@babel/helper-annotate-as-pure" "^7.10.1" + "@babel/types" "^7.10.1" + 
+"@babel/helper-compilation-targets@^7.10.2": + version "7.10.2" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.10.2.tgz#a17d9723b6e2c750299d2a14d4637c76936d8285" + integrity sha512-hYgOhF4To2UTB4LTaZepN/4Pl9LD4gfbJx8A34mqoluT8TLbof1mhUlYuNWTEebONa8+UlCC4X0TEXu7AOUyGA== + dependencies: + "@babel/compat-data" "^7.10.1" + browserslist "^4.12.0" + invariant "^2.2.4" + levenary "^1.1.1" + semver "^5.5.0" + +"@babel/helper-create-class-features-plugin@^7.10.1": + version "7.10.2" + resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.10.2.tgz#7474295770f217dbcf288bf7572eb213db46ee67" + integrity sha512-5C/QhkGFh1vqcziq1vAL6SI9ymzUp8BCYjFpvYVhWP4DlATIb3u5q3iUd35mvlyGs8fO7hckkW7i0tmH+5+bvQ== + dependencies: + "@babel/helper-function-name" "^7.10.1" + "@babel/helper-member-expression-to-functions" "^7.10.1" + "@babel/helper-optimise-call-expression" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/helper-replace-supers" "^7.10.1" + "@babel/helper-split-export-declaration" "^7.10.1" + +"@babel/helper-create-regexp-features-plugin@^7.10.1", "@babel/helper-create-regexp-features-plugin@^7.8.3": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.10.1.tgz#1b8feeab1594cbcfbf3ab5a3bbcabac0468efdbd" + integrity sha512-Rx4rHS0pVuJn5pJOqaqcZR4XSgeF9G/pO/79t+4r7380tXFJdzImFnxMU19f83wjSrmKHq6myrM10pFHTGzkUA== + dependencies: + "@babel/helper-annotate-as-pure" "^7.10.1" + "@babel/helper-regex" "^7.10.1" + regexpu-core "^4.7.0" + +"@babel/helper-define-map@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-define-map/-/helper-define-map-7.10.1.tgz#5e69ee8308648470dd7900d159c044c10285221d" + integrity sha512-+5odWpX+OnvkD0Zmq7panrMuAGQBu6aPUgvMzuMGo4R+jUOvealEj2hiqI6WhxgKrTpFoFj0+VdsuA8KDxHBDg== + dependencies: + "@babel/helper-function-name" "^7.10.1" + "@babel/types" "^7.10.1" + lodash "^4.17.13" + +"@babel/helper-explode-assignable-expression@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.10.1.tgz#e9d76305ee1162ca467357ae25df94f179af2b7e" + integrity sha512-vcUJ3cDjLjvkKzt6rHrl767FeE7pMEYfPanq5L16GRtrXIoznc0HykNW2aEYkcnP76P0isoqJ34dDMFZwzEpJg== + dependencies: + "@babel/traverse" "^7.10.1" + "@babel/types" "^7.10.1" + +"@babel/helper-function-name@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.10.1.tgz#92bd63829bfc9215aca9d9defa85f56b539454f4" + integrity sha512-fcpumwhs3YyZ/ttd5Rz0xn0TpIwVkN7X0V38B9TWNfVF42KEkhkAAuPCQ3oXmtTRtiPJrmZ0TrfS0GKF0eMaRQ== + dependencies: + "@babel/helper-get-function-arity" "^7.10.1" + "@babel/template" "^7.10.1" + "@babel/types" "^7.10.1" + +"@babel/helper-function-name@^7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.10.4.tgz#d2d3b20c59ad8c47112fa7d2a94bc09d5ef82f1a" + integrity sha512-YdaSyz1n8gY44EmN7x44zBn9zQ1Ry2Y+3GTA+3vH6Mizke1Vw0aWDM66FOYEPw8//qKkmqOckrGgTYa+6sceqQ== + dependencies: + "@babel/helper-get-function-arity" "^7.10.4" + "@babel/template" "^7.10.4" + "@babel/types" "^7.10.4" + +"@babel/helper-get-function-arity@^7.10.1": + version "7.10.1" + resolved 
"https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.10.1.tgz#7303390a81ba7cb59613895a192b93850e373f7d" + integrity sha512-F5qdXkYGOQUb0hpRaPoetF9AnsXknKjWMZ+wmsIRsp5ge5sFh4c3h1eH2pRTTuy9KKAA2+TTYomGXAtEL2fQEw== + dependencies: + "@babel/types" "^7.10.1" + +"@babel/helper-get-function-arity@^7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.10.4.tgz#98c1cbea0e2332f33f9a4661b8ce1505b2c19ba2" + integrity sha512-EkN3YDB+SRDgiIUnNgcmiD361ti+AVbL3f3Henf6dqqUyr5dMsorno0lJWJuLhDhkI5sYEpgj6y9kB8AOU1I2A== + dependencies: + "@babel/types" "^7.10.4" + +"@babel/helper-hoist-variables@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.10.1.tgz#7e77c82e5dcae1ebf123174c385aaadbf787d077" + integrity sha512-vLm5srkU8rI6X3+aQ1rQJyfjvCBLXP8cAGeuw04zeAM2ItKb1e7pmVmLyHb4sDaAYnLL13RHOZPLEtcGZ5xvjg== + dependencies: + "@babel/types" "^7.10.1" + +"@babel/helper-member-expression-to-functions@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.10.1.tgz#432967fd7e12a4afef66c4687d4ca22bc0456f15" + integrity sha512-u7XLXeM2n50gb6PWJ9hoO5oO7JFPaZtrh35t8RqKLT1jFKj9IWeD1zrcrYp1q1qiZTdEarfDWfTIP8nGsu0h5g== + dependencies: + "@babel/types" "^7.10.1" + +"@babel/helper-member-expression-to-functions@^7.10.4": + version "7.11.0" + resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.11.0.tgz#ae69c83d84ee82f4b42f96e2a09410935a8f26df" + integrity sha512-JbFlKHFntRV5qKw3YC0CvQnDZ4XMwgzzBbld7Ly4Mj4cbFy3KywcR8NtNctRToMWJOVvLINJv525Gd6wwVEx/Q== + dependencies: + "@babel/types" "^7.11.0" + +"@babel/helper-module-imports@^7.0.0", "@babel/helper-module-imports@^7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.10.4.tgz#4c5c54be04bd31670a7382797d75b9fa2e5b5620" + integrity sha512-nEQJHqYavI217oD9+s5MUBzk6x1IlvoS9WTPfgG43CbMEeStE0v+r+TucWdx8KFGowPGvyOkDT9+7DHedIDnVw== + dependencies: + "@babel/types" "^7.10.4" + +"@babel/helper-module-imports@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.10.1.tgz#dd331bd45bccc566ce77004e9d05fe17add13876" + integrity sha512-SFxgwYmZ3HZPyZwJRiVNLRHWuW2OgE5k2nrVs6D9Iv4PPnXVffuEHy83Sfx/l4SqF+5kyJXjAyUmrG7tNm+qVg== + dependencies: + "@babel/types" "^7.10.1" + +"@babel/helper-module-transforms@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.10.1.tgz#24e2f08ee6832c60b157bb0936c86bef7210c622" + integrity sha512-RLHRCAzyJe7Q7sF4oy2cB+kRnU4wDZY/H2xJFGof+M+SJEGhZsb+GFj5j1AD8NiSaVBJ+Pf0/WObiXu/zxWpFg== + dependencies: + "@babel/helper-module-imports" "^7.10.1" + "@babel/helper-replace-supers" "^7.10.1" + "@babel/helper-simple-access" "^7.10.1" + "@babel/helper-split-export-declaration" "^7.10.1" + "@babel/template" "^7.10.1" + "@babel/types" "^7.10.1" + lodash "^4.17.13" + +"@babel/helper-module-transforms@^7.10.5": + version "7.11.0" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.11.0.tgz#b16f250229e47211abdd84b34b64737c2ab2d359" + integrity sha512-02EVu8COMuTRO1TAzdMtpBPbe6aQ1w/8fePD2YgQmxZU4gpNWaL9gK3Jp7dxlkUlUCJOTaSeA+Hrm1BRQwqIhg== + dependencies: + 
"@babel/helper-module-imports" "^7.10.4" + "@babel/helper-replace-supers" "^7.10.4" + "@babel/helper-simple-access" "^7.10.4" + "@babel/helper-split-export-declaration" "^7.11.0" + "@babel/template" "^7.10.4" + "@babel/types" "^7.11.0" + lodash "^4.17.19" + +"@babel/helper-optimise-call-expression@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.10.1.tgz#b4a1f2561870ce1247ceddb02a3860fa96d72543" + integrity sha512-a0DjNS1prnBsoKx83dP2falChcs7p3i8VMzdrSbfLhuQra/2ENC4sbri34dz/rWmDADsmF1q5GbfaXydh0Jbjg== + dependencies: + "@babel/types" "^7.10.1" + +"@babel/helper-optimise-call-expression@^7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.10.4.tgz#50dc96413d594f995a77905905b05893cd779673" + integrity sha512-n3UGKY4VXwXThEiKrgRAoVPBMqeoPgHVqiHZOanAJCG9nQUL2pLRQirUzl0ioKclHGpGqRgIOkgcIJaIWLpygg== + dependencies: + "@babel/types" "^7.10.4" + +"@babel/helper-plugin-utils@7.10.4", "@babel/helper-plugin-utils@^7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz#2f75a831269d4f677de49986dff59927533cf375" + integrity sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg== + +"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.1", "@babel/helper-plugin-utils@^7.8.0": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.1.tgz#ec5a5cf0eec925b66c60580328b122c01230a127" + integrity sha512-fvoGeXt0bJc7VMWZGCAEBEMo/HAjW2mP8apF5eXK0wSqwLAVHAISCWRoLMBMUs2kqeaG77jltVqu4Hn8Egl3nA== + +"@babel/helper-regex@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-regex/-/helper-regex-7.10.1.tgz#021cf1a7ba99822f993222a001cc3fec83255b96" + integrity sha512-7isHr19RsIJWWLLFn21ubFt223PjQyg1HY7CZEMRr820HttHPpVvrsIN3bUOo44DEfFV4kBXO7Abbn9KTUZV7g== + dependencies: + lodash "^4.17.13" + +"@babel/helper-remap-async-to-generator@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.10.1.tgz#bad6aaa4ff39ce8d4b82ccaae0bfe0f7dbb5f432" + integrity sha512-RfX1P8HqsfgmJ6CwaXGKMAqbYdlleqglvVtht0HGPMSsy2V6MqLlOJVF/0Qyb/m2ZCi2z3q3+s6Pv7R/dQuZ6A== + dependencies: + "@babel/helper-annotate-as-pure" "^7.10.1" + "@babel/helper-wrap-function" "^7.10.1" + "@babel/template" "^7.10.1" + "@babel/traverse" "^7.10.1" + "@babel/types" "^7.10.1" + +"@babel/helper-replace-supers@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.10.1.tgz#ec6859d20c5d8087f6a2dc4e014db7228975f13d" + integrity sha512-SOwJzEfpuQwInzzQJGjGaiG578UYmyi2Xw668klPWV5n07B73S0a9btjLk/52Mlcxa+5AdIYqws1KyXRfMoB7A== + dependencies: + "@babel/helper-member-expression-to-functions" "^7.10.1" + "@babel/helper-optimise-call-expression" "^7.10.1" + "@babel/traverse" "^7.10.1" + "@babel/types" "^7.10.1" + +"@babel/helper-replace-supers@^7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.10.4.tgz#d585cd9388ea06e6031e4cd44b6713cbead9e6cf" + integrity sha512-sPxZfFXocEymYTdVK1UNmFPBN+Hv5mJkLPsYWwGBxZAxaWfFu+xqp7b6qWD0yjNuNL2VKc6L5M18tOXUP7NU0A== + dependencies: + "@babel/helper-member-expression-to-functions" "^7.10.4" + 
"@babel/helper-optimise-call-expression" "^7.10.4" + "@babel/traverse" "^7.10.4" + "@babel/types" "^7.10.4" + +"@babel/helper-simple-access@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.10.1.tgz#08fb7e22ace9eb8326f7e3920a1c2052f13d851e" + integrity sha512-VSWpWzRzn9VtgMJBIWTZ+GP107kZdQ4YplJlCmIrjoLVSi/0upixezHCDG8kpPVTBJpKfxTH01wDhh+jS2zKbw== + dependencies: + "@babel/template" "^7.10.1" + "@babel/types" "^7.10.1" + +"@babel/helper-simple-access@^7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.10.4.tgz#0f5ccda2945277a2a7a2d3a821e15395edcf3461" + integrity sha512-0fMy72ej/VEvF8ULmX6yb5MtHG4uH4Dbd6I/aHDb/JVg0bbivwt9Wg+h3uMvX+QSFtwr5MeItvazbrc4jtRAXw== + dependencies: + "@babel/template" "^7.10.4" + "@babel/types" "^7.10.4" + +"@babel/helper-split-export-declaration@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.10.1.tgz#c6f4be1cbc15e3a868e4c64a17d5d31d754da35f" + integrity sha512-UQ1LVBPrYdbchNhLwj6fetj46BcFwfS4NllJo/1aJsT+1dLTEnXJL0qHqtY7gPzF8S2fXBJamf1biAXV3X077g== + dependencies: + "@babel/types" "^7.10.1" + +"@babel/helper-split-export-declaration@^7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.10.4.tgz#2c70576eaa3b5609b24cb99db2888cc3fc4251d1" + integrity sha512-pySBTeoUff56fL5CBU2hWm9TesA4r/rOkI9DyJLvvgz09MB9YtfIYe3iBriVaYNaPe+Alua0vBIOVOLs2buWhg== + dependencies: + "@babel/types" "^7.10.4" + +"@babel/helper-split-export-declaration@^7.11.0": + version "7.11.0" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.11.0.tgz#f8a491244acf6a676158ac42072911ba83ad099f" + integrity sha512-74Vejvp6mHkGE+m+k5vHY93FX2cAtrw1zXrZXRlG4l410Nm9PxfEiVTn1PjDPV5SnmieiueY4AFg2xqhNFuuZg== + dependencies: + "@babel/types" "^7.11.0" + +"@babel/helper-validator-identifier@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.10.1.tgz#5770b0c1a826c4f53f5ede5e153163e0318e94b5" + integrity sha512-5vW/JXLALhczRCWP0PnFDMCJAchlBvM7f4uk/jXritBnIa6E1KmqmtrS3yn1LAnxFBypQ3eneLuXjsnfQsgILw== + +"@babel/helper-validator-identifier@^7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.10.4.tgz#a78c7a7251e01f616512d31b10adcf52ada5e0d2" + integrity sha512-3U9y+43hz7ZM+rzG24Qe2mufW5KhvFg/NhnNph+i9mgCtdTCtMJuI1TMkrIUiK7Ix4PYlRF9I5dhqaLYA/ADXw== + +"@babel/helper-wrap-function@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.10.1.tgz#956d1310d6696257a7afd47e4c42dfda5dfcedc9" + integrity sha512-C0MzRGteVDn+H32/ZgbAv5r56f2o1fZSA/rj/TYo8JEJNHg+9BdSmKBUND0shxWRztWhjlT2cvHYuynpPsVJwQ== + dependencies: + "@babel/helper-function-name" "^7.10.1" + "@babel/template" "^7.10.1" + "@babel/traverse" "^7.10.1" + "@babel/types" "^7.10.1" + +"@babel/helpers@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.10.1.tgz#a6827b7cb975c9d9cef5fd61d919f60d8844a973" + integrity sha512-muQNHF+IdU6wGgkaJyhhEmI54MOZBKsFfsXFhboz1ybwJ1Kl7IHlbm2a++4jwrmY5UYsgitt5lfqo1wMFcHmyw== + dependencies: + "@babel/template" "^7.10.1" + "@babel/traverse" "^7.10.1" + "@babel/types" 
"^7.10.1" + +"@babel/helpers@^7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.10.4.tgz#2abeb0d721aff7c0a97376b9e1f6f65d7a475044" + integrity sha512-L2gX/XeUONeEbI78dXSrJzGdz4GQ+ZTA/aazfUsFaWjSe95kiCuOZ5HsXvkiw3iwF+mFHSRUfJU8t6YavocdXA== + dependencies: + "@babel/template" "^7.10.4" + "@babel/traverse" "^7.10.4" + "@babel/types" "^7.10.4" + +"@babel/highlight@^7.10.1", "@babel/highlight@^7.8.3": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.10.1.tgz#841d098ba613ba1a427a2b383d79e35552c38ae0" + integrity sha512-8rMof+gVP8mxYZApLF/JgNDAkdKa+aJt3ZYxF8z6+j/hpeXL7iMsKCPHa2jNMHu/qqBwzQF4OHNoYi8dMA/rYg== + dependencies: + "@babel/helper-validator-identifier" "^7.10.1" + chalk "^2.0.0" + js-tokens "^4.0.0" + +"@babel/highlight@^7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.10.4.tgz#7d1bdfd65753538fabe6c38596cdb76d9ac60143" + integrity sha512-i6rgnR/YgPEQzZZnbTHHuZdlE8qyoBNalD6F+q4vAFlcMEcqmkoG+mPqJYJCo63qPf74+Y1UZsl3l6f7/RIkmA== + dependencies: + "@babel/helper-validator-identifier" "^7.10.4" + chalk "^2.0.0" + js-tokens "^4.0.0" + +"@babel/parser@^7.0.0", "@babel/parser@^7.11.0", "@babel/parser@^7.9.4": + version "7.11.1" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.11.1.tgz#d91a387990b21e5d20047b336bb19b0553f02ff5" + integrity sha512-u9QMIRdKVF7hfEkb3nu2LgZDIzCQPv+yHD9Eg6ruoJLjkrQ9fFz4IBSlF/9XwoNri9+2F1IY+dYuOfZrXq8t3w== + +"@babel/parser@^7.10.1", "@babel/parser@^7.10.2": + version "7.10.2" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.10.2.tgz#871807f10442b92ff97e4783b9b54f6a0ca812d0" + integrity sha512-PApSXlNMJyB4JiGVhCOlzKIif+TKFTvu0aQAhnTvfP/z3vVSN6ZypH5bfUNwFXXjRQtUEBNFd2PtmCmG2Py3qQ== + +"@babel/parser@^7.10.4", "@babel/parser@^7.10.5": + version "7.10.5" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.10.5.tgz#e7c6bf5a7deff957cec9f04b551e2762909d826b" + integrity sha512-wfryxy4bE1UivvQKSQDU4/X6dr+i8bctjUjj8Zyt3DQy7NtPizJXT8M52nqpNKL+nq2PW8lxk4ZqLj0fD4B4hQ== + +"@babel/plugin-proposal-async-generator-functions@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.10.1.tgz#6911af5ba2e615c4ff3c497fe2f47b35bf6d7e55" + integrity sha512-vzZE12ZTdB336POZjmpblWfNNRpMSua45EYnRigE2XsZxcXcIyly2ixnTJasJE4Zq3U7t2d8rRF7XRUuzHxbOw== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/helper-remap-async-to-generator" "^7.10.1" + "@babel/plugin-syntax-async-generators" "^7.8.0" + +"@babel/plugin-proposal-class-properties@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.10.1.tgz#046bc7f6550bb08d9bd1d4f060f5f5a4f1087e01" + integrity sha512-sqdGWgoXlnOdgMXU+9MbhzwFRgxVLeiGBqTrnuS7LC2IBU31wSsESbTUreT2O418obpfPdGUR2GbEufZF1bpqw== + dependencies: + "@babel/helper-create-class-features-plugin" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-proposal-dynamic-import@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.10.1.tgz#e36979dc1dc3b73f6d6816fc4951da2363488ef0" + integrity sha512-Cpc2yUVHTEGPlmiQzXj026kqwjEQAD9I4ZC16uzdbgWgitg/UHKHLffKNCQZ5+y8jpIZPJcKcwsr2HwPh+w3XA== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-syntax-dynamic-import" "^7.8.0" + 
+"@babel/plugin-proposal-json-strings@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.10.1.tgz#b1e691ee24c651b5a5e32213222b2379734aff09" + integrity sha512-m8r5BmV+ZLpWPtMY2mOKN7wre6HIO4gfIiV+eOmsnZABNenrt/kzYBwrh+KOfgumSWpnlGs5F70J8afYMSJMBg== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-syntax-json-strings" "^7.8.0" + +"@babel/plugin-proposal-nullish-coalescing-operator@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.10.1.tgz#02dca21673842ff2fe763ac253777f235e9bbf78" + integrity sha512-56cI/uHYgL2C8HVuHOuvVowihhX0sxb3nnfVRzUeVHTWmRHTZrKuAh/OBIMggGU/S1g/1D2CRCXqP+3u7vX7iA== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.0" + +"@babel/plugin-proposal-numeric-separator@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.10.1.tgz#a9a38bc34f78bdfd981e791c27c6fdcec478c123" + integrity sha512-jjfym4N9HtCiNfyyLAVD8WqPYeHUrw4ihxuAynWj6zzp2gf9Ey2f7ImhFm6ikB3CLf5Z/zmcJDri6B4+9j9RsA== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-syntax-numeric-separator" "^7.10.1" + +"@babel/plugin-proposal-object-rest-spread@7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.10.4.tgz#50129ac216b9a6a55b3853fdd923e74bf553a4c0" + integrity sha512-6vh4SqRuLLarjgeOf4EaROJAHjvu9Gl+/346PbDH9yWbJyfnJ/ah3jmYKYtswEyCoWZiidvVHjHshd4WgjB9BA== + dependencies: + "@babel/helper-plugin-utils" "^7.10.4" + "@babel/plugin-syntax-object-rest-spread" "^7.8.0" + "@babel/plugin-transform-parameters" "^7.10.4" + +"@babel/plugin-proposal-object-rest-spread@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.10.1.tgz#cba44908ac9f142650b4a65b8aa06bf3478d5fb6" + integrity sha512-Z+Qri55KiQkHh7Fc4BW6o+QBuTagbOp9txE+4U1i79u9oWlf2npkiDx+Rf3iK3lbcHBuNy9UOkwuR5wOMH3LIQ== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-syntax-object-rest-spread" "^7.8.0" + "@babel/plugin-transform-parameters" "^7.10.1" + +"@babel/plugin-proposal-optional-catch-binding@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.10.1.tgz#c9f86d99305f9fa531b568ff5ab8c964b8b223d2" + integrity sha512-VqExgeE62YBqI3ogkGoOJp1R6u12DFZjqwJhqtKc2o5m1YTUuUWnos7bZQFBhwkxIFpWYJ7uB75U7VAPPiKETA== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-syntax-optional-catch-binding" "^7.8.0" + +"@babel/plugin-proposal-optional-chaining@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.10.1.tgz#15f5d6d22708629451a91be28f8facc55b0e818c" + integrity sha512-dqQj475q8+/avvok72CF3AOSV/SGEcH29zT5hhohqqvvZ2+boQoOr7iGldBG5YXTO2qgCgc2B3WvVLUdbeMlGA== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-syntax-optional-chaining" "^7.8.0" + +"@babel/plugin-proposal-optional-chaining@^7.10.3": + version "7.10.4" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.10.4.tgz#750f1255e930a1f82d8cdde45031f81a0d0adff7" + integrity sha512-ZIhQIEeavTgouyMSdZRap4VPPHqJJ3NEs2cuHs5p0erH+iz6khB0qfgU8g7UuJkG88+fBMy23ZiU+nuHvekJeQ== + dependencies: + "@babel/helper-plugin-utils" "^7.10.4" + "@babel/plugin-syntax-optional-chaining" "^7.8.0" + +"@babel/plugin-proposal-private-methods@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.10.1.tgz#ed85e8058ab0fe309c3f448e5e1b73ca89cdb598" + integrity sha512-RZecFFJjDiQ2z6maFprLgrdnm0OzoC23Mx89xf1CcEsxmHuzuXOdniEuI+S3v7vjQG4F5sa6YtUp+19sZuSxHg== + dependencies: + "@babel/helper-create-class-features-plugin" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-proposal-unicode-property-regex@^7.10.1", "@babel/plugin-proposal-unicode-property-regex@^7.4.4": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.10.1.tgz#dc04feb25e2dd70c12b05d680190e138fa2c0c6f" + integrity sha512-JjfngYRvwmPwmnbRZyNiPFI8zxCZb8euzbCG/LxyKdeTb59tVciKo9GK9bi6JYKInk1H11Dq9j/zRqIH4KigfQ== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-syntax-async-generators@^7.8.0": + version "7.8.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz#a983fb1aeb2ec3f6ed042a210f640e90e786fe0d" + integrity sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw== + dependencies: + "@babel/helper-plugin-utils" "^7.8.0" + +"@babel/plugin-syntax-class-properties@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.10.1.tgz#d5bc0645913df5b17ad7eda0fa2308330bde34c5" + integrity sha512-Gf2Yx/iRs1JREDtVZ56OrjjgFHCaldpTnuy9BHla10qyVT3YkIIGEtoDWhyop0ksu1GvNjHIoYRBqm3zoR1jyQ== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-syntax-dynamic-import@^7.8.0", "@babel/plugin-syntax-dynamic-import@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz#62bf98b2da3cd21d626154fc96ee5b3cb68eacb3" + integrity sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ== + dependencies: + "@babel/helper-plugin-utils" "^7.8.0" + +"@babel/plugin-syntax-json-strings@^7.8.0": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz#01ca21b668cd8218c9e640cb6dd88c5412b2c96a" + integrity sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA== + dependencies: + "@babel/helper-plugin-utils" "^7.8.0" + +"@babel/plugin-syntax-jsx@7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.10.4.tgz#39abaae3cbf710c4373d8429484e6ba21340166c" + integrity sha512-KCg9mio9jwiARCB7WAcQ7Y1q+qicILjoK8LP/VkPkEKaf5dkaZZK1EcTe91a3JJlZ3qy6L5s9X52boEYi8DM9g== + dependencies: + "@babel/helper-plugin-utils" "^7.10.4" + +"@babel/plugin-syntax-jsx@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.10.1.tgz#0ae371134a42b91d5418feb3c8c8d43e1565d2da" 
+ integrity sha512-+OxyOArpVFXQeXKLO9o+r2I4dIoVoy6+Uu0vKELrlweDM3QJADZj+Z+5ERansZqIZBcLj42vHnDI8Rz9BnRIuQ== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-syntax-nullish-coalescing-operator@^7.8.0": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz#167ed70368886081f74b5c36c65a88c03b66d1a9" + integrity sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ== + dependencies: + "@babel/helper-plugin-utils" "^7.8.0" + +"@babel/plugin-syntax-numeric-separator@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.1.tgz#25761ee7410bc8cf97327ba741ee94e4a61b7d99" + integrity sha512-uTd0OsHrpe3tH5gRPTxG8Voh99/WCU78vIm5NMRYPAqC8lR4vajt6KkCAknCHrx24vkPdd/05yfdGSB4EIY2mg== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-syntax-object-rest-spread@7.8.3", "@babel/plugin-syntax-object-rest-spread@^7.8.0": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz#60e225edcbd98a640332a2e72dd3e66f1af55871" + integrity sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA== + dependencies: + "@babel/helper-plugin-utils" "^7.8.0" + +"@babel/plugin-syntax-optional-catch-binding@^7.8.0": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz#6111a265bcfb020eb9efd0fdfd7d26402b9ed6c1" + integrity sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q== + dependencies: + "@babel/helper-plugin-utils" "^7.8.0" + +"@babel/plugin-syntax-optional-chaining@^7.8.0": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz#4f69c2ab95167e0180cd5336613f8c5788f7d48a" + integrity sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg== + dependencies: + "@babel/helper-plugin-utils" "^7.8.0" + +"@babel/plugin-syntax-top-level-await@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.10.1.tgz#8b8733f8c57397b3eaa47ddba8841586dcaef362" + integrity sha512-hgA5RYkmZm8FTFT3yu2N9Bx7yVVOKYT6yEdXXo6j2JTm0wNxgqaGeQVaSHRjhfnQbX91DtjFB6McRFSlcJH3xQ== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-syntax-typescript@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.10.1.tgz#5e82bc27bb4202b93b949b029e699db536733810" + integrity sha512-X/d8glkrAtra7CaQGMiGs/OGa6XgUzqPcBXCIGFCpCqnfGlT0Wfbzo/B89xHhnInTaItPK8LALblVXcUOEh95Q== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-arrow-functions@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.10.1.tgz#cb5ee3a36f0863c06ead0b409b4cc43a889b295b" + integrity sha512-6AZHgFJKP3DJX0eCNJj01RpytUa3SOGawIxweHkNX2L6PYikOZmoh5B0d7hIHaIgveMjX990IAa/xK7jRTN8OA== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-async-to-generator@^7.10.1": + version "7.10.1" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.10.1.tgz#e5153eb1a3e028f79194ed8a7a4bf55f862b2062" + integrity sha512-XCgYjJ8TY2slj6SReBUyamJn3k2JLUIiiR5b6t1mNCMSvv7yx+jJpaewakikp0uWFQSF7ChPPoe3dHmXLpISkg== + dependencies: + "@babel/helper-module-imports" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/helper-remap-async-to-generator" "^7.10.1" + +"@babel/plugin-transform-block-scoped-functions@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.10.1.tgz#146856e756d54b20fff14b819456b3e01820b85d" + integrity sha512-B7K15Xp8lv0sOJrdVAoukKlxP9N59HS48V1J3U/JGj+Ad+MHq+am6xJVs85AgXrQn4LV8vaYFOB+pr/yIuzW8Q== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-block-scoping@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.10.1.tgz#47092d89ca345811451cd0dc5d91605982705d5e" + integrity sha512-8bpWG6TtF5akdhIm/uWTyjHqENpy13Fx8chg7pFH875aNLwX8JxIxqm08gmAT+Whe6AOmaTeLPe7dpLbXt+xUw== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + lodash "^4.17.13" + +"@babel/plugin-transform-classes@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.10.1.tgz#6e11dd6c4dfae70f540480a4702477ed766d733f" + integrity sha512-P9V0YIh+ln/B3RStPoXpEQ/CoAxQIhRSUn7aXqQ+FZJ2u8+oCtjIXR3+X0vsSD8zv+mb56K7wZW1XiDTDGiDRQ== + dependencies: + "@babel/helper-annotate-as-pure" "^7.10.1" + "@babel/helper-define-map" "^7.10.1" + "@babel/helper-function-name" "^7.10.1" + "@babel/helper-optimise-call-expression" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/helper-replace-supers" "^7.10.1" + "@babel/helper-split-export-declaration" "^7.10.1" + globals "^11.1.0" + +"@babel/plugin-transform-computed-properties@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.10.1.tgz#59aa399064429d64dce5cf76ef9b90b7245ebd07" + integrity sha512-mqSrGjp3IefMsXIenBfGcPXxJxweQe2hEIwMQvjtiDQ9b1IBvDUjkAtV/HMXX47/vXf14qDNedXsIiNd1FmkaQ== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-destructuring@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.10.1.tgz#abd58e51337815ca3a22a336b85f62b998e71907" + integrity sha512-V/nUc4yGWG71OhaTH705pU8ZSdM6c1KmmLP8ys59oOYbT7RpMYAR3MsVOt6OHL0WzG7BlTU076va9fjJyYzJMA== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-dotall-regex@^7.10.1", "@babel/plugin-transform-dotall-regex@^7.4.4": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.10.1.tgz#920b9fec2d78bb57ebb64a644d5c2ba67cc104ee" + integrity sha512-19VIMsD1dp02RvduFUmfzj8uknaO3uiHHF0s3E1OHnVsNj8oge8EQ5RzHRbJjGSetRnkEuBYO7TG1M5kKjGLOA== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-duplicate-keys@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.10.1.tgz#c900a793beb096bc9d4d0a9d0cde19518ffc83b9" + integrity 
sha512-wIEpkX4QvX8Mo9W6XF3EdGttrIPZWozHfEaDTU0WJD/TDnXMvdDh30mzUl/9qWhnf7naicYartcEfUghTCSNpA== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-exponentiation-operator@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.10.1.tgz#279c3116756a60dd6e6f5e488ba7957db9c59eb3" + integrity sha512-lr/przdAbpEA2BUzRvjXdEDLrArGRRPwbaF9rvayuHRvdQ7lUTTkZnhZrJ4LE2jvgMRFF4f0YuPQ20vhiPYxtA== + dependencies: + "@babel/helper-builder-binary-assignment-operator-visitor" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-for-of@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.10.1.tgz#ff01119784eb0ee32258e8646157ba2501fcfda5" + integrity sha512-US8KCuxfQcn0LwSCMWMma8M2R5mAjJGsmoCBVwlMygvmDUMkTCykc84IqN1M7t+agSfOmLYTInLCHJM+RUoz+w== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-function-name@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.10.1.tgz#4ed46fd6e1d8fde2a2ec7b03c66d853d2c92427d" + integrity sha512-//bsKsKFBJfGd65qSNNh1exBy5Y9gD9ZN+DvrJ8f7HXr4avE5POW6zB7Rj6VnqHV33+0vXWUwJT0wSHubiAQkw== + dependencies: + "@babel/helper-function-name" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-literals@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.10.1.tgz#5794f8da82846b22e4e6631ea1658bce708eb46a" + integrity sha512-qi0+5qgevz1NHLZroObRm5A+8JJtibb7vdcPQF1KQE12+Y/xxl8coJ+TpPW9iRq+Mhw/NKLjm+5SHtAHCC7lAw== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-member-expression-literals@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.10.1.tgz#90347cba31bca6f394b3f7bd95d2bbfd9fce2f39" + integrity sha512-UmaWhDokOFT2GcgU6MkHC11i0NQcL63iqeufXWfRy6pUOGYeCGEKhvfFO6Vz70UfYJYHwveg62GS83Rvpxn+NA== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-modules-amd@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.10.1.tgz#65950e8e05797ebd2fe532b96e19fc5482a1d52a" + integrity sha512-31+hnWSFRI4/ACFr1qkboBbrTxoBIzj7qA69qlq8HY8p7+YCzkCT6/TvQ1a4B0z27VeWtAeJd6pr5G04dc1iHw== + dependencies: + "@babel/helper-module-transforms" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + babel-plugin-dynamic-import-node "^2.3.3" + +"@babel/plugin-transform-modules-commonjs@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.10.1.tgz#d5ff4b4413ed97ffded99961056e1fb980fb9301" + integrity sha512-AQG4fc3KOah0vdITwt7Gi6hD9BtQP/8bhem7OjbaMoRNCH5Djx42O2vYMfau7QnAzQCa+RJnhJBmFFMGpQEzrg== + dependencies: + "@babel/helper-module-transforms" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/helper-simple-access" "^7.10.1" + babel-plugin-dynamic-import-node "^2.3.3" + +"@babel/plugin-transform-modules-systemjs@^7.10.1": + version "7.10.1" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.10.1.tgz#9962e4b0ac6aaf2e20431ada3d8ec72082cbffb6" + integrity sha512-ewNKcj1TQZDL3YnO85qh9zo1YF1CHgmSTlRQgHqe63oTrMI85cthKtZjAiZSsSNjPQ5NCaYo5QkbYqEw1ZBgZA== + dependencies: + "@babel/helper-hoist-variables" "^7.10.1" + "@babel/helper-module-transforms" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + babel-plugin-dynamic-import-node "^2.3.3" + +"@babel/plugin-transform-modules-umd@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.10.1.tgz#ea080911ffc6eb21840a5197a39ede4ee67b1595" + integrity sha512-EIuiRNMd6GB6ulcYlETnYYfgv4AxqrswghmBRQbWLHZxN4s7mupxzglnHqk9ZiUpDI4eRWewedJJNj67PWOXKA== + dependencies: + "@babel/helper-module-transforms" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-named-capturing-groups-regex@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.8.3.tgz#a2a72bffa202ac0e2d0506afd0939c5ecbc48c6c" + integrity sha512-f+tF/8UVPU86TrCb06JoPWIdDpTNSGGcAtaD9mLP0aYGA0OS0j7j7DHJR0GTFrUZPUU6loZhbsVZgTh0N+Qdnw== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.8.3" + +"@babel/plugin-transform-new-target@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.10.1.tgz#6ee41a5e648da7632e22b6fb54012e87f612f324" + integrity sha512-MBlzPc1nJvbmO9rPr1fQwXOM2iGut+JC92ku6PbiJMMK7SnQc1rytgpopveE3Evn47gzvGYeCdgfCDbZo0ecUw== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-object-super@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.10.1.tgz#2e3016b0adbf262983bf0d5121d676a5ed9c4fde" + integrity sha512-WnnStUDN5GL+wGQrJylrnnVlFhFmeArINIR9gjhSeYyvroGhBrSAXYg/RHsnfzmsa+onJrTJrEClPzgNmmQ4Gw== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/helper-replace-supers" "^7.10.1" + +"@babel/plugin-transform-parameters@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.10.1.tgz#b25938a3c5fae0354144a720b07b32766f683ddd" + integrity sha512-tJ1T0n6g4dXMsL45YsSzzSDZCxiHXAQp/qHrucOq5gEHncTA3xDxnd5+sZcoQp+N1ZbieAaB8r/VUCG0gqseOg== + dependencies: + "@babel/helper-get-function-arity" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-parameters@^7.10.4": + version "7.10.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.10.5.tgz#59d339d58d0b1950435f4043e74e2510005e2c4a" + integrity sha512-xPHwUj5RdFV8l1wuYiu5S9fqWGM2DrYc24TMvUiRrPVm+SM3XeqU9BcokQX/kEUe+p2RBwy+yoiR1w/Blq6ubw== + dependencies: + "@babel/helper-get-function-arity" "^7.10.4" + "@babel/helper-plugin-utils" "^7.10.4" + +"@babel/plugin-transform-property-literals@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.10.1.tgz#cffc7315219230ed81dc53e4625bf86815b6050d" + integrity sha512-Kr6+mgag8auNrgEpbfIWzdXYOvqDHZOF0+Bx2xh4H2EDNwcbRb9lY6nkZg8oSjsX+DH9Ebxm9hOqtKW+gRDeNA== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-react-constant-elements@^7.9.0": + version 
"7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.10.1.tgz#c7f117a54657cba3f9d32012e050fc89982df9e1" + integrity sha512-V4os6bkWt/jbrzfyVcZn2ZpuHZkvj3vyBU0U/dtS8SZuMS7Rfx5oknTrtfyXJ2/QZk8gX7Yls5Z921ItNpE30Q== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-react-display-name@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.10.1.tgz#e6a33f6d48dfb213dda5e007d0c7ff82b6a3d8ef" + integrity sha512-rBjKcVwjk26H3VX8pavMxGf33LNlbocMHdSeldIEswtQ/hrjyTG8fKKILW1cSkODyRovckN/uZlGb2+sAV9JUQ== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-react-jsx-development@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.10.1.tgz#1ac6300d8b28ef381ee48e6fec430cc38047b7f3" + integrity sha512-XwDy/FFoCfw9wGFtdn5Z+dHh6HXKHkC6DwKNWpN74VWinUagZfDcEJc3Y8Dn5B3WMVnAllX8Kviaw7MtC5Epwg== + dependencies: + "@babel/helper-builder-react-jsx-experimental" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-syntax-jsx" "^7.10.1" + +"@babel/plugin-transform-react-jsx-self@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.10.1.tgz#22143e14388d72eb88649606bb9e46f421bc3821" + integrity sha512-4p+RBw9d1qV4S749J42ZooeQaBomFPrSxa9JONLHJ1TxCBo3TzJ79vtmG2S2erUT8PDDrPdw4ZbXGr2/1+dILA== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-syntax-jsx" "^7.10.1" + +"@babel/plugin-transform-react-jsx-source@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.10.1.tgz#30db3d4ee3cdebbb26a82a9703673714777a4273" + integrity sha512-neAbaKkoiL+LXYbGDvh6PjPG+YeA67OsZlE78u50xbWh2L1/C81uHiNP5d1fw+uqUIoiNdCC8ZB+G4Zh3hShJA== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-syntax-jsx" "^7.10.1" + +"@babel/plugin-transform-react-jsx@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.10.1.tgz#91f544248ba131486decb5d9806da6a6e19a2896" + integrity sha512-MBVworWiSRBap3Vs39eHt+6pJuLUAaK4oxGc8g+wY+vuSJvLiEQjW1LSTqKb8OUPtDvHCkdPhk7d6sjC19xyFw== + dependencies: + "@babel/helper-builder-react-jsx" "^7.10.1" + "@babel/helper-builder-react-jsx-experimental" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-syntax-jsx" "^7.10.1" + +"@babel/plugin-transform-react-pure-annotations@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.10.1.tgz#f5e7c755d3e7614d4c926e144f501648a5277b70" + integrity sha512-mfhoiai083AkeewsBHUpaS/FM1dmUENHBMpS/tugSJ7VXqXO5dCN1Gkint2YvM1Cdv1uhmAKt1ZOuAjceKmlLA== + dependencies: + "@babel/helper-annotate-as-pure" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-regenerator@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.10.1.tgz#10e175cbe7bdb63cc9b39f9b3f823c5c7c5c5490" + integrity sha512-B3+Y2prScgJ2Bh/2l9LJxKbb8C8kRfsG4AdPT+n7ixBHIxJaIG8bi8tgjxUMege1+WqSJ+7gu1YeoMVO3gPWzw== + dependencies: + 
regenerator-transform "^0.14.2" + +"@babel/plugin-transform-reserved-words@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.10.1.tgz#0fc1027312b4d1c3276a57890c8ae3bcc0b64a86" + integrity sha512-qN1OMoE2nuqSPmpTqEM7OvJ1FkMEV+BjVeZZm9V9mq/x1JLKQ4pcv8riZJMNN3u2AUGl0ouOMjRr2siecvHqUQ== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-runtime@^7.9.0": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.10.1.tgz#fd1887f749637fb2ed86dc278e79eb41df37f4b1" + integrity sha512-4w2tcglDVEwXJ5qxsY++DgWQdNJcCCsPxfT34wCUwIf2E7dI7pMpH8JczkMBbgBTNzBX62SZlNJ9H+De6Zebaw== + dependencies: + "@babel/helper-module-imports" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + resolve "^1.8.1" + semver "^5.5.1" + +"@babel/plugin-transform-shorthand-properties@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.10.1.tgz#e8b54f238a1ccbae482c4dce946180ae7b3143f3" + integrity sha512-AR0E/lZMfLstScFwztApGeyTHJ5u3JUKMjneqRItWeEqDdHWZwAOKycvQNCasCK/3r5YXsuNG25funcJDu7Y2g== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-spread@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.10.1.tgz#0c6d618a0c4461a274418460a28c9ccf5239a7c8" + integrity sha512-8wTPym6edIrClW8FI2IoaePB91ETOtg36dOkj3bYcNe7aDMN2FXEoUa+WrmPc4xa1u2PQK46fUX2aCb+zo9rfw== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-sticky-regex@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.10.1.tgz#90fc89b7526228bed9842cff3588270a7a393b00" + integrity sha512-j17ojftKjrL7ufX8ajKvwRilwqTok4q+BjkknmQw9VNHnItTyMP5anPFzxFJdCQs7clLcWpCV3ma+6qZWLnGMA== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/helper-regex" "^7.10.1" + +"@babel/plugin-transform-template-literals@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.10.1.tgz#914c7b7f4752c570ea00553b4284dad8070e8628" + integrity sha512-t7B/3MQf5M1T9hPCRG28DNGZUuxAuDqLYS03rJrIk2prj/UV7Z6FOneijhQhnv/Xa039vidXeVbvjK2SK5f7Gg== + dependencies: + "@babel/helper-annotate-as-pure" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-typeof-symbol@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.10.1.tgz#60c0239b69965d166b80a84de7315c1bc7e0bb0e" + integrity sha512-qX8KZcmbvA23zDi+lk9s6hC1FM7jgLHYIjuLgULgc8QtYnmB3tAVIYkNoKRQ75qWBeyzcoMoK8ZQmogGtC/w0g== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-typescript@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.10.1.tgz#2c54daea231f602468686d9faa76f182a94507a6" + integrity sha512-v+QWKlmCnsaimLeqq9vyCsVRMViZG1k2SZTlcZvB+TqyH570Zsij8nvVUZzOASCRiQFUxkLrn9Wg/kH0zgy5OQ== + dependencies: + "@babel/helper-create-class-features-plugin" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-syntax-typescript" "^7.10.1" + +"@babel/plugin-transform-unicode-escapes@^7.10.1": + version 
"7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.10.1.tgz#add0f8483dab60570d9e03cecef6c023aa8c9940" + integrity sha512-zZ0Poh/yy1d4jeDWpx/mNwbKJVwUYJX73q+gyh4bwtG0/iUlzdEu0sLMda8yuDFS6LBQlT/ST1SJAR6zYwXWgw== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/plugin-transform-unicode-regex@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.10.1.tgz#6b58f2aea7b68df37ac5025d9c88752443a6b43f" + integrity sha512-Y/2a2W299k0VIUdbqYm9X2qS6fE0CUBhhiPpimK6byy7OJ/kORLlIX+J6UrjgNu5awvs62k+6RSslxhcvVw2Tw== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + +"@babel/preset-env@^7.9.0", "@babel/preset-env@^7.9.5": + version "7.10.2" + resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.10.2.tgz#715930f2cf8573b0928005ee562bed52fb65fdfb" + integrity sha512-MjqhX0RZaEgK/KueRzh+3yPSk30oqDKJ5HP5tqTSB1e2gzGS3PLy7K0BIpnp78+0anFuSwOeuCf1zZO7RzRvEA== + dependencies: + "@babel/compat-data" "^7.10.1" + "@babel/helper-compilation-targets" "^7.10.2" + "@babel/helper-module-imports" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-proposal-async-generator-functions" "^7.10.1" + "@babel/plugin-proposal-class-properties" "^7.10.1" + "@babel/plugin-proposal-dynamic-import" "^7.10.1" + "@babel/plugin-proposal-json-strings" "^7.10.1" + "@babel/plugin-proposal-nullish-coalescing-operator" "^7.10.1" + "@babel/plugin-proposal-numeric-separator" "^7.10.1" + "@babel/plugin-proposal-object-rest-spread" "^7.10.1" + "@babel/plugin-proposal-optional-catch-binding" "^7.10.1" + "@babel/plugin-proposal-optional-chaining" "^7.10.1" + "@babel/plugin-proposal-private-methods" "^7.10.1" + "@babel/plugin-proposal-unicode-property-regex" "^7.10.1" + "@babel/plugin-syntax-async-generators" "^7.8.0" + "@babel/plugin-syntax-class-properties" "^7.10.1" + "@babel/plugin-syntax-dynamic-import" "^7.8.0" + "@babel/plugin-syntax-json-strings" "^7.8.0" + "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.0" + "@babel/plugin-syntax-numeric-separator" "^7.10.1" + "@babel/plugin-syntax-object-rest-spread" "^7.8.0" + "@babel/plugin-syntax-optional-catch-binding" "^7.8.0" + "@babel/plugin-syntax-optional-chaining" "^7.8.0" + "@babel/plugin-syntax-top-level-await" "^7.10.1" + "@babel/plugin-transform-arrow-functions" "^7.10.1" + "@babel/plugin-transform-async-to-generator" "^7.10.1" + "@babel/plugin-transform-block-scoped-functions" "^7.10.1" + "@babel/plugin-transform-block-scoping" "^7.10.1" + "@babel/plugin-transform-classes" "^7.10.1" + "@babel/plugin-transform-computed-properties" "^7.10.1" + "@babel/plugin-transform-destructuring" "^7.10.1" + "@babel/plugin-transform-dotall-regex" "^7.10.1" + "@babel/plugin-transform-duplicate-keys" "^7.10.1" + "@babel/plugin-transform-exponentiation-operator" "^7.10.1" + "@babel/plugin-transform-for-of" "^7.10.1" + "@babel/plugin-transform-function-name" "^7.10.1" + "@babel/plugin-transform-literals" "^7.10.1" + "@babel/plugin-transform-member-expression-literals" "^7.10.1" + "@babel/plugin-transform-modules-amd" "^7.10.1" + "@babel/plugin-transform-modules-commonjs" "^7.10.1" + "@babel/plugin-transform-modules-systemjs" "^7.10.1" + "@babel/plugin-transform-modules-umd" "^7.10.1" + "@babel/plugin-transform-named-capturing-groups-regex" "^7.8.3" + "@babel/plugin-transform-new-target" "^7.10.1" + 
"@babel/plugin-transform-object-super" "^7.10.1" + "@babel/plugin-transform-parameters" "^7.10.1" + "@babel/plugin-transform-property-literals" "^7.10.1" + "@babel/plugin-transform-regenerator" "^7.10.1" + "@babel/plugin-transform-reserved-words" "^7.10.1" + "@babel/plugin-transform-shorthand-properties" "^7.10.1" + "@babel/plugin-transform-spread" "^7.10.1" + "@babel/plugin-transform-sticky-regex" "^7.10.1" + "@babel/plugin-transform-template-literals" "^7.10.1" + "@babel/plugin-transform-typeof-symbol" "^7.10.1" + "@babel/plugin-transform-unicode-escapes" "^7.10.1" + "@babel/plugin-transform-unicode-regex" "^7.10.1" + "@babel/preset-modules" "^0.1.3" + "@babel/types" "^7.10.2" + browserslist "^4.12.0" + core-js-compat "^3.6.2" + invariant "^2.2.2" + levenary "^1.1.1" + semver "^5.5.0" + +"@babel/preset-modules@^0.1.3": + version "0.1.3" + resolved "https://registry.yarnpkg.com/@babel/preset-modules/-/preset-modules-0.1.3.tgz#13242b53b5ef8c883c3cf7dddd55b36ce80fbc72" + integrity sha512-Ra3JXOHBq2xd56xSF7lMKXdjBn3T772Y1Wet3yWnkDly9zHvJki029tAFzvAAK5cf4YV3yoxuP61crYRol6SVg== + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/plugin-proposal-unicode-property-regex" "^7.4.4" + "@babel/plugin-transform-dotall-regex" "^7.4.4" + "@babel/types" "^7.4.4" + esutils "^2.0.2" + +"@babel/preset-react@^7.9.4": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/preset-react/-/preset-react-7.10.1.tgz#e2ab8ae9a363ec307b936589f07ed753192de041" + integrity sha512-Rw0SxQ7VKhObmFjD/cUcKhPTtzpeviEFX1E6PgP+cYOhQ98icNqtINNFANlsdbQHrmeWnqdxA4Tmnl1jy5tp3Q== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-transform-react-display-name" "^7.10.1" + "@babel/plugin-transform-react-jsx" "^7.10.1" + "@babel/plugin-transform-react-jsx-development" "^7.10.1" + "@babel/plugin-transform-react-jsx-self" "^7.10.1" + "@babel/plugin-transform-react-jsx-source" "^7.10.1" + "@babel/plugin-transform-react-pure-annotations" "^7.10.1" + +"@babel/preset-typescript@^7.9.0": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/preset-typescript/-/preset-typescript-7.10.1.tgz#a8d8d9035f55b7d99a2461a0bdc506582914d07e" + integrity sha512-m6GV3y1ShiqxnyQj10600ZVOFrSSAa8HQ3qIUk2r+gcGtHTIRw0dJnFLt1WNXpKjtVw7yw1DAPU/6ma2ZvgJuA== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-transform-typescript" "^7.10.1" + +"@babel/runtime-corejs3@^7.10.4": + version "7.10.5" + resolved "https://registry.yarnpkg.com/@babel/runtime-corejs3/-/runtime-corejs3-7.10.5.tgz#a57fe6c13045ca33768a2aa527ead795146febe1" + integrity sha512-RMafpmrNB5E/bwdSphLr8a8++9TosnyJp98RZzI6VOx2R2CCMpsXXXRvmI700O9oEKpXdZat6oEK68/F0zjd4A== + dependencies: + core-js-pure "^3.0.0" + regenerator-runtime "^0.13.4" + +"@babel/runtime@^7.0.0", "@babel/runtime@^7.7.2": + version "7.10.5" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.10.5.tgz#303d8bd440ecd5a491eae6117fd3367698674c5c" + integrity sha512-otddXKhdNn7d0ptoFRHtMLa8LqDxLYwTjB4nYgM1yy5N6gU/MUf8zqyyLltCH3yAVitBzmwK4us+DD0l/MauAg== + dependencies: + regenerator-runtime "^0.13.4" + +"@babel/runtime@^7.1.2", "@babel/runtime@^7.5.5", "@babel/runtime@^7.8.4", "@babel/runtime@^7.9.2": + version "7.10.2" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.10.2.tgz#d103f21f2602497d38348a32e008637d506db839" + integrity sha512-6sF3uQw2ivImfVIl62RZ7MXhO2tap69WeWK57vAaimT6AZbE4FbqjdEJIN1UqoD6wI6B+1n9UiagafH1sxjOtg== + dependencies: + regenerator-runtime "^0.13.4" + +"@babel/template@^7.10.1": + version "7.10.1" 
+ resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.10.1.tgz#e167154a94cb5f14b28dc58f5356d2162f539811" + integrity sha512-OQDg6SqvFSsc9A0ej6SKINWrpJiNonRIniYondK2ViKhB06i3c0s+76XUft71iqBEe9S1OKsHwPAjfHnuvnCig== + dependencies: + "@babel/code-frame" "^7.10.1" + "@babel/parser" "^7.10.1" + "@babel/types" "^7.10.1" + +"@babel/template@^7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.10.4.tgz#3251996c4200ebc71d1a8fc405fba940f36ba278" + integrity sha512-ZCjD27cGJFUB6nmCB1Enki3r+L5kJveX9pq1SvAUKoICy6CZ9yD8xO086YXdYhvNjBdnekm4ZnaP5yC8Cs/1tA== + dependencies: + "@babel/code-frame" "^7.10.4" + "@babel/parser" "^7.10.4" + "@babel/types" "^7.10.4" + +"@babel/traverse@^7.0.0": + version "7.10.5" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.10.5.tgz#77ce464f5b258be265af618d8fddf0536f20b564" + integrity sha512-yc/fyv2gUjPqzTz0WHeRJH2pv7jA9kA7mBX2tXl/x5iOE81uaVPuGPtaYk7wmkx4b67mQ7NqI8rmT2pF47KYKQ== + dependencies: + "@babel/code-frame" "^7.10.4" + "@babel/generator" "^7.10.5" + "@babel/helper-function-name" "^7.10.4" + "@babel/helper-split-export-declaration" "^7.10.4" + "@babel/parser" "^7.10.5" + "@babel/types" "^7.10.5" + debug "^4.1.0" + globals "^11.1.0" + lodash "^4.17.19" + +"@babel/traverse@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.10.1.tgz#bbcef3031e4152a6c0b50147f4958df54ca0dd27" + integrity sha512-C/cTuXeKt85K+p08jN6vMDz8vSV0vZcI0wmQ36o6mjbuo++kPMdpOYw23W2XH04dbRt9/nMEfA4W3eR21CD+TQ== + dependencies: + "@babel/code-frame" "^7.10.1" + "@babel/generator" "^7.10.1" + "@babel/helper-function-name" "^7.10.1" + "@babel/helper-split-export-declaration" "^7.10.1" + "@babel/parser" "^7.10.1" + "@babel/types" "^7.10.1" + debug "^4.1.0" + globals "^11.1.0" + lodash "^4.17.13" + +"@babel/traverse@^7.10.4", "@babel/traverse@^7.10.5", "@babel/traverse@^7.9.0": + version "7.11.0" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.11.0.tgz#9b996ce1b98f53f7c3e4175115605d56ed07dd24" + integrity sha512-ZB2V+LskoWKNpMq6E5UUCrjtDUh5IOTAyIl0dTjIEoXum/iKWkoIEKIRDnUucO6f+2FzNkE0oD4RLKoPIufDtg== + dependencies: + "@babel/code-frame" "^7.10.4" + "@babel/generator" "^7.11.0" + "@babel/helper-function-name" "^7.10.4" + "@babel/helper-split-export-declaration" "^7.11.0" + "@babel/parser" "^7.11.0" + "@babel/types" "^7.11.0" + debug "^4.1.0" + globals "^11.1.0" + lodash "^4.17.19" + +"@babel/types@^7.10.1", "@babel/types@^7.10.2", "@babel/types@^7.4.4", "@babel/types@^7.9.5": + version "7.10.2" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.10.2.tgz#30283be31cad0dbf6fb00bd40641ca0ea675172d" + integrity sha512-AD3AwWBSz0AWF0AkCN9VPiWrvldXq+/e3cHa4J89vo4ymjz1XwrBFFVZmkJTsQIPNk+ZVomPSXUJqq8yyjZsng== + dependencies: + "@babel/helper-validator-identifier" "^7.10.1" + lodash "^4.17.13" + to-fast-properties "^2.0.0" + +"@babel/types@^7.10.4", "@babel/types@^7.10.5": + version "7.10.5" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.10.5.tgz#d88ae7e2fde86bfbfe851d4d81afa70a997b5d15" + integrity sha512-ixV66KWfCI6GKoA/2H9v6bQdbfXEwwpOdQ8cRvb4F+eyvhlaHxWFMQB4+3d9QFJXZsiiiqVrewNV0DFEQpyT4Q== + dependencies: + "@babel/helper-validator-identifier" "^7.10.4" + lodash "^4.17.19" + to-fast-properties "^2.0.0" + +"@babel/types@^7.11.0": + version "7.11.0" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.11.0.tgz#2ae6bf1ba9ae8c3c43824e5861269871b206e90d" + integrity 
sha512-O53yME4ZZI0jO1EVGtF1ePGl0LHirG4P1ibcD80XyzZcKhcMFeCXmh4Xb1ifGBIV233Qg12x4rBfQgA+tmOukA== + dependencies: + "@babel/helper-validator-identifier" "^7.10.4" + lodash "^4.17.19" + to-fast-properties "^2.0.0" + +"@bugsnag/browser@^7.2.1": + version "7.2.1" + resolved "https://registry.yarnpkg.com/@bugsnag/browser/-/browser-7.2.1.tgz#bf14a827412d6eaeeb43dfff58df95249fae6040" + integrity sha512-9lspcQncEq031DaXYID/fg8RG+B1tKpXd/P3QqHHEK1+adfk6q4SNY+s7B1H66kdZ2wUmiHphhjBWL3KDC2bSA== + dependencies: + "@bugsnag/core" "^7.2.1" + +"@bugsnag/core@^7.2.1": + version "7.2.1" + resolved "https://registry.yarnpkg.com/@bugsnag/core/-/core-7.2.1.tgz#b694121a677462639144a04fde2b0156885f8ef1" + integrity sha512-frGarZoLB2Rf7/ohsCcGW24qrt1D/EcOUehjAq1U/0R7Amw/oVeeyTE6bBu8OCIh7Giw7f8vfB6uHHxgEEfM9w== + dependencies: + "@bugsnag/cuid" "^3.0.0" + "@bugsnag/safe-json-stringify" "^6.0.0" + error-stack-parser "^2.0.3" + iserror "0.0.2" + stack-generator "^2.0.3" + +"@bugsnag/cuid@^3.0.0": + version "3.0.0" + resolved "https://registry.yarnpkg.com/@bugsnag/cuid/-/cuid-3.0.0.tgz#2ee7642a30aee6dc86f5e7f824653741e42e5c35" + integrity sha512-LOt8aaBI+KvOQGneBtpuCz3YqzyEAehd1f3nC5yr9TIYW1+IzYKa2xWS4EiMz5pPOnRPHkyyS5t/wmSmN51Gjg== + +"@bugsnag/js@^7.0.0": + version "7.2.1" + resolved "https://registry.yarnpkg.com/@bugsnag/js/-/js-7.2.1.tgz#843711039d52766295e7b7d231654dde36726845" + integrity sha512-Z6tEEnCqQ8/XE+zIuouMOnmQOmAEMwQnmBO6VrR+IkN7bULGVlotMTj1KMBI6wBkri+jBcSYWcO5CR9ESgKcUg== + dependencies: + "@bugsnag/browser" "^7.2.1" + "@bugsnag/node" "^7.2.1" + +"@bugsnag/node@^7.2.1": + version "7.2.1" + resolved "https://registry.yarnpkg.com/@bugsnag/node/-/node-7.2.1.tgz#ac1f66d1754b69f63f3c404a1c834daf316056a2" + integrity sha512-y7wXZ8+uGsziyyfu8cEWig4w4i7CZOL0Ip3VwZJxRNjihBc0XsxLvqmiJgkzy8POM+iVKO5DY9lqUXywhcVtYw== + dependencies: + "@bugsnag/core" "^7.2.1" + byline "^5.0.0" + error-stack-parser "^2.0.2" + iserror "^0.0.2" + pump "^3.0.0" + stack-generator "^2.0.3" + +"@bugsnag/safe-json-stringify@^6.0.0": + version "6.0.0" + resolved "https://registry.yarnpkg.com/@bugsnag/safe-json-stringify/-/safe-json-stringify-6.0.0.tgz#22abdcd83e008c369902976730c34c150148a758" + integrity sha512-htzFO1Zc57S8kgdRK9mLcPVTW1BY2ijfH7Dk2CeZmspTWKdKqSo1iwmqrq2WtRjFlo8aRZYgLX0wFrDXF/9DLA== + +"@csstools/convert-colors@^1.4.0": + version "1.4.0" + resolved "https://registry.yarnpkg.com/@csstools/convert-colors/-/convert-colors-1.4.0.tgz#ad495dc41b12e75d588c6db8b9834f08fa131eb7" + integrity sha512-5a6wqoJV/xEdbRNKVo6I4hO3VjyDq//8q2f9I6PBAvMesJHFauXDorcNCsr9RzvsZnaWi5NYCcfyqP1QeFHFbw== + +"@dabh/diagnostics@^2.0.2": + version "2.0.2" + resolved "https://registry.yarnpkg.com/@dabh/diagnostics/-/diagnostics-2.0.2.tgz#290d08f7b381b8f94607dc8f471a12c675f9db31" + integrity sha512-+A1YivoVDNNVCdfozHSR8v/jyuuLTMXwjWuxPFlFlUapXoGc+Gj9mDlTDDfrwl7rXCl2tNZ0kE8sIBO6YOn96Q== + dependencies: + colorspace "1.1.x" + enabled "2.0.x" + kuler "^2.0.0" + +"@docusaurus/core@^2.0.0-alpha.61": + version "2.0.0-alpha.61" + resolved "https://registry.yarnpkg.com/@docusaurus/core/-/core-2.0.0-alpha.61.tgz#4b736d36687bd12fb9399f9ce352e87da4bf8fe7" + integrity sha512-Ev0v5J7L/Pm3VJMdhhyR8I9tUQo8MhVRUUT+Bf0W3TMYG6jp2cIXE88yCfxOsTDducS7EMrdtUXfvePGH9CE/A== + dependencies: + "@babel/core" "^7.9.0" + "@babel/plugin-proposal-nullish-coalescing-operator" "^7.10.1" + "@babel/plugin-proposal-optional-chaining" "^7.10.3" + "@babel/plugin-syntax-dynamic-import" "^7.8.3" + "@babel/plugin-transform-runtime" "^7.9.0" + "@babel/preset-env" "^7.9.0" + "@babel/preset-react" 
"^7.9.4" + "@babel/preset-typescript" "^7.9.0" + "@babel/runtime" "^7.9.2" + "@babel/runtime-corejs3" "^7.10.4" + "@docusaurus/types" "^2.0.0-alpha.61" + "@docusaurus/utils" "^2.0.0-alpha.61" + "@endiliey/static-site-generator-webpack-plugin" "^4.0.0" + "@hapi/joi" "^17.1.1" + "@svgr/webpack" "^5.4.0" + babel-loader "^8.1.0" + babel-plugin-dynamic-import-node "^2.3.0" + boxen "^4.2.0" + cache-loader "^4.1.0" + chalk "^3.0.0" + chokidar "^3.3.0" + commander "^4.0.1" + copy-webpack-plugin "^5.0.5" + core-js "^2.6.5" + css-loader "^3.4.2" + del "^5.1.0" + detect-port "^1.3.0" + eta "^1.1.1" + express "^4.17.1" + file-loader "^6.0.0" + fs-extra "^8.1.0" + globby "^10.0.1" + html-minifier-terser "^5.0.5" + html-tags "^3.1.0" + html-webpack-plugin "^4.0.4" + import-fresh "^3.2.1" + inquirer "^7.2.0" + is-root "^2.1.0" + lodash "^4.5.2" + lodash.has "^4.5.2" + lodash.isplainobject "^4.0.6" + lodash.isstring "^4.0.1" + mini-css-extract-plugin "^0.8.0" + nprogress "^0.2.0" + null-loader "^3.0.0" + optimize-css-assets-webpack-plugin "^5.0.3" + pnp-webpack-plugin "^1.6.4" + postcss-loader "^3.0.0" + postcss-preset-env "^6.7.0" + react-dev-utils "^10.2.1" + react-helmet "^6.0.0-beta" + react-loadable "^5.5.0" + react-loadable-ssr-addon "^0.2.3" + react-router "^5.1.2" + react-router-config "^5.1.1" + react-router-dom "^5.1.2" + resolve-pathname "^3.0.0" + semver "^6.3.0" + serve-handler "^6.1.3" + shelljs "^0.8.4" + std-env "^2.2.1" + terser-webpack-plugin "^2.3.5" + update-notifier "^4.1.0" + url-loader "^4.1.0" + wait-file "^1.0.5" + webpack "^4.41.2" + webpack-bundle-analyzer "^3.6.1" + webpack-dev-server "^3.11.0" + webpack-merge "^4.2.2" + webpackbar "^4.0.0" + +"@docusaurus/mdx-loader@^2.0.0-alpha.61": + version "2.0.0-alpha.61" + resolved "https://registry.yarnpkg.com/@docusaurus/mdx-loader/-/mdx-loader-2.0.0-alpha.61.tgz#90b33f928c72d47c51a3d438990e30f143b2b1ee" + integrity sha512-n7VMfyshgMjoVI2YdQFlPVcMTSR+XOl2UbOTgJXDmD4yCeLOSaj63g8fwVoCy+NRkPgjpWGTGCeLNs63dk9jYg== + dependencies: + "@babel/parser" "^7.9.4" + "@babel/traverse" "^7.9.0" + "@docusaurus/core" "^2.0.0-alpha.61" + "@mdx-js/mdx" "^1.5.8" + "@mdx-js/react" "^1.5.8" + escape-html "^1.0.3" + file-loader "^6.0.0" + fs-extra "^8.1.0" + github-slugger "^1.3.0" + gray-matter "^4.0.2" + loader-utils "^1.2.3" + mdast-util-to-string "^1.1.0" + remark-emoji "^2.1.0" + stringify-object "^3.3.0" + unist-util-visit "^2.0.2" + url-loader "^4.1.0" + +"@docusaurus/plugin-client-redirects@^2.0.0-alpha.61": + version "2.0.0-alpha.61" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-client-redirects/-/plugin-client-redirects-2.0.0-alpha.61.tgz#53dff9fa1d047435975fee035cd11dcf34fc613e" + integrity sha512-4YvU1gVH0USGdoBlJ8XJUs+1pUXxki7pY0DK8bKLn81ufwm69X5DRhlEEl//blZPGg4+M/v6Yjz31yPDlsDRyQ== + dependencies: + "@docusaurus/types" "^2.0.0-alpha.61" + "@docusaurus/utils" "^2.0.0-alpha.61" + "@hapi/joi" "^17.1.1" + "@types/hapi__joi" "^17.1.2" + chalk "^3.0.0" + eta "^1.1.1" + fs-extra "^8.1.0" + globby "^10.0.1" + lodash "^4.17.15" + +"@docusaurus/plugin-content-docs@^2.0.0-alpha.61": + version "2.0.0-alpha.61" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.0.0-alpha.61.tgz#aa9746aa6302ae6bb7df96152734a138fba649c6" + integrity sha512-1WojgF+0ZQoARVF3I++2ghzG0sY4panxNiWv8Mzo2MdqECj3lgmR8jaVUSXj4bcTzX7uAEVS9MqKYIf3DBpgYg== + dependencies: + "@docusaurus/core" "^2.0.0-alpha.61" + "@docusaurus/mdx-loader" "^2.0.0-alpha.61" + "@docusaurus/types" "^2.0.0-alpha.61" + "@docusaurus/utils" 
"^2.0.0-alpha.61" + "@docusaurus/utils-validation" "^2.0.0-alpha.61" + "@hapi/joi" "17.1.1" + execa "^3.4.0" + fs-extra "^8.1.0" + globby "^10.0.1" + import-fresh "^3.2.1" + loader-utils "^1.2.3" + lodash.flatmap "^4.5.0" + lodash.groupby "^4.6.0" + lodash.pick "^4.4.0" + lodash.pickby "^4.6.0" + lodash.sortby "^4.6.0" + remark-admonitions "^1.2.1" + shelljs "^0.8.4" + +"@docusaurus/plugin-sitemap@^2.0.0-alpha.61": + version "2.0.0-alpha.61" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.0.0-alpha.61.tgz#7b3d14951d834051b57223b3e9eb8ffb9bedeaaa" + integrity sha512-7nXJl/zsnr8Hlzxn3bm9NhpwP4sRFGXWwSCWCC4FMrIw9ihXWTtMGe9hDuJx4DqC8xufyQMw26VGauH7XAWdMg== + dependencies: + "@docusaurus/types" "^2.0.0-alpha.61" + "@hapi/joi" "17.1.1" + fs-extra "^8.1.0" + sitemap "^3.2.2" + +"@docusaurus/theme-classic@^2.0.0-alpha.61": + version "2.0.0-alpha.61" + resolved "https://registry.yarnpkg.com/@docusaurus/theme-classic/-/theme-classic-2.0.0-alpha.61.tgz#1023603aa729efe4d929b5e2c3268af1b492b891" + integrity sha512-LPJwDi8iPzBe36+U65h4w5N5rXSuXuxPXWzBe/eF0/miR7VVCKydGSSubQLSMAXV0QWspGJIRSPnwuNH3DjJZg== + dependencies: + "@hapi/joi" "^17.1.1" + "@mdx-js/mdx" "^1.5.8" + "@mdx-js/react" "^1.5.8" + clsx "^1.1.1" + copy-text-to-clipboard "^2.2.0" + infima "0.2.0-alpha.12" + lodash "^4.17.19" + parse-numeric-range "^0.0.2" + prism-react-renderer "^1.1.0" + prismjs "^1.20.0" + prop-types "^15.7.2" + react-router-dom "^5.1.2" + react-toggle "^4.1.1" + +"@docusaurus/types@^2.0.0-alpha.61": + version "2.0.0-alpha.61" + resolved "https://registry.yarnpkg.com/@docusaurus/types/-/types-2.0.0-alpha.61.tgz#b2f5eaa18fb242100c0a8011edefe521b09e8375" + integrity sha512-x1fBiL/KNfREvA6B40CCTABjK9KP+kj/H/7mHfiwdtOYvVt9GJSgnjThkVD62lpVFbOhQ5C0togZsSzKlw6H/w== + dependencies: + "@types/webpack" "^4.41.0" + commander "^4.0.1" + querystring "0.2.0" + webpack-merge "^4.2.2" + +"@docusaurus/utils-validation@^2.0.0-alpha.61": + version "2.0.0-alpha.61" + resolved "https://registry.yarnpkg.com/@docusaurus/utils-validation/-/utils-validation-2.0.0-alpha.61.tgz#cc8ddbf4720f29b0cb1d630e7925ab338f9a5432" + integrity sha512-3QrJqZoR5eBz2XG0ijuTIp5AEOe1OHtuv7nkKArOCzFmjuBJLhUTRcECf0K+lcmdJ25zrRAWAYNgTvpVpBjaNg== + dependencies: + "@hapi/joi" "17.1.1" + +"@docusaurus/utils@^2.0.0-alpha.61": + version "2.0.0-alpha.61" + resolved "https://registry.yarnpkg.com/@docusaurus/utils/-/utils-2.0.0-alpha.61.tgz#bf7c7bd5a07419cafe9b67658f7f910b7fe71937" + integrity sha512-MHvR3Rq8Kk9W6skBR3x7mLsDaNrnp6Mmobyc0ZVql+eiLrjiN7SPunvrVJDE90bQ50HZFLLoAkfgfrvbX5mecg== + dependencies: + escape-string-regexp "^2.0.0" + fs-extra "^8.1.0" + gray-matter "^4.0.2" + lodash.camelcase "^4.3.0" + lodash.kebabcase "^4.1.1" + resolve-pathname "^3.0.0" + +"@emotion/babel-utils@^0.6.4": + version "0.6.10" + resolved "https://registry.yarnpkg.com/@emotion/babel-utils/-/babel-utils-0.6.10.tgz#83dbf3dfa933fae9fc566e54fbb45f14674c6ccc" + integrity sha512-/fnkM/LTEp3jKe++T0KyTszVGWNKPNOUJfjNKLO17BzQ6QPxgbg3whayom1Qr2oLFH3V92tDymU+dT5q676uow== + dependencies: + "@emotion/hash" "^0.6.6" + "@emotion/memoize" "^0.6.6" + "@emotion/serialize" "^0.9.1" + convert-source-map "^1.5.1" + find-root "^1.1.0" + source-map "^0.7.2" + +"@emotion/hash@^0.6.2", "@emotion/hash@^0.6.6": + version "0.6.6" + resolved "https://registry.yarnpkg.com/@emotion/hash/-/hash-0.6.6.tgz#62266c5f0eac6941fece302abad69f2ee7e25e44" + integrity sha512-ojhgxzUHZ7am3D2jHkMzPpsBAiB005GF5YU4ea+8DNPybMk01JJUM9V9YRlF/GE95tcOm8DxQvWA2jq19bGalQ== + +"@emotion/is-prop-valid@^0.8.1": 
+ version "0.8.8" + resolved "https://registry.yarnpkg.com/@emotion/is-prop-valid/-/is-prop-valid-0.8.8.tgz#db28b1c4368a259b60a97311d6a952d4fd01ac1a" + integrity sha512-u5WtneEAr5IDG2Wv65yhunPSMLIpuKsbuOktRojfrEiEvRyC85LgPMZI63cr7NUqT8ZIGdSVg8ZKGxIug4lXcA== + dependencies: + "@emotion/memoize" "0.7.4" + +"@emotion/memoize@0.7.4": + version "0.7.4" + resolved "https://registry.yarnpkg.com/@emotion/memoize/-/memoize-0.7.4.tgz#19bf0f5af19149111c40d98bb0cf82119f5d9eeb" + integrity sha512-Ja/Vfqe3HpuzRsG1oBtWTHk2PGZ7GR+2Vz5iYGelAw8dx32K0y7PjVuxK6z1nMpZOqAFsRUPCkK1YjJ56qJlgw== + +"@emotion/memoize@^0.6.1", "@emotion/memoize@^0.6.6": + version "0.6.6" + resolved "https://registry.yarnpkg.com/@emotion/memoize/-/memoize-0.6.6.tgz#004b98298d04c7ca3b4f50ca2035d4f60d2eed1b" + integrity sha512-h4t4jFjtm1YV7UirAFuSuFGyLa+NNxjdkq6DpFLANNQY5rHueFZHVY+8Cu1HYVP6DrheB0kv4m5xPjo7eKT7yQ== + +"@emotion/serialize@^0.9.1": + version "0.9.1" + resolved "https://registry.yarnpkg.com/@emotion/serialize/-/serialize-0.9.1.tgz#a494982a6920730dba6303eb018220a2b629c145" + integrity sha512-zTuAFtyPvCctHBEL8KZ5lJuwBanGSutFEncqLn/m9T1a6a93smBStK+bZzcNPgj4QS8Rkw9VTwJGhRIUVO8zsQ== + dependencies: + "@emotion/hash" "^0.6.6" + "@emotion/memoize" "^0.6.6" + "@emotion/unitless" "^0.6.7" + "@emotion/utils" "^0.8.2" + +"@emotion/stylis@^0.7.0": + version "0.7.1" + resolved "https://registry.yarnpkg.com/@emotion/stylis/-/stylis-0.7.1.tgz#50f63225e712d99e2b2b39c19c70fff023793ca5" + integrity sha512-/SLmSIkN13M//53TtNxgxo57mcJk/UJIDFRKwOiLIBEyBHEcipgR6hNMQ/59Sl4VjCJ0Z/3zeAZyvnSLPG/1HQ== + +"@emotion/unitless@^0.6.2", "@emotion/unitless@^0.6.7": + version "0.6.7" + resolved "https://registry.yarnpkg.com/@emotion/unitless/-/unitless-0.6.7.tgz#53e9f1892f725b194d5e6a1684a7b394df592397" + integrity sha512-Arj1hncvEVqQ2p7Ega08uHLr1JuRYBuO5cIvcA+WWEQ5+VmkOE3ZXzl04NbQxeQpWX78G7u6MqxKuNX3wvYZxg== + +"@emotion/unitless@^0.7.0": + version "0.7.5" + resolved "https://registry.yarnpkg.com/@emotion/unitless/-/unitless-0.7.5.tgz#77211291c1900a700b8a78cfafda3160d76949ed" + integrity sha512-OWORNpfjMsSSUBVrRBVGECkhWcULOAJz9ZW8uK9qgxD+87M7jHRcvh/A96XXNhXTLmKcoYSQtBEX7lHMO7YRwg== + +"@emotion/utils@^0.8.2": + version "0.8.2" + resolved "https://registry.yarnpkg.com/@emotion/utils/-/utils-0.8.2.tgz#576ff7fb1230185b619a75d258cbc98f0867a8dc" + integrity sha512-rLu3wcBWH4P5q1CGoSSH/i9hrXs7SlbRLkoq9IGuoPYNGQvDJ3pt/wmOM+XgYjIDRMVIdkUWt0RsfzF50JfnCw== + +"@endiliey/static-site-generator-webpack-plugin@^4.0.0": + version "4.0.0" + resolved "https://registry.yarnpkg.com/@endiliey/static-site-generator-webpack-plugin/-/static-site-generator-webpack-plugin-4.0.0.tgz#94bfe58fd83aeda355de797fcb5112adaca3a6b1" + integrity sha512-3MBqYCs30qk1OBRC697NqhGouYbs71D1B8hrk/AFJC6GwF2QaJOQZtA1JYAaGSe650sZ8r5ppRTtCRXepDWlng== + dependencies: + bluebird "^3.7.1" + cheerio "^0.22.0" + eval "^0.1.4" + url "^0.11.0" + webpack-sources "^1.4.3" + +"@fortawesome/fontawesome-common-types@^0.2.30": + version "0.2.30" + resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-0.2.30.tgz#2f1cc5b46bd76723be41d0013a8450c9ba92b777" + integrity sha512-TsRwpTuKwFNiPhk1UfKgw7zNPeV5RhNp2Uw3pws+9gDAkPGKrtjR1y2lI3SYn7+YzyfuNknflpBA1LRKjt7hMg== + +"@fortawesome/fontawesome-svg-core@^1.2.30": + version "1.2.30" + resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-1.2.30.tgz#f56dc6791861fe5d1af04fb8abddb94658c576db" + integrity 
sha512-E3sAXATKCSVnT17HYmZjjbcmwihrNOCkoU7dVMlasrcwiJAHxSKeZ+4WN5O+ElgO/FaYgJmASl8p9N7/B/RttA== + dependencies: + "@fortawesome/fontawesome-common-types" "^0.2.30" + +"@fortawesome/free-solid-svg-icons@^5.14.0": + version "5.14.0" + resolved "https://registry.yarnpkg.com/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-5.14.0.tgz#970453f5e8c4915ad57856c3a0252ac63f6fec18" + integrity sha512-M933RDM8cecaKMWDSk3FRYdnzWGW7kBBlGNGfvqLVwcwhUPNj9gcw+xZMrqBdRqxnSXdl3zWzTCNNGEtFUq67Q== + dependencies: + "@fortawesome/fontawesome-common-types" "^0.2.30" + +"@fortawesome/react-fontawesome@^0.1.11": + version "0.1.11" + resolved "https://registry.yarnpkg.com/@fortawesome/react-fontawesome/-/react-fontawesome-0.1.11.tgz#c1a95a2bdb6a18fa97b355a563832e248bf6ef4a" + integrity sha512-sClfojasRifQKI0OPqTy8Ln8iIhnxR/Pv/hukBhWnBz9kQRmqi6JSH3nghlhAY7SUeIIM7B5/D2G8WjX0iepVg== + dependencies: + prop-types "^15.7.2" + +"@hapi/address@2.x.x": + version "2.1.4" + resolved "https://registry.yarnpkg.com/@hapi/address/-/address-2.1.4.tgz#5d67ed43f3fd41a69d4b9ff7b56e7c0d1d0a81e5" + integrity sha512-QD1PhQk+s31P1ixsX0H0Suoupp3VMXzIVMSwobR3F3MSUO2YCV0B7xqLcUw/Bh8yuvd3LhpyqLQWTNcRmp6IdQ== + +"@hapi/address@^4.0.1": + version "4.1.0" + resolved "https://registry.yarnpkg.com/@hapi/address/-/address-4.1.0.tgz#d60c5c0d930e77456fdcde2598e77302e2955e1d" + integrity sha512-SkszZf13HVgGmChdHo/PxchnSaCJ6cetVqLzyciudzZRT0jcOouIF/Q93mgjw8cce+D+4F4C1Z/WrfFN+O3VHQ== + dependencies: + "@hapi/hoek" "^9.0.0" + +"@hapi/bourne@1.x.x": + version "1.3.2" + resolved "https://registry.yarnpkg.com/@hapi/bourne/-/bourne-1.3.2.tgz#0a7095adea067243ce3283e1b56b8a8f453b242a" + integrity sha512-1dVNHT76Uu5N3eJNTYcvxee+jzX4Z9lfciqRRHCU27ihbUcYi+iSc2iml5Ke1LXe1SyJCLA0+14Jh4tXJgOppA== + +"@hapi/formula@^2.0.0": + version "2.0.0" + resolved "https://registry.yarnpkg.com/@hapi/formula/-/formula-2.0.0.tgz#edade0619ed58c8e4f164f233cda70211e787128" + integrity sha512-V87P8fv7PI0LH7LiVi8Lkf3x+KCO7pQozXRssAHNXXL9L1K+uyu4XypLXwxqVDKgyQai6qj3/KteNlrqDx4W5A== + +"@hapi/hoek@8.x.x", "@hapi/hoek@^8.3.0": + version "8.5.1" + resolved "https://registry.yarnpkg.com/@hapi/hoek/-/hoek-8.5.1.tgz#fde96064ca446dec8c55a8c2f130957b070c6e06" + integrity sha512-yN7kbciD87WzLGc5539Tn0sApjyiGHAJgKvG9W8C7O+6c7qmoQMfVs0W4bX17eqz6C78QJqqFrtgdK5EWf6Qow== + +"@hapi/hoek@^9.0.0": + version "9.0.4" + resolved "https://registry.yarnpkg.com/@hapi/hoek/-/hoek-9.0.4.tgz#e80ad4e8e8d2adc6c77d985f698447e8628b6010" + integrity sha512-EwaJS7RjoXUZ2cXXKZZxZqieGtc7RbvQhUy8FwDoMQtxWVi14tFjeFCYPZAM1mBCpOpiBpyaZbb9NeHc7eGKgw== + +"@hapi/joi@17.1.1", "@hapi/joi@^17.1.1": + version "17.1.1" + resolved "https://registry.yarnpkg.com/@hapi/joi/-/joi-17.1.1.tgz#9cc8d7e2c2213d1e46708c6260184b447c661350" + integrity sha512-p4DKeZAoeZW4g3u7ZeRo+vCDuSDgSvtsB/NpfjXEHTUjSeINAi/RrVOWiVQ1isaoLzMvFEhe8n5065mQq1AdQg== + dependencies: + "@hapi/address" "^4.0.1" + "@hapi/formula" "^2.0.0" + "@hapi/hoek" "^9.0.0" + "@hapi/pinpoint" "^2.0.0" + "@hapi/topo" "^5.0.0" + +"@hapi/joi@^15.1.0": + version "15.1.1" + resolved "https://registry.yarnpkg.com/@hapi/joi/-/joi-15.1.1.tgz#c675b8a71296f02833f8d6d243b34c57b8ce19d7" + integrity sha512-entf8ZMOK8sc+8YfeOlM8pCfg3b5+WZIKBfUaaJT8UsjAAPjartzxIYm3TIbjvA4u+u++KbcXD38k682nVHDAQ== + dependencies: + "@hapi/address" "2.x.x" + "@hapi/bourne" "1.x.x" + "@hapi/hoek" "8.x.x" + "@hapi/topo" "3.x.x" + +"@hapi/pinpoint@^2.0.0": + version "2.0.0" + resolved "https://registry.yarnpkg.com/@hapi/pinpoint/-/pinpoint-2.0.0.tgz#805b40d4dbec04fc116a73089494e00f073de8df" + integrity 
sha512-vzXR5MY7n4XeIvLpfl3HtE3coZYO4raKXW766R6DZw/6aLqR26iuZ109K7a0NtF2Db0jxqh7xz2AxkUwpUFybw== + +"@hapi/topo@3.x.x": + version "3.1.6" + resolved "https://registry.yarnpkg.com/@hapi/topo/-/topo-3.1.6.tgz#68d935fa3eae7fdd5ab0d7f953f3205d8b2bfc29" + integrity sha512-tAag0jEcjwH+P2quUfipd7liWCNX2F8NvYjQp2wtInsZxnMlypdw0FtAOLxtvvkO+GSRRbmNi8m/5y42PQJYCQ== + dependencies: + "@hapi/hoek" "^8.3.0" + +"@hapi/topo@^5.0.0": + version "5.0.0" + resolved "https://registry.yarnpkg.com/@hapi/topo/-/topo-5.0.0.tgz#c19af8577fa393a06e9c77b60995af959be721e7" + integrity sha512-tFJlT47db0kMqVm3H4nQYgn6Pwg10GTZHb1pwmSiv1K4ks6drQOtfEF5ZnPjkvC+y4/bUPHK+bc87QvLcL+WMw== + dependencies: + "@hapi/hoek" "^9.0.0" + +"@jest/types@^24.9.0": + version "24.9.0" + resolved "https://registry.yarnpkg.com/@jest/types/-/types-24.9.0.tgz#63cb26cb7500d069e5a389441a7c6ab5e909fc59" + integrity sha512-XKK7ze1apu5JWQ5eZjHITP66AX+QsLlbaJRBGYr8pNzwcAE2JVkwnf0yqjHTsDRcjR0mujy/NmZMXw5kl+kGBw== + dependencies: + "@types/istanbul-lib-coverage" "^2.0.0" + "@types/istanbul-reports" "^1.1.1" + "@types/yargs" "^13.0.0" + +"@mdx-js/mdx@^1.5.8": + version "1.6.16" + resolved "https://registry.yarnpkg.com/@mdx-js/mdx/-/mdx-1.6.16.tgz#f01af0140539c1ce043d246259d8becd2153b2bb" + integrity sha512-jnYyJ0aCafCIehn3GjYcibIapaLBgs3YkoenNQBPcPFyyuUty7B3B07OE+pMllhJ6YkWeP/R5Ax19x0nqTzgJw== + dependencies: + "@babel/core" "7.10.5" + "@babel/plugin-syntax-jsx" "7.10.4" + "@babel/plugin-syntax-object-rest-spread" "7.8.3" + "@mdx-js/util" "1.6.16" + babel-plugin-apply-mdx-type-prop "1.6.16" + babel-plugin-extract-import-names "1.6.16" + camelcase-css "2.0.1" + detab "2.0.3" + hast-util-raw "6.0.0" + lodash.uniq "4.5.0" + mdast-util-to-hast "9.1.0" + remark-footnotes "1.0.0" + remark-mdx "1.6.16" + remark-parse "8.0.3" + remark-squeeze-paragraphs "4.0.0" + style-to-object "0.3.0" + unified "9.1.0" + unist-builder "2.0.3" + unist-util-visit "2.0.3" + +"@mdx-js/react@^1.5.8": + version "1.6.16" + resolved "https://registry.yarnpkg.com/@mdx-js/react/-/react-1.6.16.tgz#538eb14473194d0b3c54020cb230e426174315cd" + integrity sha512-+FhuSVOPo7+4fZaRwWuCSRUcZkJOkZu0rfAbBKvoCg1LWb1Td8Vzi0DTLORdSvgWNbU6+EL40HIgwTOs00x2Jw== + +"@mdx-js/util@1.6.16": + version "1.6.16" + resolved "https://registry.yarnpkg.com/@mdx-js/util/-/util-1.6.16.tgz#07a7342f6b61ea1ecbfb31e6e23bf7a8c79b9b57" + integrity sha512-SFtLGIGZummuyMDPRL5KdmpgI8U19Ble28UjEWihPjGxF1Lgj8aDjLWY8KiaUy9eqb9CKiVCqEIrK9jbnANfkw== + +"@mrmlnc/readdir-enhanced@^2.2.1": + version "2.2.1" + resolved "https://registry.yarnpkg.com/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz#524af240d1a360527b730475ecfa1344aa540dde" + integrity sha512-bPHp6Ji8b41szTOcaP63VlnbbO5Ny6dwAATtY6JTjh5N2OLrb5Qk/Th5cRkRQhkWCt+EJsYrNB0MiL+Gpn6e3g== + dependencies: + call-me-maybe "^1.0.1" + glob-to-regexp "^0.3.0" + +"@netlify/build@^3.1.2": + version "3.1.10" + resolved "https://registry.yarnpkg.com/@netlify/build/-/build-3.1.10.tgz#5309106bae4e0103e98b74e1cdea315428730e13" + integrity sha512-lm5XAAYltJBIc8BrcSakE6grfSGb3tSxTJfQODTktsdBfrAf8h73aF2B7CsMHzaq97DDyDXHG5aLHm/3hyFFrQ== + dependencies: + "@bugsnag/js" "^7.0.0" + "@netlify/cache-utils" "^1.0.1" + "@netlify/config" "^2.0.9" + "@netlify/functions-utils" "^1.2.6" + "@netlify/git-utils" "^1.0.1" + "@netlify/run-utils" "^1.0.0" + "@netlify/zip-it-and-ship-it" "^1.3.9" + "@sindresorhus/slugify" "^1.1.0" + analytics "0.5.5" + array-flat-polyfill "^1.0.1" + chalk "^3.0.0" + clean-stack "^2.2.0" + execa "^3.3.0" + figures "^3.2.0" + filter-obj "^2.0.1" + global-cache-dir "^1.0.1" + 
got "^9.6.0" + indent-string "^4.0.0" + is-ci "^2.0.0" + is-plain-obj "^2.1.0" + js-yaml "^3.13.1" + keep-func-props "^3.0.0" + locate-path "^5.0.0" + log-process-errors "^5.1.2" + make-dir "^3.0.2" + map-obj "^4.1.0" + memoize-one "^5.1.1" + os-name "^3.1.0" + p-event "^4.1.0" + p-reduce "^2.1.0" + path-exists "^4.0.0" + path-type "^4.0.0" + pkg-dir "^4.2.0" + pretty-ms "^5.1.0" + read-pkg-up "^7.0.1" + readdirp "^3.4.0" + resolve "^2.0.0-next.1" + safe-json-stringify "^1.2.0" + semver "^7.1.3" + string-width "^4.2.0" + strip-ansi "^6.0.0" + supports-color "^7.1.0" + update-notifier "^4.1.0" + uuid "^8.0.0" + yargs "^15.3.1" + +"@netlify/cache-utils@^1.0.1": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@netlify/cache-utils/-/cache-utils-1.0.1.tgz#74693e37844468e259364eb9bcee3d7ce929e8ed" + integrity sha512-wJBHxOWrqGiwgdgd/PxzJC6tVltODDYsqb5Dqs2pJi4LoIXFlAoW+scAWIIURHYs8zodte50pAcMg5cnLLUKPw== + dependencies: + array-flat-polyfill "^1.0.1" + cpy "^8.1.0" + del "^5.1.0" + get-stream "^5.1.0" + global-cache-dir "^1.0.1" + locate-path "^5.0.0" + move-file "^1.2.0" + path-exists "^4.0.0" + readdirp "^3.4.0" + +"@netlify/config@^0.11.5": + version "0.11.11" + resolved "https://registry.yarnpkg.com/@netlify/config/-/config-0.11.11.tgz#72d8d405f6f55f57ee3b542dc5d5ac05e34bb29c" + integrity sha512-Z7yzbx5qCX2I5RLlNyo0MMQ6GKJc8o5Nej9yspCavjqgYlUS7VJfbeE67WNxC26FXwDUqq00zJ0MrCS0Un1YOw== + dependencies: + array-flat-polyfill "^1.0.1" + chalk "^3.0.0" + deepmerge "^4.2.2" + execa "^3.4.0" + fast-safe-stringify "^2.0.7" + filter-obj "^2.0.1" + find-up "^4.1.0" + indent-string "^4.0.0" + is-plain-obj "^2.1.0" + js-yaml "^3.13.1" + netlify "^4.1.7" + p-filter "^2.1.0" + p-locate "^4.1.0" + path-exists "^4.0.0" + toml "^3.0.0" + tomlify-j0.4 "^3.0.0" + yargs "^15.3.0" + +"@netlify/config@^2.0.6", "@netlify/config@^2.0.9": + version "2.0.9" + resolved "https://registry.yarnpkg.com/@netlify/config/-/config-2.0.9.tgz#58355562e2c278dcf5098909bd2b272e689c0e53" + integrity sha512-RppGT4pUYFJEr3AfztNd5FWeNtkxWWO0tz4eycQWPlKce3QcYIWeTuodgzcXW09qQeBCk0oLn7AnkyaHOx2qHA== + dependencies: + array-flat-polyfill "^1.0.1" + chalk "^3.0.0" + deepmerge "^4.2.2" + execa "^3.4.0" + fast-safe-stringify "^2.0.7" + figures "^3.2.0" + filter-obj "^2.0.1" + find-up "^4.1.0" + indent-string "^4.0.0" + is-plain-obj "^2.1.0" + js-yaml "^3.14.0" + netlify "^4.3.10" + p-locate "^4.1.0" + path-exists "^4.0.0" + path-type "^4.0.0" + toml "^3.0.0" + tomlify-j0.4 "^3.0.0" + validate-npm-package-name "^3.0.0" + yargs "^15.3.0" + +"@netlify/functions-utils@^1.2.6": + version "1.2.6" + resolved "https://registry.yarnpkg.com/@netlify/functions-utils/-/functions-utils-1.2.6.tgz#2b37cd84a08a99df97708d9b5dc3058e758172e8" + integrity sha512-vgkX1qD98s6Q5+F0YP3ko78RB8r2OEMnZ+HB96MfxRd6KN5/1ghsuMKRCzIAFPJLOcE6F22/VyvmLbZ7RdOngg== + dependencies: + "@netlify/zip-it-and-ship-it" "^1.3.9" + cpy "^8.1.0" + path-exists "^4.0.0" + +"@netlify/git-utils@^1.0.1": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@netlify/git-utils/-/git-utils-1.0.1.tgz#32ee28828488fb9e0527ffbdcd997d7c0b0dfb5e" + integrity sha512-HiKpGyEHOW5ajkU/sQ8mSRIZvznC8GK1AAZWIeBmVRwZIzTykgmZM+HVjPUhfeFH+RK5bWQB4vg7dod2UttI8A== + dependencies: + execa "^3.4.0" + map-obj "^4.1.0" + micromatch "^4.0.2" + moize "^5.4.7" + path-exists "^4.0.0" + +"@netlify/open-api@^0.15.0": + version "0.15.0" + resolved "https://registry.yarnpkg.com/@netlify/open-api/-/open-api-0.15.0.tgz#cf75d2ed2131e73fbcbdddbf1638e92cd0855aa3" + integrity 
sha512-Nk1NswVrUtI7SDbyn/uvITYeIG5iukKlhsu/6fg4k1G5RMHqMDtVHy+2qcOhKmkf7Qc3ZkIKKd2mQfM6zg2cxw== + +"@netlify/run-utils@^1.0.0": + version "1.0.0" + resolved "https://registry.yarnpkg.com/@netlify/run-utils/-/run-utils-1.0.0.tgz#672571199c62fec0387757b98550869efa517176" + integrity sha512-TcdVJZ3J4iu+WYV9sLMBOr1jW2+XgLtIt1LfGYqsH+yqfkWoWavV27y5ttznM1BODFYQb9fMJG/xO2wAeXCzMw== + dependencies: + execa "^3.4.0" + +"@netlify/zip-it-and-ship-it@^1.3.6", "@netlify/zip-it-and-ship-it@^1.3.9": + version "1.3.9" + resolved "https://registry.yarnpkg.com/@netlify/zip-it-and-ship-it/-/zip-it-and-ship-it-1.3.9.tgz#10215838b417ed3605ab657a14733034f482889b" + integrity sha512-4bzOs77+jUvEeYEgLCqETI2VgMqRQYv3cRETZwrYx6paAYrBqP4krTRuahlSUzKHtNecuKdlCjQdoQ6fleKIuA== + dependencies: + archiver "^4.0.0" + common-path-prefix "^2.0.0" + cp-file "^7.0.0" + elf-tools "^1.1.1" + end-of-stream "^1.4.4" + find-up "^4.1.0" + glob "^7.1.6" + junk "^3.1.0" + locate-path "^5.0.0" + make-dir "^3.1.0" + p-map "^3.0.0" + path-exists "^4.0.0" + pkg-dir "^4.2.0" + precinct "^6.3.1" + require-package-name "^2.0.1" + resolve "^2.0.0-next.1" + semver "^7.3.2" + unixify "^1.0.0" + util.promisify "^1.0.1" + yargs "^15.4.1" + +"@nodelib/fs.scandir@2.1.3": + version "2.1.3" + resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.3.tgz#3a582bdb53804c6ba6d146579c46e52130cf4a3b" + integrity sha512-eGmwYQn3gxo4r7jdQnkrrN6bY478C3P+a/y72IJukF8LjB6ZHeB3c+Ehacj3sYeSmUXGlnA67/PmbM9CVwL7Dw== + dependencies: + "@nodelib/fs.stat" "2.0.3" + run-parallel "^1.1.9" + +"@nodelib/fs.stat@2.0.3", "@nodelib/fs.stat@^2.0.2": + version "2.0.3" + resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-2.0.3.tgz#34dc5f4cabbc720f4e60f75a747e7ecd6c175bd3" + integrity sha512-bQBFruR2TAwoevBEd/NWMoAAtNGzTRgdrqnYCc7dhzfoNvqPzLyqlEQnzZ3kVnNrSp25iyxE00/3h2fqGAGArA== + +"@nodelib/fs.stat@^1.1.2": + version "1.1.3" + resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-1.1.3.tgz#2b5a3ab3f918cca48a8c754c08168e3f03eba61b" + integrity sha512-shAmDyaQC4H92APFoIaVDHCx5bStIocgvbwQyxPRrbUY20V1EYTbSDchWbuwlMG3V17cprZhA6+78JfB+3DTPw== + +"@nodelib/fs.walk@^1.2.3": + version "1.2.4" + resolved "https://registry.yarnpkg.com/@nodelib/fs.walk/-/fs.walk-1.2.4.tgz#011b9202a70a6366e436ca5c065844528ab04976" + integrity sha512-1V9XOY4rDW0rehzbrcqAmHnz8e7SKvX27gh8Gt2WgB0+pdzdiLV83p72kZPU+jvMbS1qU5mauP2iOvO8rhmurQ== + dependencies: + "@nodelib/fs.scandir" "2.1.3" + fastq "^1.6.0" + +"@oclif/color@^0.x": + version "0.1.2" + resolved "https://registry.yarnpkg.com/@oclif/color/-/color-0.1.2.tgz#28b07e2850d9ce814d0b587ce3403b7ad8f7d987" + integrity sha512-M9o+DOrb8l603qvgz1FogJBUGLqcMFL1aFg2ZEL0FbXJofiNTLOWIeB4faeZTLwE6dt0xH9GpCVpzksMMzGbmA== + dependencies: + ansi-styles "^3.2.1" + chalk "^3.0.0" + strip-ansi "^5.2.0" + supports-color "^5.4.0" + tslib "^1" + +"@oclif/command@^1.5.12", "@oclif/command@^1.5.13", "@oclif/command@^1.5.18", "@oclif/command@^1.5.20", "@oclif/command@^1.6.0": + version "1.8.0" + resolved "https://registry.yarnpkg.com/@oclif/command/-/command-1.8.0.tgz#c1a499b10d26e9d1a611190a81005589accbb339" + integrity sha512-5vwpq6kbvwkQwKqAoOU3L72GZ3Ta8RRrewKj9OJRolx28KLJJ8Dg9Rf7obRwt5jQA9bkYd8gqzMTrI7H3xLfaw== + dependencies: + "@oclif/config" "^1.15.1" + "@oclif/errors" "^1.3.3" + "@oclif/parser" "^3.8.3" + "@oclif/plugin-help" "^3" + debug "^4.1.1" + semver "^7.3.2" + +"@oclif/config@^1.13.2", "@oclif/config@^1.15.1": + version "1.17.0" + resolved 
"https://registry.yarnpkg.com/@oclif/config/-/config-1.17.0.tgz#ba8639118633102a7e481760c50054623d09fcab" + integrity sha512-Lmfuf6ubjQ4ifC/9bz1fSCHc6F6E653oyaRXxg+lgT4+bYf9bk+nqrUpAbrXyABkCqgIBiFr3J4zR/kiFdE1PA== + dependencies: + "@oclif/errors" "^1.3.3" + "@oclif/parser" "^3.8.0" + debug "^4.1.1" + globby "^11.0.1" + is-wsl "^2.1.1" + tslib "^2.0.0" + +"@oclif/errors@^1.1.2", "@oclif/errors@^1.2.1", "@oclif/errors@^1.2.2", "@oclif/errors@^1.3.3": + version "1.3.3" + resolved "https://registry.yarnpkg.com/@oclif/errors/-/errors-1.3.3.tgz#fb597dfbc58c6b8609dc0b2fdf91a2d487818a82" + integrity sha512-EJR6AIOEkt/NnARNIVAskPDVtdhtO5TTNXmhDrGqMoWVsr0R6DkkLrMyq95BmHvlVWM1nduoq4fQPuCyuF2jaA== + dependencies: + clean-stack "^3.0.0" + fs-extra "^9.0.1" + indent-string "^4.0.0" + strip-ansi "^6.0.0" + wrap-ansi "^7.0.0" + +"@oclif/linewrap@^1.0.0": + version "1.0.0" + resolved "https://registry.yarnpkg.com/@oclif/linewrap/-/linewrap-1.0.0.tgz#aedcb64b479d4db7be24196384897b5000901d91" + integrity sha512-Ups2dShK52xXa8w6iBWLgcjPJWjais6KPJQq3gQ/88AY6BXoTX+MIGFPrWQO1KLMiQfoTpcLnUwloN4brrVUHw== + +"@oclif/parser@^3.8.0", "@oclif/parser@^3.8.3", "@oclif/parser@^3.8.4": + version "3.8.5" + resolved "https://registry.yarnpkg.com/@oclif/parser/-/parser-3.8.5.tgz#c5161766a1efca7343e1f25d769efbefe09f639b" + integrity sha512-yojzeEfmSxjjkAvMRj0KzspXlMjCfBzNRPkWw8ZwOSoNWoJn+OCS/m/S+yfV6BvAM4u2lTzX9Y5rCbrFIgkJLg== + dependencies: + "@oclif/errors" "^1.2.2" + "@oclif/linewrap" "^1.0.0" + chalk "^2.4.2" + tslib "^1.9.3" + +"@oclif/plugin-help@^2.2.0": + version "2.2.3" + resolved "https://registry.yarnpkg.com/@oclif/plugin-help/-/plugin-help-2.2.3.tgz#b993041e92047f0e1762668aab04d6738ac06767" + integrity sha512-bGHUdo5e7DjPJ0vTeRBMIrfqTRDBfyR5w0MP41u0n3r7YG5p14lvMmiCXxi6WDaP2Hw5nqx3PnkAIntCKZZN7g== + dependencies: + "@oclif/command" "^1.5.13" + chalk "^2.4.1" + indent-string "^4.0.0" + lodash.template "^4.4.0" + string-width "^3.0.0" + strip-ansi "^5.0.0" + widest-line "^2.0.1" + wrap-ansi "^4.0.0" + +"@oclif/plugin-help@^3": + version "3.2.0" + resolved "https://registry.yarnpkg.com/@oclif/plugin-help/-/plugin-help-3.2.0.tgz#b2c1112f49202ebce042f86b2e42e49908172ef1" + integrity sha512-7jxtpwVWAVbp1r46ZnTK/uF+FeZc6y4p1XcGaIUuPAp7wx6NJhIRN/iMT9UfNFX/Cz7mq+OyJz+E+i0zrik86g== + dependencies: + "@oclif/command" "^1.5.20" + "@oclif/config" "^1.15.1" + chalk "^2.4.1" + indent-string "^4.0.0" + lodash.template "^4.4.0" + string-width "^4.2.0" + strip-ansi "^6.0.0" + widest-line "^3.1.0" + wrap-ansi "^4.0.0" + +"@oclif/plugin-not-found@^1.1.4": + version "1.2.4" + resolved "https://registry.yarnpkg.com/@oclif/plugin-not-found/-/plugin-not-found-1.2.4.tgz#160108c82f0aa10f4fb52cee4e0135af34b7220b" + integrity sha512-G440PCuMi/OT8b71aWkR+kCWikngGtyRjOR24sPMDbpUFV4+B3r51fz1fcqeUiiEOYqUpr0Uy/sneUe1O/NfBg== + dependencies: + "@oclif/color" "^0.x" + "@oclif/command" "^1.6.0" + cli-ux "^4.9.0" + fast-levenshtein "^2.0.6" + lodash "^4.17.13" + +"@oclif/plugin-plugins@^1.7.8": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@oclif/plugin-plugins/-/plugin-plugins-1.9.0.tgz#f6098cc51646585bbf75480e180cef98b5437bc5" + integrity sha512-sq31nJk/n5pH5qGDioj2Z9x6MlRUrc/kkQrfCYKRPbQM80qewSP4RcPK3/gDvDSOAWD3wLAK9oMbDQO9lqImMA== + dependencies: + "@oclif/color" "^0.x" + "@oclif/command" "^1.5.12" + chalk "^2.4.2" + cli-ux "^5.2.1" + debug "^4.1.0" + fs-extra "^7.0.1" + http-call "^5.2.2" + load-json-file "^5.2.0" + npm-run-path "^3.0.0" + semver "^7.3.2" + tslib "^2.0.0" + yarn "^1.21.1" + +"@oclif/screen@^1.0.3": + version 
"1.0.4" + resolved "https://registry.yarnpkg.com/@oclif/screen/-/screen-1.0.4.tgz#b740f68609dfae8aa71c3a6cab15d816407ba493" + integrity sha512-60CHpq+eqnTxLZQ4PGHYNwUX572hgpMHGPtTWMjdTMsAvlm69lZV/4ly6O3sAYkomo4NggGcomrDpBe34rxUqw== + +"@octokit/auth-token@^2.4.0": + version "2.4.2" + resolved "https://registry.yarnpkg.com/@octokit/auth-token/-/auth-token-2.4.2.tgz#10d0ae979b100fa6b72fa0e8e63e27e6d0dbff8a" + integrity sha512-jE/lE/IKIz2v1+/P0u4fJqv0kYwXOTujKemJMFr6FeopsxlIK3+wKDCJGnysg81XID5TgZQbIfuJ5J0lnTiuyQ== + dependencies: + "@octokit/types" "^5.0.0" + +"@octokit/endpoint@^6.0.1": + version "6.0.5" + resolved "https://registry.yarnpkg.com/@octokit/endpoint/-/endpoint-6.0.5.tgz#43a6adee813c5ffd2f719e20cfd14a1fee7c193a" + integrity sha512-70K5u6zd45ItOny6aHQAsea8HHQjlQq85yqOMe+Aj8dkhN2qSJ9T+Q3YjUjEYfPRBcuUWNgMn62DQnP/4LAIiQ== + dependencies: + "@octokit/types" "^5.0.0" + is-plain-object "^4.0.0" + universal-user-agent "^6.0.0" + +"@octokit/plugin-paginate-rest@^1.1.1": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-1.1.2.tgz#004170acf8c2be535aba26727867d692f7b488fc" + integrity sha512-jbsSoi5Q1pj63sC16XIUboklNw+8tL9VOnJsWycWYR78TKss5PVpIPb1TUUcMQ+bBh7cY579cVAWmf5qG+dw+Q== + dependencies: + "@octokit/types" "^2.0.1" + +"@octokit/plugin-request-log@^1.0.0": + version "1.0.0" + resolved "https://registry.yarnpkg.com/@octokit/plugin-request-log/-/plugin-request-log-1.0.0.tgz#eef87a431300f6148c39a7f75f8cfeb218b2547e" + integrity sha512-ywoxP68aOT3zHCLgWZgwUJatiENeHE7xJzYjfz8WI0goynp96wETBF+d95b8g/uL4QmS6owPVlaxiz3wyMAzcw== + +"@octokit/plugin-rest-endpoint-methods@2.4.0": + version "2.4.0" + resolved "https://registry.yarnpkg.com/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-2.4.0.tgz#3288ecf5481f68c494dd0602fc15407a59faf61e" + integrity sha512-EZi/AWhtkdfAYi01obpX0DF7U6b1VRr30QNQ5xSFPITMdLSfhcBqjamE3F+sKcxPbD7eZuMHu3Qkk2V+JGxBDQ== + dependencies: + "@octokit/types" "^2.0.1" + deprecation "^2.3.1" + +"@octokit/request-error@^1.0.2": + version "1.2.1" + resolved "https://registry.yarnpkg.com/@octokit/request-error/-/request-error-1.2.1.tgz#ede0714c773f32347576c25649dc013ae6b31801" + integrity sha512-+6yDyk1EES6WK+l3viRDElw96MvwfJxCt45GvmjDUKWjYIb3PJZQkq3i46TwGwoPD4h8NmTrENmtyA1FwbmhRA== + dependencies: + "@octokit/types" "^2.0.0" + deprecation "^2.0.0" + once "^1.4.0" + +"@octokit/request-error@^2.0.0": + version "2.0.2" + resolved "https://registry.yarnpkg.com/@octokit/request-error/-/request-error-2.0.2.tgz#0e76b83f5d8fdda1db99027ea5f617c2e6ba9ed0" + integrity sha512-2BrmnvVSV1MXQvEkrb9zwzP0wXFNbPJij922kYBTLIlIafukrGOb+ABBT2+c6wZiuyWDH1K1zmjGQ0toN/wMWw== + dependencies: + "@octokit/types" "^5.0.1" + deprecation "^2.0.0" + once "^1.4.0" + +"@octokit/request@^5.2.0": + version "5.4.7" + resolved "https://registry.yarnpkg.com/@octokit/request/-/request-5.4.7.tgz#fd703ee092e0463ceba49ff7a3e61cb4cf8a0fde" + integrity sha512-FN22xUDP0i0uF38YMbOfx6TotpcENP5W8yJM1e/LieGXn6IoRxDMnBf7tx5RKSW4xuUZ/1P04NFZy5iY3Rax1A== + dependencies: + "@octokit/endpoint" "^6.0.1" + "@octokit/request-error" "^2.0.0" + "@octokit/types" "^5.0.0" + deprecation "^2.0.0" + is-plain-object "^4.0.0" + node-fetch "^2.3.0" + once "^1.4.0" + universal-user-agent "^6.0.0" + +"@octokit/rest@^16.28.1": + version "16.43.2" + resolved "https://registry.yarnpkg.com/@octokit/rest/-/rest-16.43.2.tgz#c53426f1e1d1044dee967023e3279c50993dd91b" + integrity sha512-ngDBevLbBTFfrHZeiS7SAMAZ6ssuVmXuya+F/7RaVvlysgGa1JKJkKWY+jV6TCJYcW0OALfJ7nTIGXcBXzycfQ== + 
dependencies: + "@octokit/auth-token" "^2.4.0" + "@octokit/plugin-paginate-rest" "^1.1.1" + "@octokit/plugin-request-log" "^1.0.0" + "@octokit/plugin-rest-endpoint-methods" "2.4.0" + "@octokit/request" "^5.2.0" + "@octokit/request-error" "^1.0.2" + atob-lite "^2.0.0" + before-after-hook "^2.0.0" + btoa-lite "^1.0.0" + deprecation "^2.0.0" + lodash.get "^4.4.2" + lodash.set "^4.3.2" + lodash.uniq "^4.5.0" + octokit-pagination-methods "^1.1.0" + once "^1.4.0" + universal-user-agent "^4.0.0" + +"@octokit/types@^2.0.0", "@octokit/types@^2.0.1": + version "2.16.2" + resolved "https://registry.yarnpkg.com/@octokit/types/-/types-2.16.2.tgz#4c5f8da3c6fecf3da1811aef678fda03edac35d2" + integrity sha512-O75k56TYvJ8WpAakWwYRN8Bgu60KrmX0z1KqFp1kNiFNkgW+JW+9EBKZ+S33PU6SLvbihqd+3drvPxKK68Ee8Q== + dependencies: + "@types/node" ">= 8" + +"@octokit/types@^5.0.0", "@octokit/types@^5.0.1": + version "5.2.1" + resolved "https://registry.yarnpkg.com/@octokit/types/-/types-5.2.1.tgz#c212f03b0492faf215fa2ae506d5ec18038c2a36" + integrity sha512-PugtgEw8u++zAyBpDpSkR8K1OsT2l8QWp3ECL6bZHFoq9PfHDoKeGFWSuX2Z+Ghy93k1fkKf8tsmqNBv+8dEfQ== + dependencies: + "@types/node" ">= 8" + +"@philpl/buble@^0.19.7": + version "0.19.7" + resolved "https://registry.yarnpkg.com/@philpl/buble/-/buble-0.19.7.tgz#27231e6391393793b64bc1c982fc7b593198b893" + integrity sha512-wKTA2DxAGEW+QffRQvOhRQ0VBiYU2h2p8Yc1oBNlqSKws48/8faxqKNIuub0q4iuyTuLwtB8EkwiKwhlfV1PBA== + dependencies: + acorn "^6.1.1" + acorn-class-fields "^0.2.1" + acorn-dynamic-import "^4.0.0" + acorn-jsx "^5.0.1" + chalk "^2.4.2" + magic-string "^0.25.2" + minimist "^1.2.0" + os-homedir "^1.0.1" + regexpu-core "^4.5.4" + +"@sindresorhus/is@^0.14.0": + version "0.14.0" + resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.14.0.tgz#9fb3a3cf3132328151f353de4632e01e52102bea" + integrity sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ== + +"@sindresorhus/is@^0.7.0": + version "0.7.0" + resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.7.0.tgz#9a06f4f137ee84d7df0460c1fdb1135ffa6c50fd" + integrity sha512-ONhaKPIufzzrlNbqtWFFd+jlnemX6lJAgq9ZeiZtS7I1PIf/la7CW4m83rTXRnVnsMbW2k56pGYu7AUFJD9Pow== + +"@sindresorhus/slugify@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@sindresorhus/slugify/-/slugify-1.1.0.tgz#2f195365d9b953384305b62664b44b4036c49430" + integrity sha512-ujZRbmmizX26yS/HnB3P9QNlNa4+UvHh+rIse3RbOXLp8yl6n1TxB4t7NHggtVgS8QmmOtzXo48kCxZGACpkPw== + dependencies: + "@sindresorhus/transliterate" "^0.1.1" + escape-string-regexp "^4.0.0" + +"@sindresorhus/transliterate@^0.1.1": + version "0.1.1" + resolved "https://registry.yarnpkg.com/@sindresorhus/transliterate/-/transliterate-0.1.1.tgz#779b31244781d3c898f185b61d58c89e7c782674" + integrity sha512-QSdIQ5keUFAZ3KLbfbsntW39ox0Ym8183RqTwBq/ZEFoN3NQAtGV+qWaNdzKpIDHgj9J2CQ2iNDRVU11Zyr7MQ== + dependencies: + escape-string-regexp "^2.0.0" + lodash.deburr "^4.1.0" + +"@svgr/babel-plugin-add-jsx-attribute@^5.4.0": + version "5.4.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-5.4.0.tgz#81ef61947bb268eb9d50523446f9c638fb355906" + integrity sha512-ZFf2gs/8/6B8PnSofI0inYXr2SDNTDScPXhN7k5EqD4aZ3gi6u+rbmZHVB8IM3wDyx8ntKACZbtXSm7oZGRqVg== + +"@svgr/babel-plugin-remove-jsx-attribute@^5.4.0": + version "5.4.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-5.4.0.tgz#6b2c770c95c874654fd5e1d5ef475b78a0a962ef" + integrity 
sha512-yaS4o2PgUtwLFGTKbsiAy6D0o3ugcUhWK0Z45umJ66EPWunAz9fuFw2gJuje6wqQvQWOTJvIahUwndOXb7QCPg== + +"@svgr/babel-plugin-remove-jsx-empty-expression@^5.0.1": + version "5.0.1" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-5.0.1.tgz#25621a8915ed7ad70da6cea3d0a6dbc2ea933efd" + integrity sha512-LA72+88A11ND/yFIMzyuLRSMJ+tRKeYKeQ+mR3DcAZ5I4h5CPWN9AHyUzJbWSYp/u2u0xhmgOe0+E41+GjEueA== + +"@svgr/babel-plugin-replace-jsx-attribute-value@^5.0.1": + version "5.0.1" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-5.0.1.tgz#0b221fc57f9fcd10e91fe219e2cd0dd03145a897" + integrity sha512-PoiE6ZD2Eiy5mK+fjHqwGOS+IXX0wq/YDtNyIgOrc6ejFnxN4b13pRpiIPbtPwHEc+NT2KCjteAcq33/F1Y9KQ== + +"@svgr/babel-plugin-svg-dynamic-title@^5.4.0": + version "5.4.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-5.4.0.tgz#139b546dd0c3186b6e5db4fefc26cb0baea729d7" + integrity sha512-zSOZH8PdZOpuG1ZVx/cLVePB2ibo3WPpqo7gFIjLV9a0QsuQAzJiwwqmuEdTaW2pegyBE17Uu15mOgOcgabQZg== + +"@svgr/babel-plugin-svg-em-dimensions@^5.4.0": + version "5.4.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-5.4.0.tgz#6543f69526632a133ce5cabab965deeaea2234a0" + integrity sha512-cPzDbDA5oT/sPXDCUYoVXEmm3VIoAWAPT6mSPTJNbQaBNUuEKVKyGH93oDY4e42PYHRW67N5alJx/eEol20abw== + +"@svgr/babel-plugin-transform-react-native-svg@^5.4.0": + version "5.4.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-5.4.0.tgz#00bf9a7a73f1cad3948cdab1f8dfb774750f8c80" + integrity sha512-3eYP/SaopZ41GHwXma7Rmxcv9uRslRDTY1estspeB1w1ueZWd/tPlMfEOoccYpEMZU3jD4OU7YitnXcF5hLW2Q== + +"@svgr/babel-plugin-transform-svg-component@^5.4.0": + version "5.4.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-5.4.0.tgz#a2212b4d018e6075a058bb7e220a66959ef7a03c" + integrity sha512-zLl4Fl3NvKxxjWNkqEcpdSOpQ3LGVH2BNFQ6vjaK6sFo2IrSznrhURIPI0HAphKiiIwNYjAfE0TNoQDSZv0U9A== + +"@svgr/babel-preset@^5.4.0": + version "5.4.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-preset/-/babel-preset-5.4.0.tgz#da21854643e1c4ad2279239baa7d5a8b128c1f15" + integrity sha512-Gyx7cCxua04DBtyILTYdQxeO/pwfTBev6+eXTbVbxe4HTGhOUW6yo7PSbG2p6eJMl44j6XSequ0ZDP7bl0nu9A== + dependencies: + "@svgr/babel-plugin-add-jsx-attribute" "^5.4.0" + "@svgr/babel-plugin-remove-jsx-attribute" "^5.4.0" + "@svgr/babel-plugin-remove-jsx-empty-expression" "^5.0.1" + "@svgr/babel-plugin-replace-jsx-attribute-value" "^5.0.1" + "@svgr/babel-plugin-svg-dynamic-title" "^5.4.0" + "@svgr/babel-plugin-svg-em-dimensions" "^5.4.0" + "@svgr/babel-plugin-transform-react-native-svg" "^5.4.0" + "@svgr/babel-plugin-transform-svg-component" "^5.4.0" + +"@svgr/core@^5.4.0": + version "5.4.0" + resolved "https://registry.yarnpkg.com/@svgr/core/-/core-5.4.0.tgz#655378ee43679eb94fee3d4e1976e38252dff8e7" + integrity sha512-hWGm1DCCvd4IEn7VgDUHYiC597lUYhFau2lwJBYpQWDirYLkX4OsXu9IslPgJ9UpP7wsw3n2Ffv9sW7SXJVfqQ== + dependencies: + "@svgr/plugin-jsx" "^5.4.0" + camelcase "^6.0.0" + cosmiconfig "^6.0.0" + +"@svgr/hast-util-to-babel-ast@^5.4.0": + version "5.4.0" + resolved "https://registry.yarnpkg.com/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-5.4.0.tgz#bb5d002e428f510aa5b53ec0a02377a95b367715" + integrity 
sha512-+U0TZZpPsP2V1WvVhqAOSTk+N+CjYHdZx+x9UBa1eeeZDXwH8pt0CrQf2+SvRl/h2CAPRFkm+Ey96+jKP8Bsgg== + dependencies: + "@babel/types" "^7.9.5" + +"@svgr/plugin-jsx@^5.4.0": + version "5.4.0" + resolved "https://registry.yarnpkg.com/@svgr/plugin-jsx/-/plugin-jsx-5.4.0.tgz#ab47504c55615833c6db70fca2d7e489f509787c" + integrity sha512-SGzO4JZQ2HvGRKDzRga9YFSqOqaNrgLlQVaGvpZ2Iht2gwRp/tq+18Pvv9kS9ZqOMYgyix2LLxZMY1LOe9NPqw== + dependencies: + "@babel/core" "^7.7.5" + "@svgr/babel-preset" "^5.4.0" + "@svgr/hast-util-to-babel-ast" "^5.4.0" + svg-parser "^2.0.2" + +"@svgr/plugin-svgo@^5.4.0": + version "5.4.0" + resolved "https://registry.yarnpkg.com/@svgr/plugin-svgo/-/plugin-svgo-5.4.0.tgz#45d9800b7099a6f7b4d85ebac89ab9abe8592f64" + integrity sha512-3Cgv3aYi1l6SHyzArV9C36yo4kgwVdF3zPQUC6/aCDUeXAofDYwE5kk3e3oT5ZO2a0N3lB+lLGvipBG6lnG8EA== + dependencies: + cosmiconfig "^6.0.0" + merge-deep "^3.0.2" + svgo "^1.2.2" + +"@svgr/webpack@^5.4.0": + version "5.4.0" + resolved "https://registry.yarnpkg.com/@svgr/webpack/-/webpack-5.4.0.tgz#b68bc86e29cf007292b96ced65f80971175632e0" + integrity sha512-LjepnS/BSAvelnOnnzr6Gg0GcpLmnZ9ThGFK5WJtm1xOqdBE/1IACZU7MMdVzjyUkfFqGz87eRE4hFaSLiUwYg== + dependencies: + "@babel/core" "^7.9.0" + "@babel/plugin-transform-react-constant-elements" "^7.9.0" + "@babel/preset-env" "^7.9.5" + "@babel/preset-react" "^7.9.4" + "@svgr/core" "^5.4.0" + "@svgr/plugin-jsx" "^5.4.0" + "@svgr/plugin-svgo" "^5.4.0" + loader-utils "^2.0.0" + +"@szmarczak/http-timer@^1.1.2": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@szmarczak/http-timer/-/http-timer-1.1.2.tgz#b1665e2c461a2cd92f4c1bbf50d5454de0d4b421" + integrity sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA== + dependencies: + defer-to-connect "^1.0.1" + +"@types/anymatch@*": + version "1.3.1" + resolved "https://registry.yarnpkg.com/@types/anymatch/-/anymatch-1.3.1.tgz#336badc1beecb9dacc38bea2cf32adf627a8421a" + integrity sha512-/+CRPXpBDpo2RK9C68N3b2cOvO0Cf5B9aPijHsoDQTHivnGSObdOF2BRQOYjojWTDy6nQvMjmqRXIxH55VjxxA== + +"@types/color-name@^1.1.1": + version "1.1.1" + resolved "https://registry.yarnpkg.com/@types/color-name/-/color-name-1.1.1.tgz#1c1261bbeaa10a8055bbc5d8ab84b7b2afc846a0" + integrity sha512-rr+OQyAjxze7GgWrSaJwydHStIhHq2lvY3BOC2Mj7KnzI7XK0Uw1TOOdI9lDoajEbSWLiYgoo4f1R51erQfhPQ== + +"@types/decompress@*": + version "4.2.3" + resolved "https://registry.yarnpkg.com/@types/decompress/-/decompress-4.2.3.tgz#98eed48af80001038aa05690b2094915f296fe65" + integrity sha512-W24e3Ycz1UZPgr1ZEDHlK4XnvOr+CpJH3qNsFeqXwwlW/9END9gxn3oJSsp7gYdiQxrXUHwUUd3xuzVz37MrZQ== + dependencies: + "@types/node" "*" + +"@types/download@^6.2.4": + version "6.2.4" + resolved "https://registry.yarnpkg.com/@types/download/-/download-6.2.4.tgz#d9fb74defe20d75f59a38a9b5b0eb5037d37161a" + integrity sha512-Lo5dy3ai6LNnbL663sgdzqL1eib11u1yKH6w3v3IXEOO4kRfQpMn1qWUTaumcHLACjFp1RcBx9tUXEvJoR3vcA== + dependencies: + "@types/decompress" "*" + "@types/got" "^8" + "@types/node" "*" + +"@types/glob@^7.1.1": + version "7.1.2" + resolved "https://registry.yarnpkg.com/@types/glob/-/glob-7.1.2.tgz#06ca26521353a545d94a0adc74f38a59d232c987" + integrity sha512-VgNIkxK+j7Nz5P7jvUZlRvhuPSmsEfS03b0alKcq5V/STUKAa3Plemsn5mrQUO7am6OErJ4rhGEGJbACclrtRA== + dependencies: + "@types/minimatch" "*" + "@types/node" "*" + +"@types/got@^8": + version "8.3.5" + resolved "https://registry.yarnpkg.com/@types/got/-/got-8.3.5.tgz#d8a0e8fa7598681b332a4d27779b022b2e55fb7f" + integrity 
sha512-AaXSrIF99SjjtPVNmCmYb388HML+PKEJb/xmj4SbL2ZO0hHuETZZzyDIKfOqaEoAHZEuX4sC+FRFrHYJoIby6A== + dependencies: + "@types/node" "*" + +"@types/hapi__joi@^17.1.2": + version "17.1.4" + resolved "https://registry.yarnpkg.com/@types/hapi__joi/-/hapi__joi-17.1.4.tgz#e46cd1bd81d25cd45247d652dadb3666514d807c" + integrity sha512-gqY3TeTyZvnyNhM02HgyCIoGIWsTFMnuzMfnD8evTsr1KIfueGJaz+QC77j+dFvhZ5cJArUNjDRHUjPxNohzGA== + +"@types/hast@^2.0.0": + version "2.3.1" + resolved "https://registry.yarnpkg.com/@types/hast/-/hast-2.3.1.tgz#b16872f2a6144c7025f296fb9636a667ebb79cd9" + integrity sha512-viwwrB+6xGzw+G1eWpF9geV3fnsDgXqHG+cqgiHrvQfDUW5hzhCyV7Sy3UJxhfRFBsgky2SSW33qi/YrIkjX5Q== + dependencies: + "@types/unist" "*" + +"@types/html-minifier-terser@^5.0.0": + version "5.1.0" + resolved "https://registry.yarnpkg.com/@types/html-minifier-terser/-/html-minifier-terser-5.1.0.tgz#551a4589b6ee2cc9c1dff08056128aec29b94880" + integrity sha512-iYCgjm1dGPRuo12+BStjd1HiVQqhlRhWDOQigNxn023HcjnhsiFz9pc6CzJj4HwDCSQca9bxTL4PxJDbkdm3PA== + +"@types/http-proxy@^1.17.3": + version "1.17.4" + resolved "https://registry.yarnpkg.com/@types/http-proxy/-/http-proxy-1.17.4.tgz#e7c92e3dbe3e13aa799440ff42e6d3a17a9d045b" + integrity sha512-IrSHl2u6AWXduUaDLqYpt45tLVCtYv7o4Z0s1KghBCDgIIS9oW5K1H8mZG/A2CfeLdEa7rTd1ACOiHBc1EMT2Q== + dependencies: + "@types/node" "*" + +"@types/istanbul-lib-coverage@*", "@types/istanbul-lib-coverage@^2.0.0": + version "2.0.3" + resolved "https://registry.yarnpkg.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.3.tgz#4ba8ddb720221f432e443bd5f9117fd22cfd4762" + integrity sha512-sz7iLqvVUg1gIedBOvlkxPlc8/uVzyS5OwGz1cKjXzkl3FpL3al0crU8YGU1WoHkxn0Wxbw5tyi6hvzJKNzFsw== + +"@types/istanbul-lib-report@*": + version "3.0.0" + resolved "https://registry.yarnpkg.com/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz#c14c24f18ea8190c118ee7562b7ff99a36552686" + integrity sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg== + dependencies: + "@types/istanbul-lib-coverage" "*" + +"@types/istanbul-reports@^1.1.1": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@types/istanbul-reports/-/istanbul-reports-1.1.2.tgz#e875cc689e47bce549ec81f3df5e6f6f11cfaeb2" + integrity sha512-P/W9yOX/3oPZSpaYOCQzGqgCQRXn0FFO/V8bWrCQs+wLmvVVxk6CRBXALEvNs9OHIatlnlFokfhuDo2ug01ciw== + dependencies: + "@types/istanbul-lib-coverage" "*" + "@types/istanbul-lib-report" "*" + +"@types/json-schema@^7.0.4": + version "7.0.5" + resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.5.tgz#dcce4430e64b443ba8945f0290fb564ad5bac6dd" + integrity sha512-7+2BITlgjgDhH0vvwZU/HZJVyk+2XUlvxXe8dFMedNX/aMkaOq++rMAFXc0tM7ij15QaWlbdQASBR9dihi+bDQ== + +"@types/mdast@^3.0.0": + version "3.0.3" + resolved "https://registry.yarnpkg.com/@types/mdast/-/mdast-3.0.3.tgz#2d7d671b1cd1ea3deb306ea75036c2a0407d2deb" + integrity sha512-SXPBMnFVQg1s00dlMCc/jCdvPqdE4mXaMMCeRlxLDmTAEoegHT53xKtkDnzDTOcmMHUfcjyf36/YYZ6SxRdnsw== + dependencies: + "@types/unist" "*" + +"@types/minimatch@*": + version "3.0.3" + resolved "https://registry.yarnpkg.com/@types/minimatch/-/minimatch-3.0.3.tgz#3dca0e3f33b200fc7d1139c0cd96c1268cadfd9d" + integrity sha512-tHq6qdbT9U1IRSGf14CL0pUlULksvY9OZ+5eEgl1N7t+OA3tGvNpxJCzuKQlsNgCVwbAs670L1vcVQi8j9HjnA== + +"@types/mkdirp@^0.5.2": + version "0.5.2" + resolved "https://registry.yarnpkg.com/@types/mkdirp/-/mkdirp-0.5.2.tgz#503aacfe5cc2703d5484326b1b27efa67a339c1f" + integrity 
sha512-U5icWpv7YnZYGsN4/cmh3WD2onMY0aJIiTE6+51TwJCttdHvtCYmkBNOobHlXwrJRL0nkH9jH4kD+1FAdMN4Tg== + dependencies: + "@types/node" "*" + +"@types/node-fetch@^2.1.6": + version "2.5.7" + resolved "https://registry.yarnpkg.com/@types/node-fetch/-/node-fetch-2.5.7.tgz#20a2afffa882ab04d44ca786449a276f9f6bbf3c" + integrity sha512-o2WVNf5UhWRkxlf6eq+jMZDu7kjgpgJfl4xVNlvryc95O/6F2ld8ztKX+qu+Rjyet93WAWm5LjeX9H5FGkODvw== + dependencies: + "@types/node" "*" + form-data "^3.0.0" + +"@types/node@*": + version "14.0.13" + resolved "https://registry.yarnpkg.com/@types/node/-/node-14.0.13.tgz#ee1128e881b874c371374c1f72201893616417c9" + integrity sha512-rouEWBImiRaSJsVA+ITTFM6ZxibuAlTuNOCyxVbwreu6k6+ujs7DfnU9o+PShFhET78pMBl3eH+AGSI5eOTkPA== + +"@types/node@>= 8": + version "14.0.27" + resolved "https://registry.yarnpkg.com/@types/node/-/node-14.0.27.tgz#a151873af5a5e851b51b3b065c9e63390a9e0eb1" + integrity sha512-kVrqXhbclHNHGu9ztnAwSncIgJv/FaxmzXJvGXNdcCpV1b8u1/Mi6z6m0vwy0LzKeXFTPLH0NzwmoJ3fNCIq0g== + +"@types/node@^13.11.1": + version "13.13.14" + resolved "https://registry.yarnpkg.com/@types/node/-/node-13.13.14.tgz#20cd7d2a98f0c3b08d379f4ea9e6b315d2019529" + integrity sha512-Az3QsOt1U/K1pbCQ0TXGELTuTkPLOiFIQf3ILzbOyo0FqgV9SxRnxbxM5QlAveERZMHpZY+7u3Jz2tKyl+yg6g== + +"@types/normalize-package-data@^2.4.0": + version "2.4.0" + resolved "https://registry.yarnpkg.com/@types/normalize-package-data/-/normalize-package-data-2.4.0.tgz#e486d0d97396d79beedd0a6e33f4534ff6b4973e" + integrity sha512-f5j5b/Gf71L+dbqxIpQ4Z2WlmI/mPJ0fOkGGmFgtb6sAu97EPczzbS3/tJKxmcYDj55OX6ssqwDAWOHIYDRDGA== + +"@types/parse-json@^4.0.0": + version "4.0.0" + resolved "https://registry.yarnpkg.com/@types/parse-json/-/parse-json-4.0.0.tgz#2f8bb441434d163b35fb8ffdccd7138927ffb8c0" + integrity sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA== + +"@types/parse5@^5.0.0": + version "5.0.3" + resolved "https://registry.yarnpkg.com/@types/parse5/-/parse5-5.0.3.tgz#e7b5aebbac150f8b5fdd4a46e7f0bd8e65e19109" + integrity sha512-kUNnecmtkunAoQ3CnjmMkzNU/gtxG8guhi+Fk2U/kOpIKjIMKnXGp4IJCgQJrXSgMsWYimYG4TGjz/UzbGEBTw== + +"@types/q@^1.5.1": + version "1.5.4" + resolved "https://registry.yarnpkg.com/@types/q/-/q-1.5.4.tgz#15925414e0ad2cd765bfef58842f7e26a7accb24" + integrity sha512-1HcDas8SEj4z1Wc696tH56G8OlRaH/sqZOynNNB+HF0WOeXPaxTtbYzJY2oEfiUxjSKjhCKr+MvR7dCHcEelug== + +"@types/semver@^5.5.0": + version "5.5.0" + resolved "https://registry.yarnpkg.com/@types/semver/-/semver-5.5.0.tgz#146c2a29ee7d3bae4bf2fcb274636e264c813c45" + integrity sha512-41qEJgBH/TWgo5NFSvBCJ1qkoi3Q6ONSF2avrHq1LVEZfYpdHmj0y9SuTK+u9ZhG1sYQKBL1AWXKyLWP4RaUoQ== + +"@types/source-list-map@*": + version "0.1.2" + resolved "https://registry.yarnpkg.com/@types/source-list-map/-/source-list-map-0.1.2.tgz#0078836063ffaf17412349bba364087e0ac02ec9" + integrity sha512-K5K+yml8LTo9bWJI/rECfIPrGgxdpeNbj+d53lwN4QjW1MCwlkhUms+gtdzigTeUyBr09+u8BwOIY3MXvHdcsA== + +"@types/tapable@*", "@types/tapable@^1.0.5": + version "1.0.6" + resolved "https://registry.yarnpkg.com/@types/tapable/-/tapable-1.0.6.tgz#a9ca4b70a18b270ccb2bc0aaafefd1d486b7ea74" + integrity sha512-W+bw9ds02rAQaMvaLYxAbJ6cvguW/iJXNT6lTssS1ps6QdrMKttqEAMEG/b5CR8TZl3/L7/lH0ZV5nNR1LXikA== + +"@types/uglify-js@*": + version "3.9.2" + resolved "https://registry.yarnpkg.com/@types/uglify-js/-/uglify-js-3.9.2.tgz#01992579debba674e1e359cd6bcb1a1d0ab2e02b" + integrity sha512-d6dIfpPbF+8B7WiCi2ELY7m0w1joD8cRW4ms88Emdb2w062NeEpbNCeWwVCgzLRpVG+5e74VFSg4rgJ2xXjEiQ== + dependencies: + source-map "^0.6.1" + 
+"@types/unist@*", "@types/unist@^2.0.0", "@types/unist@^2.0.2", "@types/unist@^2.0.3": + version "2.0.3" + resolved "https://registry.yarnpkg.com/@types/unist/-/unist-2.0.3.tgz#9c088679876f374eb5983f150d4787aa6fb32d7e" + integrity sha512-FvUupuM3rlRsRtCN+fDudtmytGO6iHJuuRKS1Ss0pG5z8oX0diNEw94UEL7hgDbpN94rgaK5R7sWm6RrSkZuAQ== + +"@types/webpack-sources@*": + version "1.4.0" + resolved "https://registry.yarnpkg.com/@types/webpack-sources/-/webpack-sources-1.4.0.tgz#e58f1f05f87d39a5c64cf85705bdbdbb94d4d57e" + integrity sha512-c88dKrpSle9BtTqR6ifdaxu1Lvjsl3C5OsfvuUbUwdXymshv1TkufUAXBajCCUM/f/TmnkZC/Esb03MinzSiXQ== + dependencies: + "@types/node" "*" + "@types/source-list-map" "*" + source-map "^0.7.3" + +"@types/webpack@^4.41.0", "@types/webpack@^4.41.8": + version "4.41.17" + resolved "https://registry.yarnpkg.com/@types/webpack/-/webpack-4.41.17.tgz#0a69005e644d657c85b7d6ec1c826a71bebd1c93" + integrity sha512-6FfeCidTSHozwKI67gIVQQ5Mp0g4X96c2IXxX75hYEQJwST/i6NyZexP//zzMOBb+wG9jJ7oO8fk9yObP2HWAw== + dependencies: + "@types/anymatch" "*" + "@types/node" "*" + "@types/tapable" "*" + "@types/uglify-js" "*" + "@types/webpack-sources" "*" + source-map "^0.6.0" + +"@types/yargs-parser@*": + version "15.0.0" + resolved "https://registry.yarnpkg.com/@types/yargs-parser/-/yargs-parser-15.0.0.tgz#cb3f9f741869e20cce330ffbeb9271590483882d" + integrity sha512-FA/BWv8t8ZWJ+gEOnLLd8ygxH/2UFbAvgEonyfN6yWGLKc7zVjbpl2Y4CTjid9h2RfgPP6SEt6uHwEOply00yw== + +"@types/yargs@^13.0.0": + version "13.0.9" + resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-13.0.9.tgz#44028e974343c7afcf3960f1a2b1099c39a7b5e1" + integrity sha512-xrvhZ4DZewMDhoH1utLtOAwYQy60eYFoXeje30TzM3VOvQlBwQaEpKFq5m34k1wOw2AKIi2pwtiAjdmhvlBUzg== + dependencies: + "@types/yargs-parser" "*" + +"@typescript-eslint/typescript-estree@^2.29.0": + version "2.34.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-2.34.0.tgz#14aeb6353b39ef0732cc7f1b8285294937cf37d5" + integrity sha512-OMAr+nJWKdlVM9LOqCqh3pQQPwxHAN7Du8DR6dmwCrAmxtiXQnhHJ6tBNtf+cggqfo51SG/FCwnKhXCIM7hnVg== + dependencies: + debug "^4.1.1" + eslint-visitor-keys "^1.1.0" + glob "^7.1.6" + is-glob "^4.0.1" + lodash "^4.17.15" + semver "^7.3.2" + tsutils "^3.17.1" + +"@webassemblyjs/ast@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.9.0.tgz#bd850604b4042459a5a41cd7d338cbed695ed964" + integrity sha512-C6wW5L+b7ogSDVqymbkkvuW9kruN//YisMED04xzeBBqjHa2FYnmvOlS6Xj68xWQRgWvI9cIglsjFowH/RJyEA== + dependencies: + "@webassemblyjs/helper-module-context" "1.9.0" + "@webassemblyjs/helper-wasm-bytecode" "1.9.0" + "@webassemblyjs/wast-parser" "1.9.0" + +"@webassemblyjs/floating-point-hex-parser@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.9.0.tgz#3c3d3b271bddfc84deb00f71344438311d52ffb4" + integrity sha512-TG5qcFsS8QB4g4MhrxK5TqfdNe7Ey/7YL/xN+36rRjl/BlGE/NcBvJcqsRgCP6Z92mRE+7N50pRIi8SmKUbcQA== + +"@webassemblyjs/helper-api-error@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.9.0.tgz#203f676e333b96c9da2eeab3ccef33c45928b6a2" + integrity sha512-NcMLjoFMXpsASZFxJ5h2HZRcEhDkvnNFOAKneP5RbKRzaWJN36NC4jqQHKwStIhGXu5mUWlUUk7ygdtrO8lbmw== + +"@webassemblyjs/helper-buffer@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-buffer/-/helper-buffer-1.9.0.tgz#a1442d269c5feb23fcbc9ef759dac3547f29de00" + integrity 
sha512-qZol43oqhq6yBPx7YM3m9Bv7WMV9Eevj6kMi6InKOuZxhw+q9hOkvq5e/PpKSiLfyetpaBnogSbNCfBwyB00CA== + +"@webassemblyjs/helper-code-frame@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.9.0.tgz#647f8892cd2043a82ac0c8c5e75c36f1d9159f27" + integrity sha512-ERCYdJBkD9Vu4vtjUYe8LZruWuNIToYq/ME22igL+2vj2dQ2OOujIZr3MEFvfEaqKoVqpsFKAGsRdBSBjrIvZA== + dependencies: + "@webassemblyjs/wast-printer" "1.9.0" + +"@webassemblyjs/helper-fsm@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-fsm/-/helper-fsm-1.9.0.tgz#c05256b71244214671f4b08ec108ad63b70eddb8" + integrity sha512-OPRowhGbshCb5PxJ8LocpdX9Kl0uB4XsAjl6jH/dWKlk/mzsANvhwbiULsaiqT5GZGT9qinTICdj6PLuM5gslw== + +"@webassemblyjs/helper-module-context@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-module-context/-/helper-module-context-1.9.0.tgz#25d8884b76839871a08a6c6f806c3979ef712f07" + integrity sha512-MJCW8iGC08tMk2enck1aPW+BE5Cw8/7ph/VGZxwyvGbJwjktKkDK7vy7gAmMDx88D7mhDTCNKAW5tED+gZ0W8g== + dependencies: + "@webassemblyjs/ast" "1.9.0" + +"@webassemblyjs/helper-wasm-bytecode@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.9.0.tgz#4fed8beac9b8c14f8c58b70d124d549dd1fe5790" + integrity sha512-R7FStIzyNcd7xKxCZH5lE0Bqy+hGTwS3LJjuv1ZVxd9O7eHCedSdrId/hMOd20I+v8wDXEn+bjfKDLzTepoaUw== + +"@webassemblyjs/helper-wasm-section@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.9.0.tgz#5a4138d5a6292ba18b04c5ae49717e4167965346" + integrity sha512-XnMB8l3ek4tvrKUUku+IVaXNHz2YsJyOOmz+MMkZvh8h1uSJpSen6vYnw3IoQ7WwEuAhL8Efjms1ZWjqh2agvw== + dependencies: + "@webassemblyjs/ast" "1.9.0" + "@webassemblyjs/helper-buffer" "1.9.0" + "@webassemblyjs/helper-wasm-bytecode" "1.9.0" + "@webassemblyjs/wasm-gen" "1.9.0" + +"@webassemblyjs/ieee754@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/ieee754/-/ieee754-1.9.0.tgz#15c7a0fbaae83fb26143bbacf6d6df1702ad39e4" + integrity sha512-dcX8JuYU/gvymzIHc9DgxTzUUTLexWwt8uCTWP3otys596io0L5aW02Gb1RjYpx2+0Jus1h4ZFqjla7umFniTg== + dependencies: + "@xtuc/ieee754" "^1.2.0" + +"@webassemblyjs/leb128@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/leb128/-/leb128-1.9.0.tgz#f19ca0b76a6dc55623a09cffa769e838fa1e1c95" + integrity sha512-ENVzM5VwV1ojs9jam6vPys97B/S65YQtv/aanqnU7D8aSoHFX8GyhGg0CMfyKNIHBuAVjy3tlzd5QMMINa7wpw== + dependencies: + "@xtuc/long" "4.2.2" + +"@webassemblyjs/utf8@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/utf8/-/utf8-1.9.0.tgz#04d33b636f78e6a6813227e82402f7637b6229ab" + integrity sha512-GZbQlWtopBTP0u7cHrEx+73yZKrQoBMpwkGEIqlacljhXCkVM1kMQge/Mf+csMJAjEdSwhOyLAS0AoR3AG5P8w== + +"@webassemblyjs/wasm-edit@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-edit/-/wasm-edit-1.9.0.tgz#3fe6d79d3f0f922183aa86002c42dd256cfee9cf" + integrity sha512-FgHzBm80uwz5M8WKnMTn6j/sVbqilPdQXTWraSjBwFXSYGirpkSWE2R9Qvz9tNiTKQvoKILpCuTjBKzOIm0nxw== + dependencies: + "@webassemblyjs/ast" "1.9.0" + "@webassemblyjs/helper-buffer" "1.9.0" + "@webassemblyjs/helper-wasm-bytecode" "1.9.0" + "@webassemblyjs/helper-wasm-section" "1.9.0" + "@webassemblyjs/wasm-gen" "1.9.0" + "@webassemblyjs/wasm-opt" "1.9.0" + "@webassemblyjs/wasm-parser" "1.9.0" + "@webassemblyjs/wast-printer" "1.9.0" + 
+"@webassemblyjs/wasm-gen@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-gen/-/wasm-gen-1.9.0.tgz#50bc70ec68ded8e2763b01a1418bf43491a7a49c" + integrity sha512-cPE3o44YzOOHvlsb4+E9qSqjc9Qf9Na1OO/BHFy4OI91XDE14MjFN4lTMezzaIWdPqHnsTodGGNP+iRSYfGkjA== + dependencies: + "@webassemblyjs/ast" "1.9.0" + "@webassemblyjs/helper-wasm-bytecode" "1.9.0" + "@webassemblyjs/ieee754" "1.9.0" + "@webassemblyjs/leb128" "1.9.0" + "@webassemblyjs/utf8" "1.9.0" + +"@webassemblyjs/wasm-opt@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-opt/-/wasm-opt-1.9.0.tgz#2211181e5b31326443cc8112eb9f0b9028721a61" + integrity sha512-Qkjgm6Anhm+OMbIL0iokO7meajkzQD71ioelnfPEj6r4eOFuqm4YC3VBPqXjFyyNwowzbMD+hizmprP/Fwkl2A== + dependencies: + "@webassemblyjs/ast" "1.9.0" + "@webassemblyjs/helper-buffer" "1.9.0" + "@webassemblyjs/wasm-gen" "1.9.0" + "@webassemblyjs/wasm-parser" "1.9.0" + +"@webassemblyjs/wasm-parser@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-parser/-/wasm-parser-1.9.0.tgz#9d48e44826df4a6598294aa6c87469d642fff65e" + integrity sha512-9+wkMowR2AmdSWQzsPEjFU7njh8HTO5MqO8vjwEHuM+AMHioNqSBONRdr0NQQ3dVQrzp0s8lTcYqzUdb7YgELA== + dependencies: + "@webassemblyjs/ast" "1.9.0" + "@webassemblyjs/helper-api-error" "1.9.0" + "@webassemblyjs/helper-wasm-bytecode" "1.9.0" + "@webassemblyjs/ieee754" "1.9.0" + "@webassemblyjs/leb128" "1.9.0" + "@webassemblyjs/utf8" "1.9.0" + +"@webassemblyjs/wast-parser@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-parser/-/wast-parser-1.9.0.tgz#3031115d79ac5bd261556cecc3fa90a3ef451914" + integrity sha512-qsqSAP3QQ3LyZjNC/0jBJ/ToSxfYJ8kYyuiGvtn/8MK89VrNEfwj7BPQzJVHi0jGTRK2dGdJ5PRqhtjzoww+bw== + dependencies: + "@webassemblyjs/ast" "1.9.0" + "@webassemblyjs/floating-point-hex-parser" "1.9.0" + "@webassemblyjs/helper-api-error" "1.9.0" + "@webassemblyjs/helper-code-frame" "1.9.0" + "@webassemblyjs/helper-fsm" "1.9.0" + "@xtuc/long" "4.2.2" + +"@webassemblyjs/wast-printer@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-printer/-/wast-printer-1.9.0.tgz#4935d54c85fef637b00ce9f52377451d00d47899" + integrity sha512-2J0nE95rHXHyQ24cWjMKJ1tqB/ds8z/cyeOZxJhcb+rW+SQASVjuznUSmdz5GpVJTzU8JkhYut0D3siFDD6wsA== + dependencies: + "@webassemblyjs/ast" "1.9.0" + "@webassemblyjs/wast-parser" "1.9.0" + "@xtuc/long" "4.2.2" + +"@xtuc/ieee754@^1.2.0": + version "1.2.0" + resolved "https://registry.yarnpkg.com/@xtuc/ieee754/-/ieee754-1.2.0.tgz#eef014a3145ae477a1cbc00cd1e552336dceb790" + integrity sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA== + +"@xtuc/long@4.2.2": + version "4.2.2" + resolved "https://registry.yarnpkg.com/@xtuc/long/-/long-4.2.2.tgz#d291c6a4e97989b5c61d9acf396ae4fe133a718d" + integrity sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ== + +abbrev@1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" + integrity sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q== + +accepts@~1.3.4, accepts@~1.3.5, accepts@~1.3.7: + version "1.3.7" + resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.7.tgz#531bc726517a3b2b41f850021c6cc15eaab507cd" + integrity sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA== + dependencies: + mime-types "~2.1.24" + negotiator 
"0.6.2" + +acorn-class-fields@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/acorn-class-fields/-/acorn-class-fields-0.2.1.tgz#748058bceeb0ef25164bbc671993984083f5a085" + integrity sha512-US/kqTe0H8M4LN9izoL+eykVAitE68YMuYZ3sHn3i1fjniqR7oQ3SPvuMK/VT1kjOQHrx5Q88b90TtOKgAv2hQ== + +acorn-dynamic-import@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/acorn-dynamic-import/-/acorn-dynamic-import-4.0.0.tgz#482210140582a36b83c3e342e1cfebcaa9240948" + integrity sha512-d3OEjQV4ROpoflsnUA8HozoIR504TFxNivYEUi6uwz0IYhBkTDXGuWlNdMtybRt3nqVx/L6XqMt0FxkXuWKZhw== + +acorn-jsx@^5.0.1: + version "5.2.0" + resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.2.0.tgz#4c66069173d6fdd68ed85239fc256226182b2ebe" + integrity sha512-HiUX/+K2YpkpJ+SzBffkM/AQ2YE03S0U1kjTLVpoJdhZMOWy8qvXVN9JdLqv2QsaQ6MPYQIuNmwD8zOiYUofLQ== + +acorn-walk@^7.1.1: + version "7.2.0" + resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-7.2.0.tgz#0de889a601203909b0fbe07b8938dc21d2e967bc" + integrity sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA== + +acorn@^6.1.1, acorn@^6.4.1: + version "6.4.1" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-6.4.1.tgz#531e58ba3f51b9dacb9a6646ca4debf5b14ca474" + integrity sha512-ZVA9k326Nwrj3Cj9jlh3wGFutC2ZornPNARZwsNYqQYgN0EsV2d53w5RN/co65Ohn4sUAUtb1rSUAOD6XN9idA== + +acorn@^7.1.1: + version "7.3.1" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-7.3.1.tgz#85010754db53c3fbaf3b9ea3e083aa5c5d147ffd" + integrity sha512-tLc0wSnatxAQHVHUapaHdz72pi9KUyHjq5KyHjGg9Y8Ifdc79pTh2XvI6I1/chZbnM7QtNKzh66ooDogPZSleA== + +address@1.1.2, address@^1.0.1: + version "1.1.2" + resolved "https://registry.yarnpkg.com/address/-/address-1.1.2.tgz#bf1116c9c758c51b7a933d296b72c221ed9428b6" + integrity sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA== + +aggregate-error@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-3.0.1.tgz#db2fe7246e536f40d9b5442a39e117d7dd6a24e0" + integrity sha512-quoaXsZ9/BLNae5yiNoUz+Nhkwz83GhWwtYFglcjEQB2NDHCIpApbqXxIFnm4Pq/Nvhrsq5sYJFyohrrxnTGAA== + dependencies: + clean-stack "^2.0.0" + indent-string "^4.0.0" + +ajv-errors@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/ajv-errors/-/ajv-errors-1.0.1.tgz#f35986aceb91afadec4102fbd85014950cefa64d" + integrity sha512-DCRfO/4nQ+89p/RK43i8Ezd41EqdGIU4ld7nGF8OQ14oc/we5rEntLCUa7+jrn3nn83BosfwZA0wb4pon2o8iQ== + +ajv-keywords@^3.1.0, ajv-keywords@^3.4.1: + version "3.4.1" + resolved "https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-3.4.1.tgz#ef916e271c64ac12171fd8384eaae6b2345854da" + integrity sha512-RO1ibKvd27e6FEShVFfPALuHI3WjSVNeK5FIsmme/LYRNxjKuNj+Dt7bucLa6NdSv3JcVTyMlm9kGR84z1XpaQ== + +ajv@^5.5.2: + version "5.5.2" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-5.5.2.tgz#73b5eeca3fab653e3d3f9422b341ad42205dc965" + integrity sha1-c7Xuyj+rZT49P5Qis0GtQiBdyWU= + dependencies: + co "^4.6.0" + fast-deep-equal "^1.0.0" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.3.0" + +ajv@^6.1.0, ajv@^6.10.2, ajv@^6.12.2: + version "6.12.2" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.12.2.tgz#c629c5eced17baf314437918d2da88c99d5958cd" + integrity sha512-k+V+hzjm5q/Mr8ef/1Y9goCmlsK4I6Sm74teeyGvFk1XrOsbsKLjEdrvny42CZ+a8sXbk8KWpY/bDwS+FLL2UQ== + dependencies: + fast-deep-equal "^3.1.1" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.4.1" + uri-js "^4.2.2" + +alphanum-sort@^1.0.0: + version 
"1.0.2" + resolved "https://registry.yarnpkg.com/alphanum-sort/-/alphanum-sort-1.0.2.tgz#97a1119649b211ad33691d9f9f486a8ec9fbe0a3" + integrity sha1-l6ERlkmyEa0zaR2fn0hqjsn74KM= + +analytics-utils@^0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/analytics-utils/-/analytics-utils-0.2.2.tgz#09729ba7514b81b3ce58f12b8eaad32926b58b6a" + integrity sha512-fdbc+MeoNrkwCAbGD/qgedyvRbPnImmWiInAgZ51KpINmKITpdtWV+6riHVA1YBSrb8IyYlfxn98IeWyN9a0+Q== + dependencies: + "@analytics/storage-utils" "^0.2.4" + dlv "^1.1.3" + +analytics@0.5.5: + version "0.5.5" + resolved "https://registry.yarnpkg.com/analytics/-/analytics-0.5.5.tgz#b4ff7823ee0b64d37669206af7583eed27d13d7a" + integrity sha512-43JBzud8n1OoN5TUehHpsp38rDyDteFgS9ZPWLyuj5JGc7JgjLAQNN1BCwyM9js4U7LIRxhEyWXPkifndMsOeA== + dependencies: + "@analytics/core" "^0.6.2" + "@analytics/storage-utils" "^0.2.4" + +ansi-align@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ansi-align/-/ansi-align-2.0.0.tgz#c36aeccba563b89ceb556f3690f0b1d9e3547f7f" + integrity sha1-w2rsy6VjuJzrVW82kPCx2eNUf38= + dependencies: + string-width "^2.0.0" + +ansi-align@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/ansi-align/-/ansi-align-3.0.0.tgz#b536b371cf687caaef236c18d3e21fe3797467cb" + integrity sha512-ZpClVKqXN3RGBmKibdfWzqCY4lnjEuoNzU5T0oEFpfd/z5qJHVarukridD4juLO2FXMiwUQxr9WqQtaYa8XRYw== + dependencies: + string-width "^3.0.0" + +ansi-colors@^3.0.0: + version "3.2.4" + resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-3.2.4.tgz#e3a3da4bfbae6c86a9c285625de124a234026fbf" + integrity sha512-hHUXGagefjN2iRrID63xckIvotOXOojhQKWIPUZ4mNUZ9nLZW+7FMNoE1lOkEhNWYsx/7ysGIuJYCiMAA9FnrA== + +ansi-escapes@^3.0.0, ansi-escapes@^3.1.0, ansi-escapes@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-3.2.0.tgz#8780b98ff9dbf5638152d1f1fe5c1d7b4442976b" + integrity sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ== + +ansi-escapes@^4.2.1, ansi-escapes@^4.3.0: + version "4.3.1" + resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-4.3.1.tgz#a5c47cc43181f1f38ffd7076837700d395522a61" + integrity sha512-JWF7ocqNrp8u9oqpgV+wH5ftbt+cfvv+PTjOvKLT3AdYly/LmORARfEVT1iyjwN+4MqE5UmVKoAdIBqeoCHgLA== + dependencies: + type-fest "^0.11.0" + +ansi-html@0.0.7: + version "0.0.7" + resolved "https://registry.yarnpkg.com/ansi-html/-/ansi-html-0.0.7.tgz#813584021962a9e9e6fd039f940d12f56ca7859e" + integrity sha1-gTWEAhliqenm/QOflA0S9WynhZ4= + +ansi-regex@^0.2.0, ansi-regex@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-0.2.1.tgz#0d8e946967a3d8143f93e24e298525fc1b2235f9" + integrity sha1-DY6UaWej2BQ/k+JOKYUl/BsiNfk= + +ansi-regex@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df" + integrity sha1-w7M6te42DYbg5ijwRorn7yfWVN8= + +ansi-regex@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-3.0.0.tgz#ed0317c322064f79466c02966bddb605ab37d998" + integrity sha1-7QMXwyIGT3lGbAKWa922Bas32Zg= + +ansi-regex@^4.0.0, ansi-regex@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-4.1.0.tgz#8b9f8f08cf1acb843756a839ca8c7e3168c51997" + integrity sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg== + +ansi-regex@^5.0.0: + version "5.0.0" + resolved 
"https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.0.tgz#388539f55179bf39339c81af30a654d69f87cb75" + integrity sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg== + +ansi-styles@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.1.0.tgz#eaecbf66cd706882760b2f4691582b8f55d7a7de" + integrity sha1-6uy/Zs1waIJ2Cy9GkVgrj1XXp94= + +ansi-styles@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe" + integrity sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4= + +ansi-styles@^3.2.0, ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== + dependencies: + color-convert "^1.9.0" + +ansi-styles@^4.0.0, ansi-styles@^4.1.0, ansi-styles@^4.2.0: + version "4.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.2.1.tgz#90ae75c424d008d2624c5bf29ead3177ebfcf359" + integrity sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA== + dependencies: + "@types/color-name" "^1.1.1" + color-convert "^2.0.1" + +ansicolors@~0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.3.2.tgz#665597de86a9ffe3aa9bfbe6cae5c6ea426b4979" + integrity sha1-ZlWX3oap/+Oqm/vmyuXG6kJrSXk= + +anymatch@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-2.0.0.tgz#bcb24b4f37934d9aa7ac17b4adaf89e7c76ef2eb" + integrity sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw== + dependencies: + micromatch "^3.1.4" + normalize-path "^2.1.1" + +anymatch@~3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.1.tgz#c55ecf02185e2469259399310c173ce31233b142" + integrity sha512-mM8522psRCqzV+6LhomX5wgp25YVibjh8Wj23I5RPkPppSVSjyKD2A2mBJmWGa+KN7f2D6LNh9jkBCeyLktzjg== + dependencies: + normalize-path "^3.0.0" + picomatch "^2.0.4" + +aproba@^1.1.1: + version "1.2.0" + resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.2.0.tgz#6802e6264efd18c790a1b0d517f0f2627bf2c94a" + integrity sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw== + +archive-type@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/archive-type/-/archive-type-4.0.0.tgz#f92e72233056dfc6969472749c267bdb046b1d70" + integrity sha1-+S5yIzBW38aWlHJ0nCZ72wRrHXA= + dependencies: + file-type "^4.2.0" + +archiver-utils@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/archiver-utils/-/archiver-utils-2.1.0.tgz#e8a460e94b693c3e3da182a098ca6285ba9249e2" + integrity sha512-bEL/yUb/fNNiNTuUz979Z0Yg5L+LzLxGJz8x79lYmR54fmTIb6ob/hNQgkQnIUDWIFjZVQwl9Xs356I6BAMHfw== + dependencies: + glob "^7.1.4" + graceful-fs "^4.2.0" + lazystream "^1.0.0" + lodash.defaults "^4.2.0" + lodash.difference "^4.5.0" + lodash.flatten "^4.4.0" + lodash.isplainobject "^4.0.6" + lodash.union "^4.6.0" + normalize-path "^3.0.0" + readable-stream "^2.0.0" + +archiver@^4.0.0: + version "4.0.2" + resolved "https://registry.yarnpkg.com/archiver/-/archiver-4.0.2.tgz#43c72865eadb4ddaaa2fb74852527b6a450d927c" + integrity sha512-B9IZjlGwaxF33UN4oPbfBkyA4V1SxNLeIhR1qY8sRXSsbdUkEHrrOvwlYFPx+8uQeCe9M+FG6KgO+imDmQ79CQ== + dependencies: + archiver-utils "^2.1.0" + async "^3.2.0" + 
buffer-crc32 "^0.2.1" + glob "^7.1.6" + readable-stream "^3.6.0" + tar-stream "^2.1.2" + zip-stream "^3.0.1" + +argparse@^1.0.7: + version "1.0.10" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" + integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg== + dependencies: + sprintf-js "~1.0.2" + +arr-diff@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-4.0.0.tgz#d6461074febfec71e7e15235761a329a5dc7c520" + integrity sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA= + +arr-flatten@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.1.0.tgz#36048bbff4e7b47e136644316c99669ea5ae91f1" + integrity sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg== + +arr-union@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/arr-union/-/arr-union-3.1.0.tgz#e39b09aea9def866a8f206e288af63919bae39c4" + integrity sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ= + +array-flat-polyfill@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/array-flat-polyfill/-/array-flat-polyfill-1.0.1.tgz#1e3a4255be619dfbffbfd1d635c1cf357cd034e7" + integrity sha512-hfJmKupmQN0lwi0xG6FQ5U8Rd97RnIERplymOv/qpq8AoNKPPAnxJadjFA23FNWm88wykh9HmpLJUUwUtNU/iw== + +array-flatten@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2" + integrity sha1-ml9pkFGx5wczKPKgCJaLZOopVdI= + +array-flatten@^2.1.0: + version "2.1.2" + resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-2.1.2.tgz#24ef80a28c1a893617e2149b0c6d0d788293b099" + integrity sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ== + +array-union@^1.0.1, array-union@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/array-union/-/array-union-1.0.2.tgz#9a34410e4f4e3da23dea375be5be70f24778ec39" + integrity sha1-mjRBDk9OPaI96jdb5b5w8kd47Dk= + dependencies: + array-uniq "^1.0.1" + +array-union@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" + integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw== + +array-uniq@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6" + integrity sha1-r2rId6Jcx/dOBYiUdThY39sk/bY= + +array-unique@^0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.3.2.tgz#a894b75d4bc4f6cd679ef3244a9fd8f46ae2d428" + integrity sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg= + +arrify@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d" + integrity sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0= + +arrify@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/arrify/-/arrify-2.0.1.tgz#c9655e9331e0abcd588d2a7cad7e9956f66701fa" + integrity sha512-3duEwti880xqi4eAMN8AyR4a0ByT90zoYdLlevfrvU43vb0YZwZVfxOgxWrLXXXpyugL0hNZc9G6BiB5B3nUug== + +asap@~2.0.3: + version "2.0.6" + resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.6.tgz#e50347611d7e690943208bbdafebcbc2fb866d46" + integrity sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY= + +ascii-table@0.0.9: + version "0.0.9" + resolved 
"https://registry.yarnpkg.com/ascii-table/-/ascii-table-0.0.9.tgz#06a6604d6a55d4bf41a9a47d9872d7a78da31e73" + integrity sha1-BqZgTWpV1L9BqaR9mHLXp42jHnM= + +asn1.js@^4.0.0: + version "4.10.1" + resolved "https://registry.yarnpkg.com/asn1.js/-/asn1.js-4.10.1.tgz#b9c2bf5805f1e64aadeed6df3a2bfafb5a73f5a0" + integrity sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw== + dependencies: + bn.js "^4.0.0" + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + +assert@^1.1.1: + version "1.5.0" + resolved "https://registry.yarnpkg.com/assert/-/assert-1.5.0.tgz#55c109aaf6e0aefdb3dc4b71240c70bf574b18eb" + integrity sha512-EDsgawzwoun2CZkCgtxJbv392v4nbk9XDD06zI+kQYoBM/3RBWLlEyJARDOmhAAosBjWACEkKL6S+lIZtcAubA== + dependencies: + object-assign "^4.1.1" + util "0.10.3" + +assign-symbols@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/assign-symbols/-/assign-symbols-1.0.0.tgz#59667f41fadd4f20ccbc2bb96b8d4f7f78ec0367" + integrity sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c= + +ast-module-types@^2.3.1, ast-module-types@^2.3.2, ast-module-types@^2.4.0, ast-module-types@^2.6.0: + version "2.6.0" + resolved "https://registry.yarnpkg.com/ast-module-types/-/ast-module-types-2.6.0.tgz#f9f367fd273bbe01e52f2c51b5f46b65801d5d7f" + integrity sha512-zXSoVaMrf2R+r+ISid5/9a8SXm1LLdkhHzh6pSRhj9jklzruOOl1hva1YmFT33wAstg/f9ZndJAlq1BSrFLSGA== + +async-each@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/async-each/-/async-each-1.0.3.tgz#b727dbf87d7651602f06f4d4ac387f47d91b0cbf" + integrity sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ== + +async-limiter@~1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/async-limiter/-/async-limiter-1.0.1.tgz#dd379e94f0db8310b08291f9d64c3209766617fd" + integrity sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ== + +async@^2.6.2: + version "2.6.3" + resolved "https://registry.yarnpkg.com/async/-/async-2.6.3.tgz#d72625e2344a3656e3a3ad4fa749fa83299d82ff" + integrity sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg== + dependencies: + lodash "^4.17.14" + +async@^3.1.0, async@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/async/-/async-3.2.0.tgz#b3a2685c5ebb641d3de02d161002c60fc9f85720" + integrity sha512-TR2mEZFVOj2pLStYxLht7TyfuRzaydfpxr3k9RpHIzMgw7A64dzsdqCxH1WJyQdoe8T10nDXd9wnEigmiuHIZw== + +asynckit@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" + integrity sha1-x57Zf380y48robyXkLzDZkdLS3k= + +at-least-node@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/at-least-node/-/at-least-node-1.0.0.tgz#602cd4b46e844ad4effc92a8011a3c46e0238dc2" + integrity sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg== + +atob-lite@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/atob-lite/-/atob-lite-2.0.0.tgz#0fef5ad46f1bd7a8502c65727f0367d5ee43d696" + integrity sha1-D+9a1G8b16hQLGVyfwNn1e5D1pY= + +atob@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/atob/-/atob-2.1.2.tgz#6d9517eb9e030d2436666651e86bd9f6f13533c9" + integrity sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg== + +autoprefixer@^9.6.1: + version "9.8.0" + resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-9.8.0.tgz#68e2d2bef7ba4c3a65436f662d0a56a741e56511" + 
integrity sha512-D96ZiIHXbDmU02dBaemyAg53ez+6F5yZmapmgKcjm35yEe1uVDYI8hGW3VYoGRaG290ZFf91YxHrR518vC0u/A== + dependencies: + browserslist "^4.12.0" + caniuse-lite "^1.0.30001061" + chalk "^2.4.2" + normalize-range "^0.1.2" + num2fraction "^1.2.2" + postcss "^7.0.30" + postcss-value-parser "^4.1.0" + +aws-sdk@^2.689.0: + version "2.726.0" + resolved "https://registry.yarnpkg.com/aws-sdk/-/aws-sdk-2.726.0.tgz#075aae1922e67e21285abae1bb081252cff4fec3" + integrity sha512-QRQ7MaW5dprdr/T3vCTC+J8TeUfpM45yWsBuATPcCV/oO8afFHVySwygvGLY4oJuo5Mf4mJn3+JYTquo6CqiaA== + dependencies: + buffer "4.9.2" + events "1.1.1" + ieee754 "1.1.13" + jmespath "0.15.0" + querystring "0.2.0" + sax "1.2.1" + url "0.10.3" + uuid "3.3.2" + xml2js "0.4.19" + +babel-code-frame@^6.22.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.26.0.tgz#63fd43f7dc1e3bb7ce35947db8fe369a3f58c74b" + integrity sha1-Y/1D99weO7fONZR9uP42mj9Yx0s= + dependencies: + chalk "^1.1.3" + esutils "^2.0.2" + js-tokens "^3.0.2" + +babel-loader@^8.1.0: + version "8.1.0" + resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-8.1.0.tgz#c611d5112bd5209abe8b9fa84c3e4da25275f1c3" + integrity sha512-7q7nC1tYOrqvUrN3LQK4GwSk/TQorZSOlO9C+RZDZpODgyN4ZlCqE5q9cDsyWOliN+aU9B4JX01xK9eJXowJLw== + dependencies: + find-cache-dir "^2.1.0" + loader-utils "^1.4.0" + mkdirp "^0.5.3" + pify "^4.0.1" + schema-utils "^2.6.5" + +babel-plugin-apply-mdx-type-prop@1.6.16: + version "1.6.16" + resolved "https://registry.yarnpkg.com/babel-plugin-apply-mdx-type-prop/-/babel-plugin-apply-mdx-type-prop-1.6.16.tgz#4becd65b3aa108f15c524a0b125ca7c81f3443d8" + integrity sha512-hjUd24Yhnr5NKtHpC2mcRBGjC6RUKGzSzjN9g5SdjT4WpL/JDlpmjyBf7vWsJJSXFvMIbzRyxF4lT9ukwOnj/w== + dependencies: + "@babel/helper-plugin-utils" "7.10.4" + "@mdx-js/util" "1.6.16" + +babel-plugin-dynamic-import-node@^2.3.0, babel-plugin-dynamic-import-node@^2.3.3: + version "2.3.3" + resolved "https://registry.yarnpkg.com/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz#84fda19c976ec5c6defef57f9427b3def66e17a3" + integrity sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ== + dependencies: + object.assign "^4.1.0" + +babel-plugin-emotion@^9.2.11: + version "9.2.11" + resolved "https://registry.yarnpkg.com/babel-plugin-emotion/-/babel-plugin-emotion-9.2.11.tgz#319c005a9ee1d15bb447f59fe504c35fd5807728" + integrity sha512-dgCImifnOPPSeXod2znAmgc64NhaaOjGEHROR/M+lmStb3841yK1sgaDYAYMnlvWNz8GnpwIPN0VmNpbWYZ+VQ== + dependencies: + "@babel/helper-module-imports" "^7.0.0" + "@emotion/babel-utils" "^0.6.4" + "@emotion/hash" "^0.6.2" + "@emotion/memoize" "^0.6.1" + "@emotion/stylis" "^0.7.0" + babel-plugin-macros "^2.0.0" + babel-plugin-syntax-jsx "^6.18.0" + convert-source-map "^1.5.0" + find-root "^1.1.0" + mkdirp "^0.5.1" + source-map "^0.5.7" + touch "^2.0.1" + +babel-plugin-extract-import-names@1.6.16: + version "1.6.16" + resolved "https://registry.yarnpkg.com/babel-plugin-extract-import-names/-/babel-plugin-extract-import-names-1.6.16.tgz#b964004e794bdd62534c525db67d9e890d5cc079" + integrity sha512-Da6Ra0sbA/1Iavli8LdMbTjyrsOPaxMm4lrKl8VJN4sJI5F64qy2EpLj3+5INLvNPfW4ddwpStbfP3Rf3jIgcw== + dependencies: + "@babel/helper-plugin-utils" "7.10.4" + +babel-plugin-macros@^2.0.0: + version "2.8.0" + resolved "https://registry.yarnpkg.com/babel-plugin-macros/-/babel-plugin-macros-2.8.0.tgz#0f958a7cc6556b1e65344465d99111a1e5e10138" + integrity 
sha512-SEP5kJpfGYqYKpBrj5XU3ahw5p5GOHJ0U5ssOSQ/WBVdwkD2Dzlce95exQTs3jOVWPPKLBN2rlEWkCK7dSmLvg== + dependencies: + "@babel/runtime" "^7.7.2" + cosmiconfig "^6.0.0" + resolve "^1.12.0" + +"babel-plugin-styled-components@>= 1": + version "1.10.7" + resolved "https://registry.yarnpkg.com/babel-plugin-styled-components/-/babel-plugin-styled-components-1.10.7.tgz#3494e77914e9989b33cc2d7b3b29527a949d635c" + integrity sha512-MBMHGcIA22996n9hZRf/UJLVVgkEOITuR2SvjHLb5dSTUyR4ZRGn+ngITapes36FI3WLxZHfRhkA1ffHxihOrg== + dependencies: + "@babel/helper-annotate-as-pure" "^7.0.0" + "@babel/helper-module-imports" "^7.0.0" + babel-plugin-syntax-jsx "^6.18.0" + lodash "^4.17.11" + +babel-plugin-syntax-jsx@^6.18.0: + version "6.18.0" + resolved "https://registry.yarnpkg.com/babel-plugin-syntax-jsx/-/babel-plugin-syntax-jsx-6.18.0.tgz#0af32a9a6e13ca7a3fd5069e62d7b0f58d0d8946" + integrity sha1-CvMqmm4Tyno/1QaeYtew9Y0NiUY= + +backoff@^2.5.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/backoff/-/backoff-2.5.0.tgz#f616eda9d3e4b66b8ca7fca79f695722c5f8e26f" + integrity sha1-9hbtqdPktmuMp/ynn2lXIsX44m8= + dependencies: + precond "0.2" + +bail@^1.0.0: + version "1.0.5" + resolved "https://registry.yarnpkg.com/bail/-/bail-1.0.5.tgz#b6fa133404a392cbc1f8c4bf63f5953351e7a776" + integrity sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ== + +balanced-match@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" + integrity sha1-ibTRmasr7kneFk6gK4nORi1xt2c= + +base64-js@^1.0.2: + version "1.3.1" + resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.3.1.tgz#58ece8cb75dd07e71ed08c736abc5fac4dbf8df1" + integrity sha512-mLQ4i2QO1ytvGWFWmcngKO//JXAQueZvwEKtjgQFM4jIK0kU+ytMfplL8j+n5mspOfjHwoAg+9yhb7BwAHm36g== + +base@^0.11.1: + version "0.11.2" + resolved "https://registry.yarnpkg.com/base/-/base-0.11.2.tgz#7bde5ced145b6d551a90db87f83c558b4eb48a8f" + integrity sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg== + dependencies: + cache-base "^1.0.1" + class-utils "^0.3.5" + component-emitter "^1.2.1" + define-property "^1.0.0" + isobject "^3.0.1" + mixin-deep "^1.2.0" + pascalcase "^0.1.1" + +batch@0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/batch/-/batch-0.6.1.tgz#dc34314f4e679318093fc760272525f94bf25c16" + integrity sha1-3DQxT05nkxgJP8dgJyUl+UvyXBY= + +before-after-hook@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/before-after-hook/-/before-after-hook-2.1.0.tgz#b6c03487f44e24200dd30ca5e6a1979c5d2fb635" + integrity sha512-IWIbu7pMqyw3EAJHzzHbWa85b6oud/yfKYg5rqB5hNE8CeMi3nX+2C2sj0HswfblST86hpVEOAb9x34NZd6P7A== + +better-ajv-errors@^0.6.1, better-ajv-errors@^0.6.7: + version "0.6.7" + resolved "https://registry.yarnpkg.com/better-ajv-errors/-/better-ajv-errors-0.6.7.tgz#b5344af1ce10f434fe02fc4390a5a9c811e470d1" + integrity sha512-PYgt/sCzR4aGpyNy5+ViSQ77ognMnWq7745zM+/flYO4/Yisdtp9wDQW2IKCyVYPUxQt3E/b5GBSwfhd1LPdlg== + dependencies: + "@babel/code-frame" "^7.0.0" + "@babel/runtime" "^7.0.0" + chalk "^2.4.1" + core-js "^3.2.1" + json-to-ast "^2.0.3" + jsonpointer "^4.0.1" + leven "^3.1.0" + +bfj@^6.1.1: + version "6.1.2" + resolved "https://registry.yarnpkg.com/bfj/-/bfj-6.1.2.tgz#325c861a822bcb358a41c78a33b8e6e2086dde7f" + integrity sha512-BmBJa4Lip6BPRINSZ0BPEIfB1wUY/9rwbwvIHQA1KjX9om29B6id0wnWXq7m3bn5JrUVjeOTnVuhPT1FiHwPGw== + dependencies: + bluebird "^3.5.5" + check-types 
"^8.0.3" + hoopy "^0.1.4" + tryer "^1.0.1" + +big.js@^5.2.2: + version "5.2.2" + resolved "https://registry.yarnpkg.com/big.js/-/big.js-5.2.2.tgz#65f0af382f578bcdc742bd9c281e9cb2d7768328" + integrity sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ== + +binary-extensions@^1.0.0: + version "1.13.1" + resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-1.13.1.tgz#598afe54755b2868a5330d2aff9d4ebb53209b65" + integrity sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw== + +binary-extensions@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.0.0.tgz#23c0df14f6a88077f5f986c0d167ec03c3d5537c" + integrity sha512-Phlt0plgpIIBOGTT/ehfFnbNlfsDEiqmzE2KRXoX1bLIlir4X/MR+zSyBEkL05ffWgnRSf/DXv+WrUAVr93/ow== + +bindings@^1.5.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/bindings/-/bindings-1.5.0.tgz#10353c9e945334bc0511a6d90b38fbc7c9c504df" + integrity sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ== + dependencies: + file-uri-to-path "1.0.0" + +bl@^1.0.0: + version "1.2.2" + resolved "https://registry.yarnpkg.com/bl/-/bl-1.2.2.tgz#a160911717103c07410cef63ef51b397c025af9c" + integrity sha512-e8tQYnZodmebYDWGH7KMRvtzKXaJHx3BbilrgZCfvyLUYdKpK1t5PSPmpkny/SgiTSCnjfLW7v5rlONXVFkQEA== + dependencies: + readable-stream "^2.3.5" + safe-buffer "^5.1.1" + +bl@^4.0.1: + version "4.0.2" + resolved "https://registry.yarnpkg.com/bl/-/bl-4.0.2.tgz#52b71e9088515d0606d9dd9cc7aa48dc1f98e73a" + integrity sha512-j4OH8f6Qg2bGuWfRiltT2HYGx0e1QcBTrK9KAHNMwMZdQnDZFk0ZSYIpADjYCB3U12nicC5tVJwSIhwOWjb4RQ== + dependencies: + buffer "^5.5.0" + inherits "^2.0.4" + readable-stream "^3.4.0" + +bluebird@^3.5.5, bluebird@^3.7.1: + version "3.7.2" + resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.7.2.tgz#9f229c15be272454ffa973ace0dbee79a1b0c36f" + integrity sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg== + +bn.js@^4.0.0, bn.js@^4.1.0, bn.js@^4.4.0: + version "4.11.9" + resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.11.9.tgz#26d556829458f9d1e81fc48952493d0ba3507828" + integrity sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw== + +bn.js@^5.1.1: + version "5.1.2" + resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-5.1.2.tgz#c9686902d3c9a27729f43ab10f9d79c2004da7b0" + integrity sha512-40rZaf3bUNKTVYu9sIeeEGOg7g14Yvnj9kH7b50EiwX0Q7A6umbvfI5tvHaOERH0XigqKkfLkFQxzb4e6CIXnA== + +body-parser@1.19.0, body-parser@^1.19.0: + version "1.19.0" + resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.19.0.tgz#96b2709e57c9c4e09a6fd66a8fd979844f69f08a" + integrity sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw== + dependencies: + bytes "3.1.0" + content-type "~1.0.4" + debug "2.6.9" + depd "~1.1.2" + http-errors "1.7.2" + iconv-lite "0.4.24" + on-finished "~2.3.0" + qs "6.7.0" + raw-body "2.4.0" + type-is "~1.6.17" + +bonjour@^3.5.0: + version "3.5.0" + resolved "https://registry.yarnpkg.com/bonjour/-/bonjour-3.5.0.tgz#8e890a183d8ee9a2393b3844c691a42bcf7bc9f5" + integrity sha1-jokKGD2O6aI5OzhExpGkK897yfU= + dependencies: + array-flatten "^2.1.0" + deep-equal "^1.0.1" + dns-equal "^1.0.0" + dns-txt "^2.0.2" + multicast-dns "^6.0.1" + multicast-dns-service-types "^1.1.0" + +boolbase@^1.0.0, boolbase@~1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/boolbase/-/boolbase-1.0.0.tgz#68dff5fbe60c51eb37725ea9e3ed310dcc1e776e" + integrity sha1-aN/1++YMUes3cl6p4+0xDcwed24= + +boxen@^1.2.1: + version "1.3.0" + resolved "https://registry.yarnpkg.com/boxen/-/boxen-1.3.0.tgz#55c6c39a8ba58d9c61ad22cd877532deb665a20b" + integrity sha512-TNPjfTr432qx7yOjQyaXm3dSR0MH9vXp7eT1BFSl/C51g+EFnOR9hTg1IreahGBmDNCehscshe45f+C1TBZbLw== + dependencies: + ansi-align "^2.0.0" + camelcase "^4.0.0" + chalk "^2.0.1" + cli-boxes "^1.0.0" + string-width "^2.0.0" + term-size "^1.2.0" + widest-line "^2.0.0" + +boxen@^4.1.0, boxen@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/boxen/-/boxen-4.2.0.tgz#e411b62357d6d6d36587c8ac3d5d974daa070e64" + integrity sha512-eB4uT9RGzg2odpER62bBwSLvUeGC+WbRjjyyFhGsKnc8wp/m0+hQsMUvUe3H2V0D5vw0nBdO1hCJoZo5mKeuIQ== + dependencies: + ansi-align "^3.0.0" + camelcase "^5.3.1" + chalk "^3.0.0" + cli-boxes "^2.2.0" + string-width "^4.1.0" + term-size "^2.1.0" + type-fest "^0.8.1" + widest-line "^3.1.0" + +brace-expansion@^1.1.7: + version "1.1.11" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +braces@^2.3.1, braces@^2.3.2: + version "2.3.2" + resolved "https://registry.yarnpkg.com/braces/-/braces-2.3.2.tgz#5979fd3f14cd531565e5fa2df1abfff1dfaee729" + integrity sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w== + dependencies: + arr-flatten "^1.1.0" + array-unique "^0.3.2" + extend-shallow "^2.0.1" + fill-range "^4.0.0" + isobject "^3.0.1" + repeat-element "^1.1.2" + snapdragon "^0.8.1" + snapdragon-node "^2.0.1" + split-string "^3.0.2" + to-regex "^3.0.1" + +braces@^3.0.1, braces@~3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" + integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== + dependencies: + fill-range "^7.0.1" + +brorand@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/brorand/-/brorand-1.1.0.tgz#12c25efe40a45e3c323eb8675a0a0ce57b22371f" + integrity sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8= + +browserify-aes@^1.0.0, browserify-aes@^1.0.4: + version "1.2.0" + resolved "https://registry.yarnpkg.com/browserify-aes/-/browserify-aes-1.2.0.tgz#326734642f403dabc3003209853bb70ad428ef48" + integrity sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA== + dependencies: + buffer-xor "^1.0.3" + cipher-base "^1.0.0" + create-hash "^1.1.0" + evp_bytestokey "^1.0.3" + inherits "^2.0.1" + safe-buffer "^5.0.1" + +browserify-cipher@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/browserify-cipher/-/browserify-cipher-1.0.1.tgz#8d6474c1b870bfdabcd3bcfcc1934a10e94f15f0" + integrity sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w== + dependencies: + browserify-aes "^1.0.4" + browserify-des "^1.0.0" + evp_bytestokey "^1.0.0" + +browserify-des@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/browserify-des/-/browserify-des-1.0.2.tgz#3af4f1f59839403572f1c66204375f7a7f703e9c" + integrity sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A== + dependencies: + cipher-base "^1.0.1" + des.js "^1.0.0" + inherits 
"^2.0.1" + safe-buffer "^5.1.2" + +browserify-rsa@^4.0.0, browserify-rsa@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/browserify-rsa/-/browserify-rsa-4.0.1.tgz#21e0abfaf6f2029cf2fafb133567a701d4135524" + integrity sha1-IeCr+vbyApzy+vsTNWenAdQTVSQ= + dependencies: + bn.js "^4.1.0" + randombytes "^2.0.1" + +browserify-sign@^4.0.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/browserify-sign/-/browserify-sign-4.2.0.tgz#545d0b1b07e6b2c99211082bf1b12cce7a0b0e11" + integrity sha512-hEZC1KEeYuoHRqhGhTy6gWrpJA3ZDjFWv0DE61643ZnOXAKJb3u7yWcrU0mMc9SwAqK1n7myPGndkp0dFG7NFA== + dependencies: + bn.js "^5.1.1" + browserify-rsa "^4.0.1" + create-hash "^1.2.0" + create-hmac "^1.1.7" + elliptic "^6.5.2" + inherits "^2.0.4" + parse-asn1 "^5.1.5" + readable-stream "^3.6.0" + safe-buffer "^5.2.0" + +browserify-zlib@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/browserify-zlib/-/browserify-zlib-0.2.0.tgz#2869459d9aa3be245fe8fe2ca1f46e2e7f54d73f" + integrity sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA== + dependencies: + pako "~1.0.5" + +browserslist@4.10.0: + version "4.10.0" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.10.0.tgz#f179737913eaf0d2b98e4926ac1ca6a15cbcc6a9" + integrity sha512-TpfK0TDgv71dzuTsEAlQiHeWQ/tiPqgNZVdv046fvNtBZrjbv2O3TsWCDU0AWGJJKCF/KsjNdLzR9hXOsh/CfA== + dependencies: + caniuse-lite "^1.0.30001035" + electron-to-chromium "^1.3.378" + node-releases "^1.1.52" + pkg-up "^3.1.0" + +browserslist@^4.0.0, browserslist@^4.12.0, browserslist@^4.6.4, browserslist@^4.8.5: + version "4.12.0" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.12.0.tgz#06c6d5715a1ede6c51fc39ff67fd647f740b656d" + integrity sha512-UH2GkcEDSI0k/lRkuDSzFl9ZZ87skSy9w2XAn1MsZnL+4c4rqbBd3e82UWHbYDpztABrPBhZsTEeuxVfHppqDg== + dependencies: + caniuse-lite "^1.0.30001043" + electron-to-chromium "^1.3.413" + node-releases "^1.1.53" + pkg-up "^2.0.0" + +btoa-lite@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/btoa-lite/-/btoa-lite-1.0.0.tgz#337766da15801210fdd956c22e9c6891ab9d0337" + integrity sha1-M3dm2hWAEhD92VbCLpxokaudAzc= + +buble@0.19.6: + version "0.19.6" + resolved "https://registry.yarnpkg.com/buble/-/buble-0.19.6.tgz#915909b6bd5b11ee03b1c885ec914a8b974d34d3" + integrity sha512-9kViM6nJA1Q548Jrd06x0geh+BG2ru2+RMDkIHHgJY/8AcyCs34lTHwra9BX7YdPrZXd5aarkpr/SY8bmPgPdg== + dependencies: + chalk "^2.4.1" + magic-string "^0.25.1" + minimist "^1.2.0" + os-homedir "^1.0.1" + regexpu-core "^4.2.0" + vlq "^1.0.0" + +buffer-alloc-unsafe@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz#bd7dc26ae2972d0eda253be061dba992349c19f0" + integrity sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg== + +buffer-alloc@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/buffer-alloc/-/buffer-alloc-1.2.0.tgz#890dd90d923a873e08e10e5fd51a57e5b7cce0ec" + integrity sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow== + dependencies: + buffer-alloc-unsafe "^1.1.0" + buffer-fill "^1.0.0" + +buffer-crc32@^0.2.1, buffer-crc32@^0.2.13, buffer-crc32@~0.2.3: + version "0.2.13" + resolved "https://registry.yarnpkg.com/buffer-crc32/-/buffer-crc32-0.2.13.tgz#0d333e3f00eac50aa1454abd30ef8c2a5d9a7242" + integrity sha1-DTM+PwDqxQqhRUq9MO+MKl2ackI= + +buffer-fill@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/buffer-fill/-/buffer-fill-1.0.0.tgz#f8f78b76789888ef39f205cd637f68e702122b2c" + integrity sha1-+PeLdniYiO858gXNY39o5wISKyw= + +buffer-from@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.1.tgz#32713bc028f75c02fdb710d7c7bcec1f2c6070ef" + integrity sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A== + +buffer-indexof@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/buffer-indexof/-/buffer-indexof-1.1.1.tgz#52fabcc6a606d1a00302802648ef68f639da268c" + integrity sha512-4/rOEg86jivtPTeOUUT61jJO1Ya1TrR/OkqCSZDyq84WJh3LuuiphBYJN+fm5xufIk4XAFcEwte/8WzC8If/1g== + +buffer-json@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/buffer-json/-/buffer-json-2.0.0.tgz#f73e13b1e42f196fe2fd67d001c7d7107edd7c23" + integrity sha512-+jjPFVqyfF1esi9fvfUs3NqM0pH1ziZ36VP4hmA/y/Ssfo/5w5xHKfTw9BwQjoJ1w/oVtpLomqwUHKdefGyuHw== + +buffer-xor@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/buffer-xor/-/buffer-xor-1.0.3.tgz#26e61ed1422fb70dd42e6e36729ed51d855fe8d9" + integrity sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk= + +buffer@4.9.2, buffer@^4.3.0: + version "4.9.2" + resolved "https://registry.yarnpkg.com/buffer/-/buffer-4.9.2.tgz#230ead344002988644841ab0244af8c44bbe3ef8" + integrity sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg== + dependencies: + base64-js "^1.0.2" + ieee754 "^1.1.4" + isarray "^1.0.0" + +buffer@^5.1.0, buffer@^5.2.1, buffer@^5.5.0: + version "5.6.0" + resolved "https://registry.yarnpkg.com/buffer/-/buffer-5.6.0.tgz#a31749dc7d81d84db08abf937b6b8c4033f62786" + integrity sha512-/gDYp/UtU0eA1ys8bOs9J6a+E/KWIY+DZ+Q2WESNUA0jFRsJOc0SNUO6xJ5SGA1xueg3NL65W6s+NY5l9cunuw== + dependencies: + base64-js "^1.0.2" + ieee754 "^1.1.4" + +builtin-status-codes@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz#85982878e21b98e1c66425e03d0174788f569ee8" + integrity sha1-hZgoeOIbmOHGZCXgPQF0eI9Wnug= + +builtins@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/builtins/-/builtins-1.0.3.tgz#cb94faeb61c8696451db36534e1422f94f0aee88" + integrity sha1-y5T662HIaWRR2zZTThQi+U8K7og= + +byline@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/byline/-/byline-5.0.0.tgz#741c5216468eadc457b03410118ad77de8c1ddb1" + integrity sha1-dBxSFkaOrcRXsDQQEYrXfejB3bE= + +bytes@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.0.0.tgz#d32815404d689699f85a4ea4fa8755dd13a96048" + integrity sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg= + +bytes@3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.0.tgz#f6cf7933a360e0588fa9fde85651cdc7f805d1f6" + integrity sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg== + +cacache@^12.0.2, cacache@^12.0.3: + version "12.0.4" + resolved "https://registry.yarnpkg.com/cacache/-/cacache-12.0.4.tgz#668bcbd105aeb5f1d92fe25570ec9525c8faa40c" + integrity sha512-a0tMB40oefvuInr4Cwb3GerbL9xTj1D5yg0T5xrjGCGyfvbxseIXX7BAO/u/hIXdafzOI5JC3wDwHyf24buOAQ== + dependencies: + bluebird "^3.5.5" + chownr "^1.1.1" + figgy-pudding "^3.5.1" + glob "^7.1.4" + graceful-fs "^4.1.15" + infer-owner "^1.0.3" + lru-cache "^5.1.1" + mississippi "^3.0.0" + mkdirp "^0.5.1" + move-concurrently "^1.0.1" + promise-inflight "^1.0.1" + rimraf "^2.6.3" + ssri "^6.0.1" + unique-filename "^1.1.1" + y18n "^4.0.0" + +cacache@^13.0.1: + version 
"13.0.1" + resolved "https://registry.yarnpkg.com/cacache/-/cacache-13.0.1.tgz#a8000c21697089082f85287a1aec6e382024a71c" + integrity sha512-5ZvAxd05HDDU+y9BVvcqYu2LLXmPnQ0hW62h32g4xBTgL/MppR4/04NHfj/ycM2y6lmTnbw6HVi+1eN0Psba6w== + dependencies: + chownr "^1.1.2" + figgy-pudding "^3.5.1" + fs-minipass "^2.0.0" + glob "^7.1.4" + graceful-fs "^4.2.2" + infer-owner "^1.0.4" + lru-cache "^5.1.1" + minipass "^3.0.0" + minipass-collect "^1.0.2" + minipass-flush "^1.0.5" + minipass-pipeline "^1.2.2" + mkdirp "^0.5.1" + move-concurrently "^1.0.1" + p-map "^3.0.0" + promise-inflight "^1.0.1" + rimraf "^2.7.1" + ssri "^7.0.0" + unique-filename "^1.1.1" + +cache-base@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/cache-base/-/cache-base-1.0.1.tgz#0a7f46416831c8b662ee36fe4e7c59d76f666ab2" + integrity sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ== + dependencies: + collection-visit "^1.0.0" + component-emitter "^1.2.1" + get-value "^2.0.6" + has-value "^1.0.0" + isobject "^3.0.1" + set-value "^2.0.0" + to-object-path "^0.3.0" + union-value "^1.0.0" + unset-value "^1.0.0" + +cache-loader@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/cache-loader/-/cache-loader-4.1.0.tgz#9948cae353aec0a1fcb1eafda2300816ec85387e" + integrity sha512-ftOayxve0PwKzBF/GLsZNC9fJBXl8lkZE3TOsjkboHfVHVkL39iUEs1FO07A33mizmci5Dudt38UZrrYXDtbhw== + dependencies: + buffer-json "^2.0.0" + find-cache-dir "^3.0.0" + loader-utils "^1.2.3" + mkdirp "^0.5.1" + neo-async "^2.6.1" + schema-utils "^2.0.0" + +cacheable-request@^2.1.1: + version "2.1.4" + resolved "https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-2.1.4.tgz#0d808801b6342ad33c91df9d0b44dc09b91e5c3d" + integrity sha1-DYCIAbY0KtM8kd+dC0TcCbkeXD0= + dependencies: + clone-response "1.0.2" + get-stream "3.0.0" + http-cache-semantics "3.8.1" + keyv "3.0.0" + lowercase-keys "1.0.0" + normalize-url "2.0.1" + responselike "1.0.2" + +cacheable-request@^6.0.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-6.1.0.tgz#20ffb8bd162ba4be11e9567d823db651052ca912" + integrity sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg== + dependencies: + clone-response "^1.0.2" + get-stream "^5.1.0" + http-cache-semantics "^4.0.0" + keyv "^3.0.0" + lowercase-keys "^2.0.0" + normalize-url "^4.1.0" + responselike "^1.0.2" + +cachedir@^2.2.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/cachedir/-/cachedir-2.3.0.tgz#0c75892a052198f0b21c7c1804d8331edfcae0e8" + integrity sha512-A+Fezp4zxnit6FanDmv9EqXNAi3vt9DWp51/71UEhXukb7QUuvtv9344h91dyAxuTLoSYJFU299qzR3tzwPAhw== + +call-me-maybe@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/call-me-maybe/-/call-me-maybe-1.0.1.tgz#26d208ea89e37b5cbde60250a15f031c16a4d66b" + integrity sha1-JtII6onje1y95gJQoV8DHBak1ms= + +caller-callsite@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/caller-callsite/-/caller-callsite-2.0.0.tgz#847e0fce0a223750a9a027c54b33731ad3154134" + integrity sha1-hH4PzgoiN1CpoCfFSzNzGtMVQTQ= + dependencies: + callsites "^2.0.0" + +caller-path@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/caller-path/-/caller-path-2.0.0.tgz#468f83044e369ab2010fac5f06ceee15bb2cb1f4" + integrity sha1-Ro+DBE42mrIBD6xfBs7uFbsssfQ= + dependencies: + caller-callsite "^2.0.0" + +callsites@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/callsites/-/callsites-2.0.0.tgz#06eb84f00eea413da86affefacbffb36093b3c50" + integrity sha1-BuuE8A7qQT2oav/vrL/7Ngk7PFA= + +callsites@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" + integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== + +camel-case@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/camel-case/-/camel-case-4.1.1.tgz#1fc41c854f00e2f7d0139dfeba1542d6896fe547" + integrity sha512-7fa2WcG4fYFkclIvEmxBbTvmibwF2/agfEBc6q3lOpVu0A13ltLsA+Hr/8Hp6kp5f+G7hKi6t8lys6XxP+1K6Q== + dependencies: + pascal-case "^3.1.1" + tslib "^1.10.0" + +camelcase-css@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/camelcase-css/-/camelcase-css-2.0.1.tgz#ee978f6947914cc30c6b44741b6ed1df7f043fd5" + integrity sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA== + +camelcase@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-4.1.0.tgz#d545635be1e33c542649c69173e5de6acfae34dd" + integrity sha1-1UVjW+HjPFQmScaRc+Xeas+uNN0= + +camelcase@^5.0.0, camelcase@^5.3.1: + version "5.3.1" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320" + integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg== + +camelcase@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.0.0.tgz#5259f7c30e35e278f1bdc2a4d91230b37cad981e" + integrity sha512-8KMDF1Vz2gzOq54ONPJS65IvTUaB1cHJ2DMM7MbPmLZljDH1qpzzLsWdiN9pHh6qvkRVDTi/07+eNGch/oLU4w== + +camelize@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/camelize/-/camelize-1.0.0.tgz#164a5483e630fa4321e5af07020e531831b2609b" + integrity sha1-FkpUg+Yw+kMh5a8HAg5TGDGyYJs= + +caniuse-api@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/caniuse-api/-/caniuse-api-3.0.0.tgz#5e4d90e2274961d46291997df599e3ed008ee4c0" + integrity sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw== + dependencies: + browserslist "^4.0.0" + caniuse-lite "^1.0.0" + lodash.memoize "^4.1.2" + lodash.uniq "^4.5.0" + +caniuse-lite@^1.0.0, caniuse-lite@^1.0.30000981, caniuse-lite@^1.0.30001035, caniuse-lite@^1.0.30001043, caniuse-lite@^1.0.30001061: + version "1.0.30001084" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001084.tgz#00e471931eaefbeef54f46aa2203914d3c165669" + integrity sha512-ftdc5oGmhEbLUuMZ/Qp3mOpzfZLCxPYKcvGv6v2dJJ+8EdqcvZRbAGOiLmkM/PV1QGta/uwBs8/nCl6sokDW6w== + +capture-stack-trace@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/capture-stack-trace/-/capture-stack-trace-1.0.1.tgz#a6c0bbe1f38f3aa0b92238ecb6ff42c344d4135d" + integrity sha512-mYQLZnx5Qt1JgB1WEiMCf2647plpGeQ2NMR/5L0HNZzGQo4fuSPnK+wjfPnKZV0aiJDgzmWqqkV/g7JD+DW0qw== + +cardinal@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/cardinal/-/cardinal-2.1.1.tgz#7cc1055d822d212954d07b085dea251cc7bc5505" + integrity sha1-fMEFXYItISlU0HsIXeolHMe8VQU= + dependencies: + ansicolors "~0.3.2" + redeyed "~2.1.0" + +caw@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/caw/-/caw-2.0.1.tgz#6c3ca071fc194720883c2dc5da9b074bfc7e9e95" + integrity sha512-Cg8/ZSBEa8ZVY9HspcGUYaK63d/bN7rqS3CYCzEGUxuYv6UlmcjzDUz2fCFFHyTvUW5Pk0I+3hkA3iXlIj6guA== + dependencies: + get-proxy "^2.0.0" + 
isurl "^1.0.0-alpha5" + tunnel-agent "^0.6.0" + url-to-options "^1.0.1" + +ccount@^1.0.0, ccount@^1.0.3: + version "1.0.5" + resolved "https://registry.yarnpkg.com/ccount/-/ccount-1.0.5.tgz#ac82a944905a65ce204eb03023157edf29425c17" + integrity sha512-MOli1W+nfbPLlKEhInaxhRdp7KVLFxLN5ykwzHgLsLI3H3gs5jjFAK4Eoj3OzzcxCtumDaI8onoVDeQyWaNTkw== + +chalk@2.4.2, chalk@^2.0.0, chalk@^2.0.1, chalk@^2.4.1, chalk@^2.4.2: + version "2.4.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" + integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + +chalk@^0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.5.1.tgz#663b3a648b68b55d04690d49167aa837858f2174" + integrity sha1-Zjs6ZItotV0EaQ1JFnqoN4WPIXQ= + dependencies: + ansi-styles "^1.1.0" + escape-string-regexp "^1.0.0" + has-ansi "^0.1.0" + strip-ansi "^0.3.0" + supports-color "^0.2.0" + +chalk@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" + integrity sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg= + dependencies: + ansi-styles "^2.2.1" + escape-string-regexp "^1.0.2" + has-ansi "^2.0.0" + strip-ansi "^3.0.0" + supports-color "^2.0.0" + +chalk@^3.0.0, chalk@^3.0.0-beta.2: + version "3.0.0" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-3.0.0.tgz#3f73c2bf526591f574cc492c51e2456349f844e4" + integrity sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg== + dependencies: + ansi-styles "^4.1.0" + supports-color "^7.1.0" + +chalk@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.0.tgz#4e14870a618d9e2edd97dd8345fd9d9dc315646a" + integrity sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A== + dependencies: + ansi-styles "^4.1.0" + supports-color "^7.1.0" + +character-entities-html4@^1.0.0: + version "1.1.4" + resolved "https://registry.yarnpkg.com/character-entities-html4/-/character-entities-html4-1.1.4.tgz#0e64b0a3753ddbf1fdc044c5fd01d0199a02e125" + integrity sha512-HRcDxZuZqMx3/a+qrzxdBKBPUpxWEq9xw2OPZ3a/174ihfrQKVsFhqtthBInFy1zZ9GgZyFXOatNujm8M+El3g== + +character-entities-legacy@^1.0.0: + version "1.1.4" + resolved "https://registry.yarnpkg.com/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz#94bc1845dce70a5bb9d2ecc748725661293d8fc1" + integrity sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA== + +character-entities@^1.0.0: + version "1.2.4" + resolved "https://registry.yarnpkg.com/character-entities/-/character-entities-1.2.4.tgz#e12c3939b7eaf4e5b15e7ad4c5e28e1d48c5b16b" + integrity sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw== + +character-reference-invalid@^1.0.0: + version "1.1.4" + resolved "https://registry.yarnpkg.com/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz#083329cda0eae272ab3dbbf37e9a382c13af1560" + integrity sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg== + +chardet@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/chardet/-/chardet-0.7.0.tgz#90094849f0937f2eedc2425d0d28a9e5f0cbad9e" + integrity sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA== + +check-types@^8.0.3: + 
version "8.0.3" + resolved "https://registry.yarnpkg.com/check-types/-/check-types-8.0.3.tgz#3356cca19c889544f2d7a95ed49ce508a0ecf552" + integrity sha512-YpeKZngUmG65rLudJ4taU7VLkOCTMhNl/u4ctNC56LQS/zJTyNH0Lrtwm1tfTsbLlwvlfsA2d1c8vCf/Kh2KwQ== + +cheerio@^0.22.0: + version "0.22.0" + resolved "https://registry.yarnpkg.com/cheerio/-/cheerio-0.22.0.tgz#a9baa860a3f9b595a6b81b1a86873121ed3a269e" + integrity sha1-qbqoYKP5tZWmuBsahocxIe06Jp4= + dependencies: + css-select "~1.2.0" + dom-serializer "~0.1.0" + entities "~1.1.1" + htmlparser2 "^3.9.1" + lodash.assignin "^4.0.9" + lodash.bind "^4.1.4" + lodash.defaults "^4.0.1" + lodash.filter "^4.4.0" + lodash.flatten "^4.2.0" + lodash.foreach "^4.3.0" + lodash.map "^4.4.0" + lodash.merge "^4.4.0" + lodash.pick "^4.2.1" + lodash.reduce "^4.4.0" + lodash.reject "^4.4.0" + lodash.some "^4.4.0" + +chokidar@^2.1.8: + version "2.1.8" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-2.1.8.tgz#804b3a7b6a99358c3c5c61e71d8728f041cff917" + integrity sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg== + dependencies: + anymatch "^2.0.0" + async-each "^1.0.1" + braces "^2.3.2" + glob-parent "^3.1.0" + inherits "^2.0.3" + is-binary-path "^1.0.0" + is-glob "^4.0.0" + normalize-path "^3.0.0" + path-is-absolute "^1.0.0" + readdirp "^2.2.1" + upath "^1.1.1" + optionalDependencies: + fsevents "^1.2.7" + +chokidar@^3.0.2: + version "3.4.1" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.4.1.tgz#e905bdecf10eaa0a0b1db0c664481cc4cbc22ba1" + integrity sha512-TQTJyr2stihpC4Sya9hs2Xh+O2wf+igjL36Y75xx2WdHuiICcn/XJza46Jwt0eT5hVpQOzo3FpY3cj3RVYLX0g== + dependencies: + anymatch "~3.1.1" + braces "~3.0.2" + glob-parent "~5.1.0" + is-binary-path "~2.1.0" + is-glob "~4.0.1" + normalize-path "~3.0.0" + readdirp "~3.4.0" + optionalDependencies: + fsevents "~2.1.2" + +chokidar@^3.3.0, chokidar@^3.4.0: + version "3.4.0" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.4.0.tgz#b30611423ce376357c765b9b8f904b9fba3c0be8" + integrity sha512-aXAaho2VJtisB/1fg1+3nlLJqGOuewTzQpd/Tz0yTg2R0e4IGtshYvtjowyEumcBv2z+y4+kc75Mz7j5xJskcQ== + dependencies: + anymatch "~3.1.1" + braces "~3.0.2" + glob-parent "~5.1.0" + is-binary-path "~2.1.0" + is-glob "~4.0.1" + normalize-path "~3.0.0" + readdirp "~3.4.0" + optionalDependencies: + fsevents "~2.1.2" + +chownr@^1.1.1, chownr@^1.1.2: + version "1.1.4" + resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.1.4.tgz#6fc9d7b42d32a583596337666e7d08084da2cc6b" + integrity sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg== + +chrome-trace-event@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/chrome-trace-event/-/chrome-trace-event-1.0.2.tgz#234090ee97c7d4ad1a2c4beae27505deffc608a4" + integrity sha512-9e/zx1jw7B4CO+c/RXoCsfg/x1AfUBioy4owYH0bJprEYAx5hRFLRhWBqHAG57D0ZM4H7vxbP7bPe0VwhQRYDQ== + dependencies: + tslib "^1.9.0" + +ci-info@^1.5.0, ci-info@^1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-1.6.0.tgz#2ca20dbb9ceb32d4524a683303313f0304b1e497" + integrity sha512-vsGdkwSCDpWmP80ncATX7iea5DWQemg1UgCW5J8tqjU3lYw4FBYuj89J0CTVomA7BEfvSZd84GmHko+MxFQU2A== + +ci-info@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-2.0.0.tgz#67a9e964be31a51e15e5010d58e6f12834002f46" + integrity sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ== + +cipher-base@^1.0.0, cipher-base@^1.0.1, cipher-base@^1.0.3: + version 
"1.0.4" + resolved "https://registry.yarnpkg.com/cipher-base/-/cipher-base-1.0.4.tgz#8760e4ecc272f4c363532f926d874aae2c1397de" + integrity sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q== + dependencies: + inherits "^2.0.1" + safe-buffer "^5.0.1" + +class-utils@^0.3.5: + version "0.3.6" + resolved "https://registry.yarnpkg.com/class-utils/-/class-utils-0.3.6.tgz#f93369ae8b9a7ce02fd41faad0ca83033190c463" + integrity sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg== + dependencies: + arr-union "^3.1.0" + define-property "^0.2.5" + isobject "^3.0.0" + static-extend "^0.1.1" + +classnames@^2.2.5, classnames@^2.2.6: + version "2.2.6" + resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.2.6.tgz#43935bffdd291f326dad0a205309b38d00f650ce" + integrity sha512-JR/iSQOSt+LQIWwrwEzJ9uk0xfN3mTVYMwt1Ir5mUcSN6pU+V4zQFFaJsclJbPuAUQH+yfWef6tm7l1quW3C8Q== + +clean-css@^4.2.3: + version "4.2.3" + resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-4.2.3.tgz#507b5de7d97b48ee53d84adb0160ff6216380f78" + integrity sha512-VcMWDN54ZN/DS+g58HYL5/n4Zrqe8vHJpGA8KdgUXFU4fuP/aHNw8eld9SyEIyabIMJX/0RaY/fplOo5hYLSFA== + dependencies: + source-map "~0.6.0" + +clean-deep@^3.0.2, clean-deep@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/clean-deep/-/clean-deep-3.3.0.tgz#00509a2cb431fa83fb202aa759534681f0309172" + integrity sha512-zblSorO16fXSDnBU9SQay+WL6TLM8Lkh1pKiuspu/Ntgy5BPDXORZKj3F/6fgcBZ7c172Ppy8xWRo7J4D/mNfQ== + dependencies: + lodash.isempty "^4.4.0" + lodash.isplainobject "^4.0.6" + lodash.transform "^4.6.0" + +clean-stack@^2.0.0, clean-stack@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/clean-stack/-/clean-stack-2.2.0.tgz#ee8472dbb129e727b31e8a10a427dee9dfe4008b" + integrity sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A== + +clean-stack@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/clean-stack/-/clean-stack-3.0.0.tgz#a7c249369fcf0f33c7888c20ea3f3dc79620211f" + integrity sha512-RHxtgFvXsRQ+1AM7dlozLDY7ssmvUUh0XEnfnyhYgJTO6beNZHBogiaCwGM9Q3rFrUkYxOtsZRC0zAturg5bjg== + dependencies: + escape-string-regexp "4.0.0" + +cli-boxes@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/cli-boxes/-/cli-boxes-1.0.0.tgz#4fa917c3e59c94a004cd61f8ee509da651687143" + integrity sha1-T6kXw+WclKAEzWH47lCdplFocUM= + +cli-boxes@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/cli-boxes/-/cli-boxes-2.2.0.tgz#538ecae8f9c6ca508e3c3c95b453fe93cb4c168d" + integrity sha512-gpaBrMAizVEANOpfZp/EEUixTXDyGt7DFzdK5hU+UbWt/J0lB0w20ncZj59Z9a93xHb9u12zF5BS6i9RKbtg4w== + +cli-cursor@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-2.1.0.tgz#b35dac376479facc3e94747d41d0d0f5238ffcb5" + integrity sha1-s12sN2R5+sw+lHR9QdDQ9SOP/LU= + dependencies: + restore-cursor "^2.0.0" + +cli-cursor@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-3.1.0.tgz#264305a7ae490d1d03bf0c9ba7c925d1753af307" + integrity sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw== + dependencies: + restore-cursor "^3.1.0" + +cli-progress@^3.4.0: + version "3.8.2" + resolved "https://registry.yarnpkg.com/cli-progress/-/cli-progress-3.8.2.tgz#abaf1fc6d6401351f16f068117a410554a0eb8c7" + integrity sha512-qRwBxLldMSfxB+YGFgNRaj5vyyHe1yMpVeDL79c+7puGujdKJHQHydgqXDcrkvQgJ5U/d3lpf6vffSoVVUftVQ== + dependencies: + colors 
"^1.1.2" + string-width "^4.2.0" + +cli-spinners@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/cli-spinners/-/cli-spinners-1.3.1.tgz#002c1990912d0d59580c93bd36c056de99e4259a" + integrity sha512-1QL4544moEsDVH9T/l6Cemov/37iv1RtoKf7NJ04A60+4MREXNfx/QvavbH6QoGdsD4N4Mwy49cmaINR/o2mdg== + +cli-spinners@^2.0.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/cli-spinners/-/cli-spinners-2.4.0.tgz#c6256db216b878cfba4720e719cec7cf72685d7f" + integrity sha512-sJAofoarcm76ZGpuooaO0eDy8saEy+YoZBLjC4h8srt4jeBnkYeOgqxgsJQTpyt2LjI5PTfLJHSL+41Yu4fEJA== + +cli-ux@^4.9.0: + version "4.9.3" + resolved "https://registry.yarnpkg.com/cli-ux/-/cli-ux-4.9.3.tgz#4c3e070c1ea23eef010bbdb041192e0661be84ce" + integrity sha512-/1owvF0SZ5Gn54cgrikJ0QskgTzeg30HGjkmjFoaHDJzAqFpuX1DBpFR8aLvsE1J5s9MgeYRENQK4BFwOag5VA== + dependencies: + "@oclif/errors" "^1.2.2" + "@oclif/linewrap" "^1.0.0" + "@oclif/screen" "^1.0.3" + ansi-escapes "^3.1.0" + ansi-styles "^3.2.1" + cardinal "^2.1.1" + chalk "^2.4.1" + clean-stack "^2.0.0" + extract-stack "^1.0.0" + fs-extra "^7.0.0" + hyperlinker "^1.0.0" + indent-string "^3.2.0" + is-wsl "^1.1.0" + lodash "^4.17.11" + password-prompt "^1.0.7" + semver "^5.6.0" + strip-ansi "^5.0.0" + supports-color "^5.5.0" + supports-hyperlinks "^1.0.1" + treeify "^1.1.0" + tslib "^1.9.3" + +cli-ux@^5.2.1: + version "5.4.9" + resolved "https://registry.yarnpkg.com/cli-ux/-/cli-ux-5.4.9.tgz#2e6dc990d977efea2592daa14f78ace092666e1a" + integrity sha512-4yCKJbFQqNQxf1v0E5T5aBJLt3SbW6dXc/R7zHp4ycdPMg9fAy5f2vhPsWgXEGCMQg+fgN0Sp7EYcZ1XGkFyUA== + dependencies: + "@oclif/command" "^1.6.0" + "@oclif/errors" "^1.2.1" + "@oclif/linewrap" "^1.0.0" + "@oclif/screen" "^1.0.3" + ansi-escapes "^4.3.0" + ansi-styles "^4.2.0" + cardinal "^2.1.1" + chalk "^3.0.0" + clean-stack "^3.0.0" + cli-progress "^3.4.0" + extract-stack "^2.0.0" + fs-extra "^9.0.1" + hyperlinker "^1.0.0" + indent-string "^4.0.0" + is-wsl "^2.2.0" + js-yaml "^3.13.1" + lodash "^4.17.11" + natural-orderby "^2.0.1" + object-treeify "^1.1.4" + password-prompt "^1.1.2" + semver "^5.6.0" + string-width "^4.2.0" + strip-ansi "^5.1.0" + supports-color "^7.1.0" + supports-hyperlinks "^1.0.1" + tslib "^2.0.0" + +cli-width@^2.0.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-2.2.1.tgz#b0433d0b4e9c847ef18868a4ef16fd5fc8271c48" + integrity sha512-GRMWDxpOB6Dgk2E5Uo+3eEBvtOOlimMmpbFiKuLFnQzYDavtLFY3K5ona41jgN/WdRZtG7utuVSVTL4HbZHGkw== + +cli-width@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-3.0.0.tgz#a2f48437a2caa9a22436e794bf071ec9e61cedf6" + integrity sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw== + +clipboard@^2.0.0: + version "2.0.6" + resolved "https://registry.yarnpkg.com/clipboard/-/clipboard-2.0.6.tgz#52921296eec0fdf77ead1749421b21c968647376" + integrity sha512-g5zbiixBRk/wyKakSwCKd7vQXDjFnAMGHoEyBogG/bw9kTD9GvdAvaoRR1ALcEzt3pVKxZR0pViekPMIS0QyGg== + dependencies: + good-listener "^1.2.2" + select "^1.1.2" + tiny-emitter "^2.0.0" + +cliui@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-4.1.0.tgz#348422dbe82d800b3022eef4f6ac10bf2e4d1b49" + integrity sha512-4FG+RSG9DL7uEwRUZXZn3SS34DiDPfzP0VOiEwtUWlE+AR2EIg+hSyvrIgUUfhdgR/UkAeW2QHgeP+hWrXs7jQ== + dependencies: + string-width "^2.1.1" + strip-ansi "^4.0.0" + wrap-ansi "^2.0.0" + +cliui@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-5.0.0.tgz#deefcfdb2e800784aa34f46fa08e06851c7bbbc5" + 
integrity sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA== + dependencies: + string-width "^3.1.0" + strip-ansi "^5.2.0" + wrap-ansi "^5.1.0" + +cliui@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-6.0.0.tgz#511d702c0c4e41ca156d7d0e96021f23e13225b1" + integrity sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ== + dependencies: + string-width "^4.2.0" + strip-ansi "^6.0.0" + wrap-ansi "^6.2.0" + +clone-deep@^0.2.4: + version "0.2.4" + resolved "https://registry.yarnpkg.com/clone-deep/-/clone-deep-0.2.4.tgz#4e73dd09e9fb971cc38670c5dced9c1896481cc6" + integrity sha1-TnPdCen7lxzDhnDF3O2cGJZIHMY= + dependencies: + for-own "^0.1.3" + is-plain-object "^2.0.1" + kind-of "^3.0.2" + lazy-cache "^1.0.3" + shallow-clone "^0.1.2" + +clone-response@1.0.2, clone-response@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/clone-response/-/clone-response-1.0.2.tgz#d1dc973920314df67fbeb94223b4ee350239e96b" + integrity sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws= + dependencies: + mimic-response "^1.0.0" + +clone@^1.0.2: + version "1.0.4" + resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.4.tgz#da309cc263df15994c688ca902179ca3c7cd7c7e" + integrity sha1-2jCcwmPfFZlMaIypAheco8fNfH4= + +clsx@^1.1.0, clsx@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/clsx/-/clsx-1.1.1.tgz#98b3134f9abbdf23b2663491ace13c5c03a73188" + integrity sha512-6/bPho624p3S2pMyvP5kKBPXnI3ufHLObBFCfgx+LkeR5lg2XYy2hqZqUf45ypD8COn2bhgGJSUE+l5dhNBieA== + +co@^4.6.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" + integrity sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ= + +coa@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/coa/-/coa-2.0.2.tgz#43f6c21151b4ef2bf57187db0d73de229e3e7ec3" + integrity sha512-q5/jG+YQnSy4nRTV4F7lPepBJZ8qBNJJDBuJdoejDyLXgmL7IEo+Le2JDZudFTFt7mrCqIRaSjws4ygRCTCAXA== + dependencies: + "@types/q" "^1.5.1" + chalk "^2.4.1" + q "^1.1.2" + +code-error-fragment@0.0.230: + version "0.0.230" + resolved "https://registry.yarnpkg.com/code-error-fragment/-/code-error-fragment-0.0.230.tgz#d736d75c832445342eca1d1fedbf17d9618b14d7" + integrity sha512-cadkfKp6932H8UkhzE/gcUqhRMNf8jHzkAN7+5Myabswaghu4xABTgPHDCjW+dBAJxj/SpkTYokpzDqY4pCzQw== + +code-point-at@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77" + integrity sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c= + +collapse-white-space@^1.0.0, collapse-white-space@^1.0.2: + version "1.0.6" + resolved "https://registry.yarnpkg.com/collapse-white-space/-/collapse-white-space-1.0.6.tgz#e63629c0016665792060dbbeb79c42239d2c5287" + integrity sha512-jEovNnrhMuqyCcjfEJA56v0Xq8SkIoPKDyaHahwo3POf4qcSXqMYuwNcOTzp74vTsR9Tn08z4MxWqAhcekogkQ== + +collection-visit@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/collection-visit/-/collection-visit-1.0.0.tgz#4bc0373c164bc3291b4d368c829cf1a80a59dca0" + integrity sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA= + dependencies: + map-visit "^1.0.0" + object-visit "^1.0.0" + +color-convert@^1.9.0, color-convert@^1.9.1: + version "1.9.3" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" + integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== + dependencies: + color-name "1.1.3" + +color-convert@^2.0.1: + version 
"2.0.1" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3" + integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== + dependencies: + color-name "~1.1.4" + +color-name@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" + integrity sha1-p9BVi9icQveV3UIyj3QIMcpTvCU= + +color-name@^1.0.0, color-name@~1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" + integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== + +color-string@^1.5.2: + version "1.5.3" + resolved "https://registry.yarnpkg.com/color-string/-/color-string-1.5.3.tgz#c9bbc5f01b58b5492f3d6857459cb6590ce204cc" + integrity sha512-dC2C5qeWoYkxki5UAXapdjqO672AM4vZuPGRQfO8b5HKuKGBbKWpITyDYN7TOFKvRW7kOgAn3746clDBMDJyQw== + dependencies: + color-name "^1.0.0" + simple-swizzle "^0.2.2" + +color@3.0.x: + version "3.0.0" + resolved "https://registry.yarnpkg.com/color/-/color-3.0.0.tgz#d920b4328d534a3ac8295d68f7bd4ba6c427be9a" + integrity sha512-jCpd5+s0s0t7p3pHQKpnJ0TpQKKdleP71LWcA0aqiljpiuAkOSUFN/dyH8ZwF0hRmFlrIuRhufds1QyEP9EB+w== + dependencies: + color-convert "^1.9.1" + color-string "^1.5.2" + +color@^3.0.0: + version "3.1.2" + resolved "https://registry.yarnpkg.com/color/-/color-3.1.2.tgz#68148e7f85d41ad7649c5fa8c8106f098d229e10" + integrity sha512-vXTJhHebByxZn3lDvDJYw4lR5+uB3vuoHsuYA5AKuxRVn5wzzIfQKGLBmgdVRHKTJYeK5rvJcHnrd0Li49CFpg== + dependencies: + color-convert "^1.9.1" + color-string "^1.5.2" + +colors@^1.1.2, colors@^1.2.1: + version "1.4.0" + resolved "https://registry.yarnpkg.com/colors/-/colors-1.4.0.tgz#c50491479d4c1bdaed2c9ced32cf7c7dc2360f78" + integrity sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA== + +colorspace@1.1.x: + version "1.1.2" + resolved "https://registry.yarnpkg.com/colorspace/-/colorspace-1.1.2.tgz#e0128950d082b86a2168580796a0aa5d6c68d8c5" + integrity sha512-vt+OoIP2d76xLhjwbBaucYlNSpPsrJWPlBTtwCpQKIu6/CSMutyzX93O/Do0qzpH3YoHEes8YEFXyZ797rEhzQ== + dependencies: + color "3.0.x" + text-hex "1.0.x" + +combined-stream@^1.0.8: + version "1.0.8" + resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" + integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg== + dependencies: + delayed-stream "~1.0.0" + +comma-separated-tokens@^1.0.0: + version "1.0.8" + resolved "https://registry.yarnpkg.com/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz#632b80b6117867a158f1080ad498b2fbe7e3f5ea" + integrity sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw== + +commander@^2.18.0, commander@^2.20.0, commander@^2.20.3, commander@^2.3.0, commander@^2.8.1: + version "2.20.3" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" + integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== + +commander@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/commander/-/commander-3.0.2.tgz#6837c3fb677ad9933d1cfba42dd14d5117d6b39e" + integrity sha512-Gar0ASD4BDyKC4hl4DwHqDrmvjoxWKZigVnAbn5H1owvm4CxCPdb0HQDehwNYMJpla5+M2tPmPARzhtYuwpHow== + +commander@^4.0.1, 
commander@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/commander/-/commander-4.1.1.tgz#9fd602bd936294e9e9ef46a3f4d6964044b18068" + integrity sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA== + +commander@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-5.1.0.tgz#46abbd1652f8e059bddaef99bbdcb2ad9cf179ae" + integrity sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg== + +common-path-prefix@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/common-path-prefix/-/common-path-prefix-2.0.0.tgz#25b0265f318bf093a6ec630813aac97b29dea230" + integrity sha512-Lb9qbwwyQdRDmyib0qur7BC9/GHIbviTaQebayFsGC/n77AwFhZINCcJkQx2qVv9LJsA8F5ex65F2qrOfWGUyw== + +commondir@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b" + integrity sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs= + +component-emitter@^1.2.1: + version "1.3.0" + resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.3.0.tgz#16e4070fba8ae29b679f2215853ee181ab2eabc0" + integrity sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg== + +component-props@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/component-props/-/component-props-1.1.1.tgz#f9b7df9b9927b6e6d97c9bd272aa867670f34944" + integrity sha1-+bffm5kntubZfJvScqqGdnDzSUQ= + +component-xor@0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/component-xor/-/component-xor-0.0.4.tgz#c55d83ccc1b94cd5089a4e93fa7891c7263e59aa" + integrity sha1-xV2DzMG5TNUImk6T+niRxyY+Wao= + +compress-commons@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/compress-commons/-/compress-commons-3.0.0.tgz#833944d84596e537224dd91cf92f5246823d4f1d" + integrity sha512-FyDqr8TKX5/X0qo+aVfaZ+PVmNJHJeckFBlq8jZGSJOgnynhfifoyl24qaqdUdDIBe0EVTHByN6NAkqYvE/2Xg== + dependencies: + buffer-crc32 "^0.2.13" + crc32-stream "^3.0.1" + normalize-path "^3.0.0" + readable-stream "^2.3.7" + +compressible@~2.0.16: + version "2.0.18" + resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.18.tgz#af53cca6b070d4c3c0750fbd77286a6d7cc46fba" + integrity sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg== + dependencies: + mime-db ">= 1.43.0 < 2" + +compression@^1.7.4: + version "1.7.4" + resolved "https://registry.yarnpkg.com/compression/-/compression-1.7.4.tgz#95523eff170ca57c29a0ca41e6fe131f41e5bb8f" + integrity sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ== + dependencies: + accepts "~1.3.5" + bytes "3.0.0" + compressible "~2.0.16" + debug "2.6.9" + on-headers "~1.0.2" + safe-buffer "5.1.2" + vary "~1.1.2" + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + integrity sha1-2Klr13/Wjfd5OnMDajug1UBdR3s= + +concat-stream@^1.5.0: + version "1.6.2" + resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.2.tgz#904bdf194cd3122fc675c77fc4ac3d4ff0fd1a34" + integrity sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw== + dependencies: + buffer-from "^1.0.0" + inherits "^2.0.3" + readable-stream "^2.2.2" + typedarray "^0.0.6" + +concordance@^4.0.0: + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/concordance/-/concordance-4.0.0.tgz#5932fdee397d129bdbc3a1885fbe69839b1b7e15" + integrity sha512-l0RFuB8RLfCS0Pt2Id39/oCPykE01pyxgAFypWTlaGRgvLkZrtczZ8atEHpTeEIW+zYWXTBuA9cCSeEOScxReQ== + dependencies: + date-time "^2.1.0" + esutils "^2.0.2" + fast-diff "^1.1.2" + js-string-escape "^1.0.1" + lodash.clonedeep "^4.5.0" + lodash.flattendeep "^4.4.0" + lodash.islength "^4.0.1" + lodash.merge "^4.6.1" + md5-hex "^2.0.0" + semver "^5.5.1" + well-known-symbols "^2.0.0" + +config-chain@^1.1.11: + version "1.1.12" + resolved "https://registry.yarnpkg.com/config-chain/-/config-chain-1.1.12.tgz#0fde8d091200eb5e808caf25fe618c02f48e4efa" + integrity sha512-a1eOIcu8+7lUInge4Rpf/n4Krkf3Dd9lqhljRzII1/Zno/kRtUWnznPO3jOKBmTEktkt3fkxisUcivoj0ebzoA== + dependencies: + ini "^1.3.4" + proto-list "~1.2.1" + +configstore@^3.0.0: + version "3.1.2" + resolved "https://registry.yarnpkg.com/configstore/-/configstore-3.1.2.tgz#c6f25defaeef26df12dd33414b001fe81a543f8f" + integrity sha512-vtv5HtGjcYUgFrXc6Kx747B83MRRVS5R1VTEQoXvuP+kMI+if6uywV0nDGoiydJRy4yk7h9od5Og0kxx4zUXmw== + dependencies: + dot-prop "^4.1.0" + graceful-fs "^4.1.2" + make-dir "^1.0.0" + unique-string "^1.0.0" + write-file-atomic "^2.0.0" + xdg-basedir "^3.0.0" + +configstore@^5.0.0, configstore@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/configstore/-/configstore-5.0.1.tgz#d365021b5df4b98cdd187d6a3b0e3f6a7cc5ed96" + integrity sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA== + dependencies: + dot-prop "^5.2.0" + graceful-fs "^4.1.2" + make-dir "^3.0.0" + unique-string "^2.0.0" + write-file-atomic "^3.0.0" + xdg-basedir "^4.0.0" + +connect-history-api-fallback@^1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/connect-history-api-fallback/-/connect-history-api-fallback-1.6.0.tgz#8b32089359308d111115d81cad3fceab888f97bc" + integrity sha512-e54B99q/OUoH64zYYRf3HBP5z24G38h5D3qXu23JGRoigpX5Ss4r9ZnDk3g0Z8uQC2x2lPaJ+UlWBc1ZWBWdLg== + +consola@^2.10.0: + version "2.13.0" + resolved "https://registry.yarnpkg.com/consola/-/consola-2.13.0.tgz#5a4dc75e1b1fddbd8b10728e7790a2e54efeeeb7" + integrity sha512-Jw+8qpL0yrpfqH9m90fWoDRQyn8TYU6Aegpl4UofoP81VYvQLoOWMpFw2vQ3U/cyLRRzTc/CyNC6YYVzZFU8Eg== + +console-browserify@^1.1.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/console-browserify/-/console-browserify-1.2.0.tgz#67063cef57ceb6cf4993a2ab3a55840ae8c49336" + integrity sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA== + +constants-browserify@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/constants-browserify/-/constants-browserify-1.0.0.tgz#c20b96d8c617748aaf1c16021760cd27fcb8cb75" + integrity sha1-wguW2MYXdIqvHBYCF2DNJ/y4y3U= + +content-disposition@0.5.2: + version "0.5.2" + resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.2.tgz#0cf68bb9ddf5f2be7961c3a85178cb85dba78cb4" + integrity sha1-DPaLud318r55YcOoUXjLhdunjLQ= + +content-disposition@0.5.3, content-disposition@^0.5.2: + version "0.5.3" + resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.3.tgz#e130caf7e7279087c5616c2007d0485698984fbd" + integrity sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g== + dependencies: + safe-buffer "5.1.2" + +content-type@^1.0.4, content-type@~1.0.4: + version "1.0.4" + resolved 
"https://registry.yarnpkg.com/content-type/-/content-type-1.0.4.tgz#e138cc75e040c727b1966fe5e5f8c9aee256fe3b" + integrity sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA== + +convert-source-map@^1.5.0, convert-source-map@^1.5.1, convert-source-map@^1.7.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.7.0.tgz#17a2cb882d7f77d3490585e2ce6c524424a3a442" + integrity sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA== + dependencies: + safe-buffer "~5.1.1" + +cookie-signature@1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c" + integrity sha1-4wOogrNCzD7oylE6eZmXNNqzriw= + +cookie@0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.4.0.tgz#beb437e7022b3b6d49019d088665303ebe9c14ba" + integrity sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg== + +cookie@^0.4.0: + version "0.4.1" + resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.4.1.tgz#afd713fe26ebd21ba95ceb61f9a8116e50a537d1" + integrity sha512-ZwrFkGJxUR3EIoXtO+yVE69Eb7KlixbaeAWfBQB9vVsNn/o+Yw69gBWSSDK825hQNdN+wF8zELf3dFNl/kxkUA== + +copy-concurrently@^1.0.0: + version "1.0.5" + resolved "https://registry.yarnpkg.com/copy-concurrently/-/copy-concurrently-1.0.5.tgz#92297398cae34937fcafd6ec8139c18051f0b5e0" + integrity sha512-f2domd9fsVDFtaFcbaRZuYXwtdmnzqbADSwhSWYxYB/Q8zsdUUFMXVRwXGDMWmbEzAn1kdRrtI1T/KTFOL4X2A== + dependencies: + aproba "^1.1.1" + fs-write-stream-atomic "^1.0.8" + iferr "^0.1.5" + mkdirp "^0.5.1" + rimraf "^2.5.4" + run-queue "^1.0.0" + +copy-descriptor@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/copy-descriptor/-/copy-descriptor-0.1.1.tgz#676f6eb3c39997c2ee1ac3a924fd6124748f578d" + integrity sha1-Z29us8OZl8LuGsOpJP1hJHSPV40= + +copy-template-dir@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/copy-template-dir/-/copy-template-dir-1.4.0.tgz#cb2bd62415abe963a53bb867bb24379df3998112" + integrity sha512-xkXSJhvKz4MfLbVkZ7GyCaFo4ciB3uKI/HHzkGwj1eyTH5+7RTFxW5CE0irWAZgV5oFcO9hd6+NVXAtY9hlo7Q== + dependencies: + end-of-stream "^1.1.0" + graceful-fs "^4.1.3" + maxstache "^1.0.0" + maxstache-stream "^1.0.0" + mkdirp "^0.5.1" + noop2 "^2.0.0" + pump "^1.0.0" + readdirp "^2.0.0" + run-parallel "^1.1.4" + +copy-text-to-clipboard@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/copy-text-to-clipboard/-/copy-text-to-clipboard-2.2.0.tgz#329dd6daf8c42034c763ace567418401764579ae" + integrity sha512-WRvoIdnTs1rgPMkgA2pUOa/M4Enh2uzCwdKsOMYNAJiz/4ZvEJgmbF4OmninPmlFdAWisfeh0tH+Cpf7ni3RqQ== + +copy-webpack-plugin@^5.0.5: + version "5.1.1" + resolved "https://registry.yarnpkg.com/copy-webpack-plugin/-/copy-webpack-plugin-5.1.1.tgz#5481a03dea1123d88a988c6ff8b78247214f0b88" + integrity sha512-P15M5ZC8dyCjQHWwd4Ia/dm0SgVvZJMYeykVIVYXbGyqO4dWB5oyPHp9i7wjwo5LhtlhKbiBCdS2NvM07Wlybg== + dependencies: + cacache "^12.0.3" + find-cache-dir "^2.1.0" + glob-parent "^3.1.0" + globby "^7.1.1" + is-glob "^4.0.1" + loader-utils "^1.2.3" + minimatch "^3.0.4" + normalize-path "^3.0.0" + p-limit "^2.2.1" + schema-utils "^1.0.0" + serialize-javascript "^2.1.2" + webpack-log "^2.0.0" + +core-js-compat@^3.6.2: + version "3.6.5" + resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.6.5.tgz#2a51d9a4e25dfd6e690251aa81f99e3c05481f1c" + integrity 
sha512-7ItTKOhOZbznhXAQ2g/slGg1PJV5zDO/WdkTwi7UEOJmkvsE32PWvx6mKtDjiMpjnR2CNf6BAD6sSxIlv7ptng== + dependencies: + browserslist "^4.8.5" + semver "7.0.0" + +core-js-pure@^3.0.0: + version "3.6.5" + resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.6.5.tgz#c79e75f5e38dbc85a662d91eea52b8256d53b813" + integrity sha512-lacdXOimsiD0QyNf9BC/mxivNJ/ybBGJXQFKzRekp1WTHoVUWsUHEn+2T8GJAzzIhyOuXA+gOxCVN3l+5PLPUA== + +core-js@^1.0.0: + version "1.2.7" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-1.2.7.tgz#652294c14651db28fa93bd2d5ff2983a4f08c636" + integrity sha1-ZSKUwUZR2yj6k70tX/KYOk8IxjY= + +core-js@^2.4.1, core-js@^2.6.5: + version "2.6.11" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.6.11.tgz#38831469f9922bded8ee21c9dc46985e0399308c" + integrity sha512-5wjnpaT/3dV+XB4borEsnAYQchn00XSgTAWKDkEqv+K8KevjbzmofK6hfJ9TZIlpj2N0xQpazy7PiRQiWHqzWg== + +core-js@^3.2.1, core-js@^3.6.5: + version "3.6.5" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.6.5.tgz#7395dc273af37fb2e50e9bd3d9fe841285231d1a" + integrity sha512-vZVEEwZoIsI+vPEuoF9Iqf5H7/M3eeQqWlQnYa8FSKKePuYTf5MWnxb5SDAzCa60b3JBRS5g9b+Dq7b1y/RCrA== + +core-util-is@~1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" + integrity sha1-tf1UIgqivFq1eqtxQMlAdUUDwac= + +cosmiconfig@^5.0.0: + version "5.2.1" + resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-5.2.1.tgz#040f726809c591e77a17c0a3626ca45b4f168b1a" + integrity sha512-H65gsXo1SKjf8zmrJ67eJk8aIRKV5ff2D4uKZIBZShbhGSpEmsQOPW/SKMKYhSTrqR7ufy6RP69rPogdaPh/kA== + dependencies: + import-fresh "^2.0.0" + is-directory "^0.3.1" + js-yaml "^3.13.1" + parse-json "^4.0.0" + +cosmiconfig@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-6.0.0.tgz#da4fee853c52f6b1e6935f41c1a2fc50bd4a9982" + integrity sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg== + dependencies: + "@types/parse-json" "^4.0.0" + import-fresh "^3.1.0" + parse-json "^5.0.0" + path-type "^4.0.0" + yaml "^1.7.2" + +cp-file@^6.1.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/cp-file/-/cp-file-6.2.0.tgz#40d5ea4a1def2a9acdd07ba5c0b0246ef73dc10d" + integrity sha512-fmvV4caBnofhPe8kOcitBwSn2f39QLjnAnGq3gO9dfd75mUytzKNZB1hde6QHunW2Rt+OwuBOMc3i1tNElbszA== + dependencies: + graceful-fs "^4.1.2" + make-dir "^2.0.0" + nested-error-stacks "^2.0.0" + pify "^4.0.1" + safe-buffer "^5.0.1" + +cp-file@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/cp-file/-/cp-file-7.0.0.tgz#b9454cfd07fe3b974ab9ea0e5f29655791a9b8cd" + integrity sha512-0Cbj7gyvFVApzpK/uhCtQ/9kE9UnYpxMzaq5nQQC/Dh4iaj5fxp7iEFIullrYwzj8nf0qnsI1Qsx34hAeAebvw== + dependencies: + graceful-fs "^4.1.2" + make-dir "^3.0.0" + nested-error-stacks "^2.0.0" + p-event "^4.1.0" + +cpy@^8.1.0: + version "8.1.0" + resolved "https://registry.yarnpkg.com/cpy/-/cpy-8.1.0.tgz#e8ac07f3caeb0113bd55326e5cda052c19fa6c60" + integrity sha512-XwlImkjPxMr01qXqC564VD4rfcDQ2eKtYmFlCy0ixsLRJ1cwYVUBh+v47jsQTO1IrmvdjqO813VpDQ0JiTuOdA== + dependencies: + arrify "^2.0.1" + cp-file "^7.0.0" + globby "^9.2.0" + has-glob "^1.0.0" + junk "^3.1.0" + nested-error-stacks "^2.1.0" + p-all "^2.1.0" + p-filter "^2.1.0" + p-map "^3.0.0" + +crc32-stream@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/crc32-stream/-/crc32-stream-3.0.1.tgz#cae6eeed003b0e44d739d279de5ae63b171b4e85" + integrity 
sha512-mctvpXlbzsvK+6z8kJwSJ5crm7yBwrQMTybJzMw1O4lLGJqjlDCXY2Zw7KheiA6XBEcBmfLx1D88mjRGVJtY9w== + dependencies: + crc "^3.4.4" + readable-stream "^3.4.0" + +crc@^3.4.4: + version "3.8.0" + resolved "https://registry.yarnpkg.com/crc/-/crc-3.8.0.tgz#ad60269c2c856f8c299e2c4cc0de4556914056c6" + integrity sha512-iX3mfgcTMIq3ZKLIsVFAbv7+Mc10kxabAGQb8HvjA1o3T1PIYprbakQ65d3I+2HGHt6nSKkM9PYjgoJO2KcFBQ== + dependencies: + buffer "^5.1.0" + +create-ecdh@^4.0.0: + version "4.0.3" + resolved "https://registry.yarnpkg.com/create-ecdh/-/create-ecdh-4.0.3.tgz#c9111b6f33045c4697f144787f9254cdc77c45ff" + integrity sha512-GbEHQPMOswGpKXM9kCWVrremUcBmjteUaQ01T9rkKCPDXfUHX0IoP9LpHYo2NPFampa4e+/pFDc3jQdxrxQLaw== + dependencies: + bn.js "^4.1.0" + elliptic "^6.0.0" + +create-emotion@^9.2.12: + version "9.2.12" + resolved "https://registry.yarnpkg.com/create-emotion/-/create-emotion-9.2.12.tgz#0fc8e7f92c4f8bb924b0fef6781f66b1d07cb26f" + integrity sha512-P57uOF9NL2y98Xrbl2OuiDQUZ30GVmASsv5fbsjF4Hlraip2kyAvMm+2PoYUvFFw03Fhgtxk3RqZSm2/qHL9hA== + dependencies: + "@emotion/hash" "^0.6.2" + "@emotion/memoize" "^0.6.1" + "@emotion/stylis" "^0.7.0" + "@emotion/unitless" "^0.6.2" + csstype "^2.5.2" + stylis "^3.5.0" + stylis-rule-sheet "^0.0.10" + +create-error-class@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/create-error-class/-/create-error-class-3.0.2.tgz#06be7abef947a3f14a30fd610671d401bca8b7b6" + integrity sha1-Br56vvlHo/FKMP1hBnHUAbyot7Y= + dependencies: + capture-stack-trace "^1.0.0" + +create-hash@^1.1.0, create-hash@^1.1.2, create-hash@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/create-hash/-/create-hash-1.2.0.tgz#889078af11a63756bcfb59bd221996be3a9ef196" + integrity sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg== + dependencies: + cipher-base "^1.0.1" + inherits "^2.0.1" + md5.js "^1.3.4" + ripemd160 "^2.0.1" + sha.js "^2.4.0" + +create-hmac@^1.1.0, create-hmac@^1.1.4, create-hmac@^1.1.7: + version "1.1.7" + resolved "https://registry.yarnpkg.com/create-hmac/-/create-hmac-1.1.7.tgz#69170c78b3ab957147b2b8b04572e47ead2243ff" + integrity sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg== + dependencies: + cipher-base "^1.0.3" + create-hash "^1.1.0" + inherits "^2.0.1" + ripemd160 "^2.0.0" + safe-buffer "^5.0.1" + sha.js "^2.4.8" + +create-react-context@0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/create-react-context/-/create-react-context-0.2.2.tgz#9836542f9aaa22868cd7d4a6f82667df38019dca" + integrity sha512-KkpaLARMhsTsgp0d2NA/R94F/eDLbhXERdIq3LvX2biCAXcDvHYoOqHfWCHf1+OLj+HKBotLG3KqaOOf+C1C+A== + dependencies: + fbjs "^0.8.0" + gud "^1.0.0" + +cross-spawn@7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.1.tgz#0ab56286e0f7c24e153d04cc2aa027e43a9a5d14" + integrity sha512-u7v4o84SwFpD32Z8IIcPZ6z1/ie24O6RU3RbtL5Y316l3KuHVPx9ItBgWQ6VlfAFnRnTtMUrsQ9MUUTuEZjogg== + dependencies: + path-key "^3.1.0" + shebang-command "^2.0.0" + which "^2.0.1" + +cross-spawn@^5.0.1: + version "5.1.0" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-5.1.0.tgz#e8bd0efee58fcff6f8f94510a0a554bbfa235449" + integrity sha1-6L0O/uWPz/b4+UUQoKVUu/ojVEk= + dependencies: + lru-cache "^4.0.1" + shebang-command "^1.2.0" + which "^1.2.9" + +cross-spawn@^6.0.0, cross-spawn@^6.0.5: + version "6.0.5" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-6.0.5.tgz#4a5ec7c64dfae22c3a14124dbacdee846d80cbc4" + integrity 
sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ== + dependencies: + nice-try "^1.0.4" + path-key "^2.0.1" + semver "^5.5.0" + shebang-command "^1.2.0" + which "^1.2.9" + +cross-spawn@^7.0.0: + version "7.0.3" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" + integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + dependencies: + path-key "^3.1.0" + shebang-command "^2.0.0" + which "^2.0.1" + +crypto-browserify@^3.11.0: + version "3.12.0" + resolved "https://registry.yarnpkg.com/crypto-browserify/-/crypto-browserify-3.12.0.tgz#396cf9f3137f03e4b8e532c58f698254e00f80ec" + integrity sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg== + dependencies: + browserify-cipher "^1.0.0" + browserify-sign "^4.0.0" + create-ecdh "^4.0.0" + create-hash "^1.1.0" + create-hmac "^1.1.0" + diffie-hellman "^5.0.0" + inherits "^2.0.1" + pbkdf2 "^3.0.3" + public-encrypt "^4.0.0" + randombytes "^2.0.0" + randomfill "^1.0.3" + +crypto-random-string@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/crypto-random-string/-/crypto-random-string-1.0.0.tgz#a230f64f568310e1498009940790ec99545bca7e" + integrity sha1-ojD2T1aDEOFJgAmUB5DsmVRbyn4= + +crypto-random-string@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/crypto-random-string/-/crypto-random-string-2.0.0.tgz#ef2a7a966ec11083388369baa02ebead229b30d5" + integrity sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA== + +css-blank-pseudo@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/css-blank-pseudo/-/css-blank-pseudo-0.1.4.tgz#dfdefd3254bf8a82027993674ccf35483bfcb3c5" + integrity sha512-LHz35Hr83dnFeipc7oqFDmsjHdljj3TQtxGGiNWSOsTLIAubSm4TEz8qCaKFpk7idaQ1GfWscF4E6mgpBysA1w== + dependencies: + postcss "^7.0.5" + +css-color-keywords@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/css-color-keywords/-/css-color-keywords-1.0.0.tgz#fea2616dc676b2962686b3af8dbdbe180b244e05" + integrity sha1-/qJhbcZ2spYmhrOvjb2+GAskTgU= + +css-color-names@0.0.4, css-color-names@^0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/css-color-names/-/css-color-names-0.0.4.tgz#808adc2e79cf84738069b646cb20ec27beb629e0" + integrity sha1-gIrcLnnPhHOAabZGyyDsJ762KeA= + +css-declaration-sorter@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/css-declaration-sorter/-/css-declaration-sorter-4.0.1.tgz#c198940f63a76d7e36c1e71018b001721054cb22" + integrity sha512-BcxQSKTSEEQUftYpBVnsH4SF05NTuBokb19/sBt6asXGKZ/6VP7PLG1CBCkFDYOnhXhPh0jMhO6xZ71oYHXHBA== + dependencies: + postcss "^7.0.1" + timsort "^0.3.0" + +css-has-pseudo@^0.10.0: + version "0.10.0" + resolved "https://registry.yarnpkg.com/css-has-pseudo/-/css-has-pseudo-0.10.0.tgz#3c642ab34ca242c59c41a125df9105841f6966ee" + integrity sha512-Z8hnfsZu4o/kt+AuFzeGpLVhFOGO9mluyHBaA2bA8aCGTwah5sT3WV/fTHH8UNZUytOIImuGPrl/prlb4oX4qQ== + dependencies: + postcss "^7.0.6" + postcss-selector-parser "^5.0.0-rc.4" + +css-loader@^3.4.2: + version "3.6.0" + resolved "https://registry.yarnpkg.com/css-loader/-/css-loader-3.6.0.tgz#2e4b2c7e6e2d27f8c8f28f61bffcd2e6c91ef645" + integrity sha512-M5lSukoWi1If8dhQAUCvj4H8vUt3vOnwbQBH9DdTm/s4Ym2B/3dPMtYZeJmq7Q3S3Pa+I94DcZ7pc9bP14cWIQ== + dependencies: + camelcase "^5.3.1" + cssesc "^3.0.0" + icss-utils "^4.1.1" + loader-utils "^1.2.3" + normalize-path "^3.0.0" + postcss 
"^7.0.32" + postcss-modules-extract-imports "^2.0.0" + postcss-modules-local-by-default "^3.0.2" + postcss-modules-scope "^2.2.0" + postcss-modules-values "^3.0.0" + postcss-value-parser "^4.1.0" + schema-utils "^2.7.0" + semver "^6.3.0" + +css-prefers-color-scheme@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/css-prefers-color-scheme/-/css-prefers-color-scheme-3.1.1.tgz#6f830a2714199d4f0d0d0bb8a27916ed65cff1f4" + integrity sha512-MTu6+tMs9S3EUqzmqLXEcgNRbNkkD/TGFvowpeoWJn5Vfq7FMgsmRQs9X5NXAURiOBmOxm/lLjsDNXDE6k9bhg== + dependencies: + postcss "^7.0.5" + +css-select-base-adapter@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/css-select-base-adapter/-/css-select-base-adapter-0.1.1.tgz#3b2ff4972cc362ab88561507a95408a1432135d7" + integrity sha512-jQVeeRG70QI08vSTwf1jHxp74JoZsr2XSgETae8/xC8ovSnL2WF87GTLO86Sbwdt2lK4Umg4HnnwMO4YF3Ce7w== + +css-select@^1.1.0, css-select@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/css-select/-/css-select-1.2.0.tgz#2b3a110539c5355f1cd8d314623e870b121ec858" + integrity sha1-KzoRBTnFNV8c2NMUYj6HCxIeyFg= + dependencies: + boolbase "~1.0.0" + css-what "2.1" + domutils "1.5.1" + nth-check "~1.0.1" + +css-select@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/css-select/-/css-select-2.1.0.tgz#6a34653356635934a81baca68d0255432105dbef" + integrity sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ== + dependencies: + boolbase "^1.0.0" + css-what "^3.2.1" + domutils "^1.7.0" + nth-check "^1.0.2" + +css-to-react-native@^2.2.2: + version "2.3.2" + resolved "https://registry.yarnpkg.com/css-to-react-native/-/css-to-react-native-2.3.2.tgz#e75e2f8f7aa385b4c3611c52b074b70a002f2e7d" + integrity sha512-VOFaeZA053BqvvvqIA8c9n0+9vFppVBAHCp6JgFTtTMU3Mzi+XnelJ9XC9ul3BqFzZyQ5N+H0SnwsWT2Ebchxw== + dependencies: + camelize "^1.0.0" + css-color-keywords "^1.0.0" + postcss-value-parser "^3.3.0" + +css-tree@1.0.0-alpha.37: + version "1.0.0-alpha.37" + resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.0.0-alpha.37.tgz#98bebd62c4c1d9f960ec340cf9f7522e30709a22" + integrity sha512-DMxWJg0rnz7UgxKT0Q1HU/L9BeJI0M6ksor0OgqOnF+aRCDWg/N2641HmVyU9KVIu0OVVWOb2IpC9A+BJRnejg== + dependencies: + mdn-data "2.0.4" + source-map "^0.6.1" + +css-tree@1.0.0-alpha.39: + version "1.0.0-alpha.39" + resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.0.0-alpha.39.tgz#2bff3ffe1bb3f776cf7eefd91ee5cba77a149eeb" + integrity sha512-7UvkEYgBAHRG9Nt980lYxjsTrCyHFN53ky3wVsDkiMdVqylqRt+Zc+jm5qw7/qyOvN2dHSYtX0e4MbCCExSvnA== + dependencies: + mdn-data "2.0.6" + source-map "^0.6.1" + +css-what@2.1: + version "2.1.3" + resolved "https://registry.yarnpkg.com/css-what/-/css-what-2.1.3.tgz#a6d7604573365fe74686c3f311c56513d88285f2" + integrity sha512-a+EPoD+uZiNfh+5fxw2nO9QwFa6nJe2Or35fGY6Ipw1R3R4AGz1d1TEZrCegvw2YTmZ0jXirGYlzxxpYSHwpEg== + +css-what@^3.2.1: + version "3.3.0" + resolved "https://registry.yarnpkg.com/css-what/-/css-what-3.3.0.tgz#10fec696a9ece2e591ac772d759aacabac38cd39" + integrity sha512-pv9JPyatiPaQ6pf4OvD/dbfm0o5LviWmwxNWzblYf/1u9QZd0ihV+PMwy5jdQWQ3349kZmKEx9WXuSka2dM4cg== + +cssdb@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/cssdb/-/cssdb-4.4.0.tgz#3bf2f2a68c10f5c6a08abd92378331ee803cddb0" + integrity sha512-LsTAR1JPEM9TpGhl/0p3nQecC2LJ0kD8X5YARu1hk/9I1gril5vDtMZyNxcEpxxDj34YNck/ucjuoUd66K03oQ== + +cssesc@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/cssesc/-/cssesc-2.0.0.tgz#3b13bd1bb1cb36e1bcb5a4dcd27f54c5dcb35703" + 
integrity sha512-MsCAG1z9lPdoO/IUMLSBWBSVxVtJ1395VGIQ+Fc2gNdkQ1hNDnQdw3YhA71WJCBW1vdwA0cAnk/DnW6bqoEUYg== + +cssesc@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/cssesc/-/cssesc-3.0.0.tgz#37741919903b868565e1c09ea747445cd18983ee" + integrity sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg== + +cssnano-preset-default@^4.0.7: + version "4.0.7" + resolved "https://registry.yarnpkg.com/cssnano-preset-default/-/cssnano-preset-default-4.0.7.tgz#51ec662ccfca0f88b396dcd9679cdb931be17f76" + integrity sha512-x0YHHx2h6p0fCl1zY9L9roD7rnlltugGu7zXSKQx6k2rYw0Hi3IqxcoAGF7u9Q5w1nt7vK0ulxV8Lo+EvllGsA== + dependencies: + css-declaration-sorter "^4.0.1" + cssnano-util-raw-cache "^4.0.1" + postcss "^7.0.0" + postcss-calc "^7.0.1" + postcss-colormin "^4.0.3" + postcss-convert-values "^4.0.1" + postcss-discard-comments "^4.0.2" + postcss-discard-duplicates "^4.0.2" + postcss-discard-empty "^4.0.1" + postcss-discard-overridden "^4.0.1" + postcss-merge-longhand "^4.0.11" + postcss-merge-rules "^4.0.3" + postcss-minify-font-values "^4.0.2" + postcss-minify-gradients "^4.0.2" + postcss-minify-params "^4.0.2" + postcss-minify-selectors "^4.0.2" + postcss-normalize-charset "^4.0.1" + postcss-normalize-display-values "^4.0.2" + postcss-normalize-positions "^4.0.2" + postcss-normalize-repeat-style "^4.0.2" + postcss-normalize-string "^4.0.2" + postcss-normalize-timing-functions "^4.0.2" + postcss-normalize-unicode "^4.0.1" + postcss-normalize-url "^4.0.1" + postcss-normalize-whitespace "^4.0.2" + postcss-ordered-values "^4.1.2" + postcss-reduce-initial "^4.0.3" + postcss-reduce-transforms "^4.0.2" + postcss-svgo "^4.0.2" + postcss-unique-selectors "^4.0.1" + +cssnano-util-get-arguments@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/cssnano-util-get-arguments/-/cssnano-util-get-arguments-4.0.0.tgz#ed3a08299f21d75741b20f3b81f194ed49cc150f" + integrity sha1-7ToIKZ8h11dBsg87gfGU7UnMFQ8= + +cssnano-util-get-match@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/cssnano-util-get-match/-/cssnano-util-get-match-4.0.0.tgz#c0e4ca07f5386bb17ec5e52250b4f5961365156d" + integrity sha1-wOTKB/U4a7F+xeUiULT1lhNlFW0= + +cssnano-util-raw-cache@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/cssnano-util-raw-cache/-/cssnano-util-raw-cache-4.0.1.tgz#b26d5fd5f72a11dfe7a7846fb4c67260f96bf282" + integrity sha512-qLuYtWK2b2Dy55I8ZX3ky1Z16WYsx544Q0UWViebptpwn/xDBmog2TLg4f+DBMg1rJ6JDWtn96WHbOKDWt1WQA== + dependencies: + postcss "^7.0.0" + +cssnano-util-same-parent@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/cssnano-util-same-parent/-/cssnano-util-same-parent-4.0.1.tgz#574082fb2859d2db433855835d9a8456ea18bbf3" + integrity sha512-WcKx5OY+KoSIAxBW6UBBRay1U6vkYheCdjyVNDm85zt5K9mHoGOfsOsqIszfAqrQQFIIKgjh2+FDgIj/zsl21Q== + +cssnano@^4.1.10: + version "4.1.10" + resolved "https://registry.yarnpkg.com/cssnano/-/cssnano-4.1.10.tgz#0ac41f0b13d13d465487e111b778d42da631b8b2" + integrity sha512-5wny+F6H4/8RgNlaqab4ktc3e0/blKutmq8yNlBFXA//nSFFAqAngjNVRzUvCgYROULmZZUoosL/KSoZo5aUaQ== + dependencies: + cosmiconfig "^5.0.0" + cssnano-preset-default "^4.0.7" + is-resolvable "^1.0.0" + postcss "^7.0.0" + +csso@^4.0.2: + version "4.0.3" + resolved "https://registry.yarnpkg.com/csso/-/csso-4.0.3.tgz#0d9985dc852c7cc2b2cacfbbe1079014d1a8e903" + integrity sha512-NL3spysxUkcrOgnpsT4Xdl2aiEiBG6bXswAABQVHcMrfjjBisFOKwLDOmf4wf32aPdcJws1zds2B0Rg+jqMyHQ== + dependencies: + css-tree "1.0.0-alpha.39" + +csstype@^2.5.2: + version 
"2.6.11" + resolved "https://registry.yarnpkg.com/csstype/-/csstype-2.6.11.tgz#452f4d024149ecf260a852b025e36562a253ffc5" + integrity sha512-l8YyEC9NBkSm783PFTvh0FmJy7s5pFKrDp49ZL7zBGX3fWkO+N4EEyan1qqp8cwPLDcD0OSdyY6hAMoxp34JFw== + +cyclist@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/cyclist/-/cyclist-1.0.1.tgz#596e9698fd0c80e12038c2b82d6eb1b35b6224d9" + integrity sha1-WW6WmP0MgOEgOMK4LW6xs1tiJNk= + +date-time@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/date-time/-/date-time-2.1.0.tgz#0286d1b4c769633b3ca13e1e62558d2dbdc2eba2" + integrity sha512-/9+C44X7lot0IeiyfgJmETtRMhBidBYM2QFFIkGa0U1k+hSyY87Nw7PY3eDqpvCBm7I3WCSfPeZskW/YYq6m4g== + dependencies: + time-zone "^1.0.0" + +debug@2.6.9, debug@^2.2.0, debug@^2.3.3, debug@^2.6.0: + version "2.6.9" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" + integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA== + dependencies: + ms "2.0.0" + +debug@^3.1.1, debug@^3.2.5: + version "3.2.6" + resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.6.tgz#e83d17de16d8a7efb7717edbe5fb10135eee629b" + integrity sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ== + dependencies: + ms "^2.1.1" + +debug@^4.0.0, debug@^4.1.0, debug@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.1.1.tgz#3b72260255109c6b589cee050f1d516139664791" + integrity sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw== + dependencies: + ms "^2.1.1" + +decamelize@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" + integrity sha1-9lNNFRSCabIDUue+4m9QH5oZEpA= + +decko@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/decko/-/decko-1.2.0.tgz#fd43c735e967b8013306884a56fbe665996b6817" + integrity sha1-/UPHNelnuAEzBohKVvvmZZlraBc= + +decode-uri-component@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.0.tgz#eb3913333458775cb84cd1a1fae062106bb87545" + integrity sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU= + +decompress-response@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/decompress-response/-/decompress-response-3.3.0.tgz#80a4dd323748384bfa248083622aedec982adff3" + integrity sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M= + dependencies: + mimic-response "^1.0.0" + +decompress-tar@^4.0.0, decompress-tar@^4.1.0, decompress-tar@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/decompress-tar/-/decompress-tar-4.1.1.tgz#718cbd3fcb16209716e70a26b84e7ba4592e5af1" + integrity sha512-JdJMaCrGpB5fESVyxwpCx4Jdj2AagLmv3y58Qy4GE6HMVjWz1FeVQk1Ct4Kye7PftcdOo/7U7UKzYBJgqnGeUQ== + dependencies: + file-type "^5.2.0" + is-stream "^1.1.0" + tar-stream "^1.5.2" + +decompress-tarbz2@^4.0.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/decompress-tarbz2/-/decompress-tarbz2-4.1.1.tgz#3082a5b880ea4043816349f378b56c516be1a39b" + integrity sha512-s88xLzf1r81ICXLAVQVzaN6ZmX4A6U4z2nMbOwobxkLoIIfjVMBg7TeguTUXkKeXni795B6y5rnvDw7rxhAq9A== + dependencies: + decompress-tar "^4.1.0" + file-type "^6.1.0" + is-stream "^1.1.0" + seek-bzip "^1.0.5" + unbzip2-stream "^1.0.9" + +decompress-targz@^4.0.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/decompress-targz/-/decompress-targz-4.1.1.tgz#c09bc35c4d11f3de09f2d2da53e9de23e7ce1eee" + integrity 
sha512-4z81Znfr6chWnRDNfFNqLwPvm4db3WuZkqV+UgXQzSngG3CEKdBkw5jrv3axjjL96glyiiKjsxJG3X6WBZwX3w== + dependencies: + decompress-tar "^4.1.1" + file-type "^5.2.0" + is-stream "^1.1.0" + +decompress-unzip@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/decompress-unzip/-/decompress-unzip-4.0.1.tgz#deaaccdfd14aeaf85578f733ae8210f9b4848f69" + integrity sha1-3qrM39FK6vhVePczroIQ+bSEj2k= + dependencies: + file-type "^3.8.0" + get-stream "^2.2.0" + pify "^2.3.0" + yauzl "^2.4.2" + +decompress@^4.2.0: + version "4.2.1" + resolved "https://registry.yarnpkg.com/decompress/-/decompress-4.2.1.tgz#007f55cc6a62c055afa37c07eb6a4ee1b773f118" + integrity sha512-e48kc2IjU+2Zw8cTb6VZcJQ3lgVbS4uuB1TfCHbiZIP/haNXm+SVyhu+87jts5/3ROpd82GSVCoNs/z8l4ZOaQ== + dependencies: + decompress-tar "^4.0.0" + decompress-tarbz2 "^4.0.0" + decompress-targz "^4.0.0" + decompress-unzip "^4.0.1" + graceful-fs "^4.1.10" + make-dir "^1.0.0" + pify "^2.3.0" + strip-dirs "^2.0.0" + +deep-equal@^1.0.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/deep-equal/-/deep-equal-1.1.1.tgz#b5c98c942ceffaf7cb051e24e1434a25a2e6076a" + integrity sha512-yd9c5AdiqVcR+JjcwUQb9DkhJc8ngNr0MahEBGvDiJw8puWab2yZlh+nkasOnZP+EGTAP6rRp2JzJhJZzvNF8g== + dependencies: + is-arguments "^1.0.4" + is-date-object "^1.0.1" + is-regex "^1.0.4" + object-is "^1.0.1" + object-keys "^1.1.1" + regexp.prototype.flags "^1.2.0" + +deep-extend@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac" + integrity sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA== + +deep-is@~0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34" + integrity sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ= + +deepmerge@^4.2.2: + version "4.2.2" + resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-4.2.2.tgz#44d2ea3679b8f4d4ffba33f03d865fc1e7bf4955" + integrity sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg== + +default-gateway@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/default-gateway/-/default-gateway-4.2.0.tgz#167104c7500c2115f6dd69b0a536bb8ed720552b" + integrity sha512-h6sMrVB1VMWVrW13mSc6ia/DwYYw5MN6+exNu1OaJeFac5aSAvwM7lZ0NVfTABuSkQelr4h5oebg3KB1XPdjgA== + dependencies: + execa "^1.0.0" + ip-regex "^2.1.0" + +defaults@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.3.tgz#c656051e9817d9ff08ed881477f3fe4019f3ef7d" + integrity sha1-xlYFHpgX2f8I7YgUd/P+QBnz730= + dependencies: + clone "^1.0.2" + +defer-to-connect@^1.0.1: + version "1.1.3" + resolved "https://registry.yarnpkg.com/defer-to-connect/-/defer-to-connect-1.1.3.tgz#331ae050c08dcf789f8c83a7b81f0ed94f4ac591" + integrity sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ== + +define-properties@^1.1.2, define-properties@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.3.tgz#cf88da6cbee26fe6db7094f61d870cbd84cee9f1" + integrity sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ== + dependencies: + object-keys "^1.0.12" + +define-property@^0.2.5: + version "0.2.5" + resolved "https://registry.yarnpkg.com/define-property/-/define-property-0.2.5.tgz#c35b1ef918ec3c990f9a5bc57be04aacec5c8116" + integrity sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY= + dependencies: + 
is-descriptor "^0.1.0" + +define-property@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/define-property/-/define-property-1.0.0.tgz#769ebaaf3f4a63aad3af9e8d304c9bbe79bfb0e6" + integrity sha1-dp66rz9KY6rTr56NMEybvnm/sOY= + dependencies: + is-descriptor "^1.0.0" + +define-property@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/define-property/-/define-property-2.0.2.tgz#d459689e8d654ba77e02a817f8710d702cb16e9d" + integrity sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ== + dependencies: + is-descriptor "^1.0.2" + isobject "^3.0.1" + +del@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/del/-/del-4.1.1.tgz#9e8f117222ea44a31ff3a156c049b99052a9f0b4" + integrity sha512-QwGuEUouP2kVwQenAsOof5Fv8K9t3D8Ca8NxcXKrIpEHjTXK5J2nXLdP+ALI1cgv8wj7KuwBhTwBkOZSJKM5XQ== + dependencies: + "@types/glob" "^7.1.1" + globby "^6.1.0" + is-path-cwd "^2.0.0" + is-path-in-cwd "^2.0.0" + p-map "^2.0.0" + pify "^4.0.1" + rimraf "^2.6.3" + +del@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/del/-/del-5.1.0.tgz#d9487c94e367410e6eff2925ee58c0c84a75b3a7" + integrity sha512-wH9xOVHnczo9jN2IW68BabcecVPxacIA3g/7z6vhSU/4stOKQzeCRK0yD0A24WiAAUJmmVpWqrERcTxnLo3AnA== + dependencies: + globby "^10.0.1" + graceful-fs "^4.2.2" + is-glob "^4.0.1" + is-path-cwd "^2.2.0" + is-path-inside "^3.0.1" + p-map "^3.0.0" + rimraf "^3.0.0" + slash "^3.0.0" + +delayed-stream@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" + integrity sha1-3zrhmayt+31ECqrgsp4icrJOxhk= + +delegate@^3.1.2: + version "3.2.0" + resolved "https://registry.yarnpkg.com/delegate/-/delegate-3.2.0.tgz#b66b71c3158522e8ab5744f720d8ca0c2af59166" + integrity sha512-IofjkYBZaZivn0V8nnsMJGBr4jVLxHDheKSW88PyxS5QC4Vo9ZbZVvhzlSxY87fVq3STR6r+4cGepyHkcWOQSw== + +depd@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.2.tgz#9bcd52e14c097763e749b274c4346ed2e560b5a9" + integrity sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak= + +deprecation@^2.0.0, deprecation@^2.3.1: + version "2.3.1" + resolved "https://registry.yarnpkg.com/deprecation/-/deprecation-2.3.1.tgz#6368cbdb40abf3373b525ac87e4a260c3a700919" + integrity sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ== + +des.js@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/des.js/-/des.js-1.0.1.tgz#5382142e1bdc53f85d86d53e5f4aa7deb91e0843" + integrity sha512-Q0I4pfFrv2VPd34/vfLrFOoRmlYj3OV50i7fskps1jZWK1kApMWWT9G6RRUeYedLcBDIhnSDaUvJMb3AhUlaEA== + dependencies: + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + +destroy@~1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80" + integrity sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA= + +detab@2.0.3, detab@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/detab/-/detab-2.0.3.tgz#33e5dd74d230501bd69985a0d2b9a3382699a130" + integrity sha512-Up8P0clUVwq0FnFjDclzZsy9PadzRn5FFxrr47tQQvMHqyiFYVbpH8oXDzWtF0Q7pYy3l+RPmtBl+BsFF6wH0A== + dependencies: + repeat-string "^1.5.4" + +detect-node@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/detect-node/-/detect-node-2.0.4.tgz#014ee8f8f669c5c58023da64b8179c083a28c46c" + integrity sha512-ZIzRpLJrOj7jjP2miAtgqIfmzbxa4ZOr5jJc601zklsfEx9oTzmmj2nVpIPRpNlRTIh8lc1kyViIY7BWSGNmKw== + +detect-port-alt@1.1.6: + version "1.1.6" + resolved 
"https://registry.yarnpkg.com/detect-port-alt/-/detect-port-alt-1.1.6.tgz#24707deabe932d4a3cf621302027c2b266568275" + integrity sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q== + dependencies: + address "^1.0.1" + debug "^2.6.0" + +detect-port@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/detect-port/-/detect-port-1.3.0.tgz#d9c40e9accadd4df5cac6a782aefd014d573d1f1" + integrity sha512-E+B1gzkl2gqxt1IhUzwjrxBKRqx1UzC3WLONHinn8S3T6lwV/agVCyitiFOsGJ/eYuEUBvD71MZHy3Pv1G9doQ== + dependencies: + address "^1.0.1" + debug "^2.6.0" + +detective-amd@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/detective-amd/-/detective-amd-3.0.0.tgz#40c8e21e229df8bca1ee2d4b952a7b67b01e2a5a" + integrity sha512-kOpKHyabdSKF9kj7PqYHLeHPw+TJT8q2u48tZYMkIcas28el1CYeLEJ42Nm+563/Fq060T5WknfwDhdX9+kkBQ== + dependencies: + ast-module-types "^2.3.1" + escodegen "^1.8.0" + get-amd-module-type "^3.0.0" + node-source-walk "^4.0.0" + +detective-cjs@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/detective-cjs/-/detective-cjs-3.1.1.tgz#18da3e39a002d2098a1123d45ce1de1b0d9045a0" + integrity sha512-JQtNTBgFY6h8uT6pgph5QpV3IyxDv+z3qPk/FZRDT9TlFfm5dnRtpH39WtQEr1khqsUxVqXzKjZHpdoQvQbllg== + dependencies: + ast-module-types "^2.4.0" + node-source-walk "^4.0.0" + +detective-es6@^2.1.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/detective-es6/-/detective-es6-2.2.0.tgz#8f2baba3f8cd90a5cfd748f5ac436f0158ed2585" + integrity sha512-fSpNY0SLER7/sVgQZ1NxJPwmc9uCTzNgdkQDhAaj8NPYwr7Qji9QBcmbNvtMCnuuOGMuKn3O7jv0An+/WRWJZQ== + dependencies: + node-source-walk "^4.0.0" + +detective-less@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/detective-less/-/detective-less-1.0.2.tgz#a68af9ca5f69d74b7d0aa190218b211d83b4f7e3" + integrity sha512-Rps1xDkEEBSq3kLdsdnHZL1x2S4NGDcbrjmd4q+PykK5aJwDdP5MBgrJw1Xo+kyUHuv3JEzPqxr+Dj9ryeDRTA== + dependencies: + debug "^4.0.0" + gonzales-pe "^4.2.3" + node-source-walk "^4.0.0" + +detective-postcss@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/detective-postcss/-/detective-postcss-3.0.1.tgz#511921951f66135e17d0ece2e7604c6e4966c9c6" + integrity sha512-tfTS2GdpUal5NY0aCqI4dpEy8Xfr88AehYKB0iBIZvo8y2g3UsrcDnrp9PR2FbzoW7xD5Rip3NJW7eCSvtqdUw== + dependencies: + debug "^4.1.1" + is-url "^1.2.4" + postcss "^7.0.2" + postcss-values-parser "^1.5.0" + +detective-sass@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/detective-sass/-/detective-sass-3.0.1.tgz#496b819efd1f5c4dd3f0e19b43a8634bdd6927c4" + integrity sha512-oSbrBozRjJ+QFF4WJFbjPQKeakoaY1GiR380NPqwdbWYd5wfl5cLWv0l6LsJVqrgWfFN1bjFqSeo32Nxza8Lbw== + dependencies: + debug "^4.1.1" + gonzales-pe "^4.2.3" + node-source-walk "^4.0.0" + +detective-scss@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/detective-scss/-/detective-scss-2.0.1.tgz#06f8c21ae6dedad1fccc26d544892d968083eaf8" + integrity sha512-VveyXW4WQE04s05KlJ8K0bG34jtHQVgTc9InspqoQxvnelj/rdgSAy7i2DXAazyQNFKlWSWbS+Ro2DWKFOKTPQ== + dependencies: + debug "^4.1.1" + gonzales-pe "^4.2.3" + node-source-walk "^4.0.0" + +detective-stylus@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/detective-stylus/-/detective-stylus-1.0.0.tgz#50aee7db8babb990381f010c63fabba5b58e54cd" + integrity sha1-UK7n24uruZA4HwEMY/q7pbWOVM0= + +detective-typescript@^5.8.0: + version "5.8.0" + resolved "https://registry.yarnpkg.com/detective-typescript/-/detective-typescript-5.8.0.tgz#c46776571e26bad6c9ada020cb3cb4e5625d1311" + integrity 
sha512-SrsUCfCaDTF64QVMHMidRal+kmkbIc5zP8cxxZPsomWx9vuEUjBlSJNhf7/ypE5cLdJJDI4qzKDmyzqQ+iz/xg== + dependencies: + "@typescript-eslint/typescript-estree" "^2.29.0" + ast-module-types "^2.6.0" + node-source-walk "^4.2.0" + typescript "^3.8.3" + +diffie-hellman@^5.0.0: + version "5.0.3" + resolved "https://registry.yarnpkg.com/diffie-hellman/-/diffie-hellman-5.0.3.tgz#40e8ee98f55a2149607146921c63e1ae5f3d2875" + integrity sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg== + dependencies: + bn.js "^4.1.0" + miller-rabin "^4.0.0" + randombytes "^2.0.0" + +dir-glob@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-2.0.0.tgz#0b205d2b6aef98238ca286598a8204d29d0a0034" + integrity sha512-37qirFDz8cA5fimp9feo43fSuRo2gHwaIn6dXL8Ber1dGwUosDrGZeCCXq57WnIqE4aQ+u3eQZzsk1yOzhdwag== + dependencies: + arrify "^1.0.1" + path-type "^3.0.0" + +dir-glob@^2.0.0, dir-glob@^2.2.2: + version "2.2.2" + resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-2.2.2.tgz#fa09f0694153c8918b18ba0deafae94769fc50c4" + integrity sha512-f9LBi5QWzIW3I6e//uxZoLBlUt9kcp66qo0sSCxL6YZKc75R1c4MFCoe/LaZiBGmgujvQdxc5Bn3QhfyvK5Hsw== + dependencies: + path-type "^3.0.0" + +dir-glob@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f" + integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA== + dependencies: + path-type "^4.0.0" + +dlv@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/dlv/-/dlv-1.1.3.tgz#5c198a8a11453596e751494d49874bc7732f2e79" + integrity sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA== + +dns-equal@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/dns-equal/-/dns-equal-1.0.0.tgz#b39e7f1da6eb0a75ba9c17324b34753c47e0654d" + integrity sha1-s55/HabrCnW6nBcySzR1PEfgZU0= + +dns-packet@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/dns-packet/-/dns-packet-1.3.1.tgz#12aa426981075be500b910eedcd0b47dd7deda5a" + integrity sha512-0UxfQkMhYAUaZI+xrNZOz/as5KgDU0M/fQ9b6SpkyLbk3GEswDi6PADJVaYJradtRVsRIlF1zLyOodbcTCDzUg== + dependencies: + ip "^1.1.0" + safe-buffer "^5.0.1" + +dns-txt@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/dns-txt/-/dns-txt-2.0.2.tgz#b91d806f5d27188e4ab3e7d107d881a1cc4642b6" + integrity sha1-uR2Ab10nGI5Ks+fRB9iBocxGQrY= + dependencies: + buffer-indexof "^1.0.0" + +dom-converter@^0.2: + version "0.2.0" + resolved "https://registry.yarnpkg.com/dom-converter/-/dom-converter-0.2.0.tgz#6721a9daee2e293682955b6afe416771627bb768" + integrity sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA== + dependencies: + utila "~0.4" + +dom-iterator@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/dom-iterator/-/dom-iterator-1.0.0.tgz#9c09899846ec41c2d257adc4d6015e4759ef05ad" + integrity sha512-7dsMOQI07EMU98gQM8NSB3GsAiIeBYIPKpnxR3c9xOvdvBjChAcOM0iJ222I3p5xyiZO9e5oggkNaCusuTdYig== + dependencies: + component-props "1.1.1" + component-xor "0.0.4" + +dom-serializer@0: + version "0.2.2" + resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.2.2.tgz#1afb81f533717175d478655debc5e332d9f9bb51" + integrity sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g== + dependencies: + domelementtype "^2.0.1" + entities "^2.0.0" + +dom-serializer@~0.1.0: + version "0.1.1" + resolved 
"https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.1.1.tgz#1ec4059e284babed36eec2941d4a970a189ce7c0" + integrity sha512-l0IU0pPzLWSHBcieZbpOKgkIn3ts3vAh7ZuFyXNwJxJXk/c4Gwj9xaTJwIDVQCXawWD0qb3IzMGH5rglQaO0XA== + dependencies: + domelementtype "^1.3.0" + entities "^1.1.1" + +domain-browser@^1.1.1: + version "1.2.0" + resolved "https://registry.yarnpkg.com/domain-browser/-/domain-browser-1.2.0.tgz#3d31f50191a6749dd1375a7f522e823d42e54eda" + integrity sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA== + +domelementtype@1, domelementtype@^1.3.0, domelementtype@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.1.tgz#d048c44b37b0d10a7f2a3d5fee3f4333d790481f" + integrity sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w== + +domelementtype@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-2.0.1.tgz#1f8bdfe91f5a78063274e803b4bdcedf6e94f94d" + integrity sha512-5HOHUDsYZWV8FGWN0Njbr/Rn7f/eWSQi1v7+HsUVwXgn8nWWlL64zKDkS0n8ZmQ3mlWOMuXOnR+7Nx/5tMO5AQ== + +domhandler@^2.3.0: + version "2.4.2" + resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.4.2.tgz#8805097e933d65e85546f726d60f5eb88b44f803" + integrity sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA== + dependencies: + domelementtype "1" + +dompurify@^2.0.8: + version "2.0.12" + resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-2.0.12.tgz#284a2b041e1c60b8e72d7b4d2fadad36141254ae" + integrity sha512-Fl8KseK1imyhErHypFPA8qpq9gPzlsJ/EukA6yk9o0gX23p1TzC+rh9LqNg1qvErRTc0UNMYlKxEGSfSh43NDg== + +domutils@1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.5.1.tgz#dcd8488a26f563d61079e48c9f7b7e32373682cf" + integrity sha1-3NhIiib1Y9YQeeSMn3t+Mjc2gs8= + dependencies: + dom-serializer "0" + domelementtype "1" + +domutils@^1.5.1, domutils@^1.7.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.7.0.tgz#56ea341e834e06e6748af7a1cb25da67ea9f8c2a" + integrity sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg== + dependencies: + dom-serializer "0" + domelementtype "1" + +dot-case@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/dot-case/-/dot-case-3.0.3.tgz#21d3b52efaaba2ea5fda875bb1aa8124521cf4aa" + integrity sha512-7hwEmg6RiSQfm/GwPL4AAWXKy3YNNZA3oFv2Pdiey0mwkRCPZ9x6SZbkLcn8Ma5PYeVokzoD4Twv2n7LKp5WeA== + dependencies: + no-case "^3.0.3" + tslib "^1.10.0" + +dot-prop@^4.1.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/dot-prop/-/dot-prop-4.2.0.tgz#1f19e0c2e1aa0e32797c49799f2837ac6af69c57" + integrity sha512-tUMXrxlExSW6U2EXiiKGSBVdYgtV8qlHL+C10TsW4PURY/ic+eaysnSkwB4kA/mBlCyy/IKDJ+Lc3wbWeaXtuQ== + dependencies: + is-obj "^1.0.0" + +dot-prop@^5.1.0, dot-prop@^5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/dot-prop/-/dot-prop-5.2.0.tgz#c34ecc29556dc45f1f4c22697b6f4904e0cc4fcb" + integrity sha512-uEUyaDKoSQ1M4Oq8l45hSE26SnTxL6snNnqvK/VWx5wJhmff5z0FUVJDKDanor/6w3kzE3i7XZOk+7wC0EXr1A== + dependencies: + is-obj "^2.0.0" + +dotenv@^8.2.0: + version "8.2.0" + resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-8.2.0.tgz#97e619259ada750eea3e4ea3e26bceea5424b16a" + integrity sha512-8sJ78ElpbDJBHNeBzUbUVLsqKdccaa/BXF1uPTw3GrvQTBgrQrtObr2mUrE38vzYd8cEv+m/JBfDLioYcfXoaw== + +download@^7.1.0: + version "7.1.0" + resolved 
"https://registry.yarnpkg.com/download/-/download-7.1.0.tgz#9059aa9d70b503ee76a132897be6dec8e5587233" + integrity sha512-xqnBTVd/E+GxJVrX5/eUJiLYjCGPwMpdL+jGhGU57BvtcA7wwhtHVbXBeUk51kOpW3S7Jn3BQbN9Q1R1Km2qDQ== + dependencies: + archive-type "^4.0.0" + caw "^2.0.1" + content-disposition "^0.5.2" + decompress "^4.2.0" + ext-name "^5.0.0" + file-type "^8.1.0" + filenamify "^2.0.0" + get-stream "^3.0.0" + got "^8.3.1" + make-dir "^1.2.0" + p-event "^2.1.0" + pify "^3.0.0" + +duplexer3@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.4.tgz#ee01dd1cac0ed3cbc7fdbea37dc0a8f1ce002ce2" + integrity sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI= + +duplexer@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/duplexer/-/duplexer-0.1.1.tgz#ace6ff808c1ce66b57d1ebf97977acb02334cfc1" + integrity sha1-rOb/gIwc5mtX0ev5eXessCM0z8E= + +duplexify@^3.4.2, duplexify@^3.6.0: + version "3.7.1" + resolved "https://registry.yarnpkg.com/duplexify/-/duplexify-3.7.1.tgz#2a4df5317f6ccfd91f86d6fd25d8d8a103b88309" + integrity sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g== + dependencies: + end-of-stream "^1.0.0" + inherits "^2.0.1" + readable-stream "^2.0.0" + stream-shift "^1.0.0" + +ee-first@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" + integrity sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0= + +ejs@^2.6.1: + version "2.7.4" + resolved "https://registry.yarnpkg.com/ejs/-/ejs-2.7.4.tgz#48661287573dcc53e366c7a1ae52c3a120eec9ba" + integrity sha512-7vmuyh5+kuUyJKePhQfRQBhXV5Ce+RnaeeQArKu1EAMpL3WbgMt5WG6uQZpEVvYSSsxMXRKOewtDk9RaTKXRlA== + +electron-to-chromium@^1.3.378, electron-to-chromium@^1.3.413: + version "1.3.475" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.475.tgz#67688cc82c342f39594a412286e975eda45d8412" + integrity sha512-vcTeLpPm4+ccoYFXnepvkFt0KujdyrBU19KNEO40Pnkhta6mUi2K0Dn7NmpRcNz7BvysnSqeuIYScP003HWuYg== + +elf-tools@^1.1.1: + version "1.1.2" + resolved "https://registry.yarnpkg.com/elf-tools/-/elf-tools-1.1.2.tgz#9d929985742b9f02b865bc334463b64dd1834b8a" + integrity sha512-x+p+XNxLk8ittsYN7294mCnQ2i48udu3UGdHBv2gw1u1MVigXctcfbp5H9ebqTJnDxkbs6PdOSBOAdYGGDN7uA== + +elliptic@^6.0.0, elliptic@^6.5.2: + version "6.5.3" + resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.3.tgz#cb59eb2efdaf73a0bd78ccd7015a62ad6e0f93d6" + integrity sha512-IMqzv5wNQf+E6aHeIqATs0tOLeOTwj1QKbRcS3jBbYkl5oLAserA8yJTT7/VyHUYG91PRmPyeQDObKLPpeS4dw== + dependencies: + bn.js "^4.4.0" + brorand "^1.0.1" + hash.js "^1.0.0" + hmac-drbg "^1.0.0" + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + minimalistic-crypto-utils "^1.0.0" + +"emoji-regex@>=6.0.0 <=6.1.1": + version "6.1.1" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-6.1.1.tgz#c6cd0ec1b0642e2a3c67a1137efc5e796da4f88e" + integrity sha1-xs0OwbBkLio8Z6ETfvxeeW2k+I4= + +emoji-regex@^7.0.1: + version "7.0.3" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-7.0.3.tgz#933a04052860c85e83c122479c4748a8e4c72156" + integrity sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA== + +emoji-regex@^8.0.0: + version "8.0.0" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" + integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== + +emojis-list@^2.0.0: + version "2.1.0" + 
resolved "https://registry.yarnpkg.com/emojis-list/-/emojis-list-2.1.0.tgz#4daa4d9db00f9819880c79fa457ae5b09a1fd389" + integrity sha1-TapNnbAPmBmIDHn6RXrlsJof04k= + +emojis-list@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/emojis-list/-/emojis-list-3.0.0.tgz#5570662046ad29e2e916e71aae260abdff4f6a78" + integrity sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q== + +emoticon@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/emoticon/-/emoticon-3.2.0.tgz#c008ca7d7620fac742fe1bf4af8ff8fed154ae7f" + integrity sha512-SNujglcLTTg+lDAcApPNgEdudaqQFiAbJCqzjNxJkvN9vAwCGi0uu8IUVvx+f16h+V44KCY6Y2yboroc9pilHg== + +emotion@^9.2.6: + version "9.2.12" + resolved "https://registry.yarnpkg.com/emotion/-/emotion-9.2.12.tgz#53925aaa005614e65c6e43db8243c843574d1ea9" + integrity sha512-hcx7jppaI8VoXxIWEhxpDW7I+B4kq9RNzQLmsrF6LY8BGKqe2N+gFAQr0EfuFucFlPs2A9HM4+xNj4NeqEWIOQ== + dependencies: + babel-plugin-emotion "^9.2.11" + create-emotion "^9.2.12" + +enabled@2.0.x: + version "2.0.0" + resolved "https://registry.yarnpkg.com/enabled/-/enabled-2.0.0.tgz#f9dd92ec2d6f4bbc0d5d1e64e21d61cd4665e7c2" + integrity sha512-AKrN98kuwOzMIdAizXGI86UFBoo26CL21UM763y1h/GMSJ4/OHU9k2YlsmBpyScFo/wbLzWQJBMCW4+IO3/+OQ== + +encodeurl@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59" + integrity sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k= + +encoding@^0.1.11: + version "0.1.13" + resolved "https://registry.yarnpkg.com/encoding/-/encoding-0.1.13.tgz#56574afdd791f54a8e9b2785c0582a2d26210fa9" + integrity sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A== + dependencies: + iconv-lite "^0.6.2" + +end-of-stream@^1.0.0, end-of-stream@^1.1.0, end-of-stream@^1.4.1, end-of-stream@^1.4.4: + version "1.4.4" + resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.4.tgz#5ae64a5f45057baf3626ec14da0ca5e4b2431eb0" + integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q== + dependencies: + once "^1.4.0" + +enhanced-resolve@^4.1.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-4.2.0.tgz#5d43bda4a0fd447cb0ebbe71bef8deff8805ad0d" + integrity sha512-S7eiFb/erugyd1rLb6mQ3Vuq+EXHv5cpCkNqqIkYkBgN2QdFnyCZzFBleqwGEx4lgNGYij81BWnCrFNK7vxvjQ== + dependencies: + graceful-fs "^4.1.2" + memory-fs "^0.5.0" + tapable "^1.0.0" + +entities@^1.1.1, entities@~1.1.1: + version "1.1.2" + resolved "https://registry.yarnpkg.com/entities/-/entities-1.1.2.tgz#bdfa735299664dfafd34529ed4f8522a275fea56" + integrity sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w== + +entities@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/entities/-/entities-2.0.3.tgz#5c487e5742ab93c15abb5da22759b8590ec03b7f" + integrity sha512-MyoZ0jgnLvB2X3Lg5HqpFmn1kybDiIfEQmKzTb5apr51Rb+T3KdmMiqa70T+bhGnyv7bQ6WMj2QMHpGMmlrUYQ== + +envinfo@^7.3.1: + version "7.7.2" + resolved "https://registry.yarnpkg.com/envinfo/-/envinfo-7.7.2.tgz#098f97a0e902f8141f9150553c92dbb282c4cabe" + integrity sha512-k3Eh5bKuQnZjm49/L7H4cHzs2FlL5QjbTB3JrPxoTI8aJG7hVMe4uKyJxSYH4ahseby2waUwk5OaKX/nAsaYgg== + +errno@^0.1.3, errno@~0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/errno/-/errno-0.1.7.tgz#4684d71779ad39af177e3f007996f7c67c852618" + integrity 
sha512-MfrRBDWzIWifgq6tJj60gkAwtLNb6sQPlcFrSOflcP1aFmmruKQ2wRnze/8V6kgyz7H3FF8Npzv78mZ7XLLflg== + dependencies: + prr "~1.0.1" + +error-ex@^1.3.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" + integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g== + dependencies: + is-arrayish "^0.2.1" + +error-stack-parser@^2.0.2, error-stack-parser@^2.0.3: + version "2.0.6" + resolved "https://registry.yarnpkg.com/error-stack-parser/-/error-stack-parser-2.0.6.tgz#5a99a707bd7a4c58a797902d48d82803ede6aad8" + integrity sha512-d51brTeqC+BHlwF0BhPtcYgF5nlzf9ZZ0ZIUQNZpc9ZB9qw5IJ2diTrBY9jlCJkTLITYPjmiX6OWCwH+fuyNgQ== + dependencies: + stackframe "^1.1.1" + +es-abstract@^1.17.0-next.1, es-abstract@^1.17.2, es-abstract@^1.17.5: + version "1.17.6" + resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.17.6.tgz#9142071707857b2cacc7b89ecb670316c3e2d52a" + integrity sha512-Fr89bON3WFyUi5EvAeI48QTWX0AyekGgLA8H+c+7fbfCkJwRWRMLd8CQedNEyJuoYYhmtEqY92pgte1FAhBlhw== + dependencies: + es-to-primitive "^1.2.1" + function-bind "^1.1.1" + has "^1.0.3" + has-symbols "^1.0.1" + is-callable "^1.2.0" + is-regex "^1.1.0" + object-inspect "^1.7.0" + object-keys "^1.1.1" + object.assign "^4.1.0" + string.prototype.trimend "^1.0.1" + string.prototype.trimstart "^1.0.1" + +es-to-primitive@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.2.1.tgz#e55cd4c9cdc188bcefb03b366c736323fc5c898a" + integrity sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA== + dependencies: + is-callable "^1.1.4" + is-date-object "^1.0.1" + is-symbol "^1.0.2" + +es6-promise@^3.2.1: + version "3.3.1" + resolved "https://registry.yarnpkg.com/es6-promise/-/es6-promise-3.3.1.tgz#a08cdde84ccdbf34d027a1451bc91d4bcd28a613" + integrity sha1-oIzd6EzNvzTQJ6FFG8kdS80ophM= + +escape-goat@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/escape-goat/-/escape-goat-2.1.1.tgz#1b2dc77003676c457ec760b2dc68edb648188675" + integrity sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q== + +escape-html@^1.0.3, escape-html@~1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988" + integrity sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg= + +escape-string-regexp@2.0.0, escape-string-regexp@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz#a30304e99daa32e23b2fd20f51babd07cffca344" + integrity sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w== + +escape-string-regexp@4.0.0, escape-string-regexp@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34" + integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== + +escape-string-regexp@^1.0.0, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + integrity sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ= + +escodegen@^1.8.0: + version "1.14.3" + resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-1.14.3.tgz#4e7b81fba61581dc97582ed78cab7f0e8d63f503" 
+ integrity sha512-qFcX0XJkdg+PB3xjZZG/wKSuT1PnQWx57+TVSjIMmILd2yC/6ByYElPwJnslDsuWuSAp4AwJGumarAAmJch5Kw== + dependencies: + esprima "^4.0.1" + estraverse "^4.2.0" + esutils "^2.0.2" + optionator "^0.8.1" + optionalDependencies: + source-map "~0.6.1" + +eslint-scope@^4.0.3: + version "4.0.3" + resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-4.0.3.tgz#ca03833310f6889a3264781aa82e63eb9cfe7848" + integrity sha512-p7VutNr1O/QrxysMo3E45FjYDTeXBy0iTltPFNSqKAIfjDSXC+4dj+qfyuD8bfAXrW/y6lW3O76VaYNPKfpKrg== + dependencies: + esrecurse "^4.1.0" + estraverse "^4.1.1" + +eslint-visitor-keys@^1.1.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz#30ebd1ef7c2fdff01c3a4f151044af25fab0523e" + integrity sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ== + +esprima@^4.0.0, esprima@^4.0.1, esprima@~4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" + integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== + +esrecurse@^4.1.0: + version "4.2.1" + resolved "https://registry.yarnpkg.com/esrecurse/-/esrecurse-4.2.1.tgz#007a3b9fdbc2b3bb87e4879ea19c92fdbd3942cf" + integrity sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ== + dependencies: + estraverse "^4.1.0" + +estraverse@^4.1.0, estraverse@^4.1.1, estraverse@^4.2.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.3.0.tgz#398ad3f3c5a24948be7725e83d11a7de28cdbd1d" + integrity sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw== + +esutils@^2.0.2: + version "2.0.3" + resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64" + integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g== + +eta@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/eta/-/eta-1.1.1.tgz#acd575025233488a66870b02223001d4cd467441" + integrity sha512-YRchTAXZZrrJVmlAwmLHuKmlHNJ2hO0uen8LsDqvH5kknRjh0ef+Y9kInENygCgvu7FQNJBpbyDChfMo8f5Qgw== + +etag@~1.8.1: + version "1.8.1" + resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887" + integrity sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc= + +eval@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/eval/-/eval-0.1.4.tgz#e05dbe0dab4b9330215cbb7bf4886eb24bd58700" + integrity sha512-npGsebJejyjMRnLdFu+T/97dnigqIU0Ov3IGrZ8ygd1v7RL1vGkEKtvyWZobqUH1AQgKlg0Yqqe2BtMA9/QZLw== + dependencies: + require-like ">= 0.1.1" + +eventemitter3@^4.0.0: + version "4.0.4" + resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.4.tgz#b5463ace635a083d018bdc7c917b4c5f10a85384" + integrity sha512-rlaVLnVxtxvoyLsQQFBx53YmXHDxRIzzTLbdfxqi4yocpSjAxXwkU0cScM5JgSKMqEhrZpnvQ2D9gjylR0AimQ== + +events@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/events/-/events-1.1.1.tgz#9ebdb7635ad099c70dcc4c2a1f5004288e8bd924" + integrity sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ= + +events@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/events/-/events-3.1.0.tgz#84279af1b34cb75aa88bf5ff291f6d0bd9b31a59" + integrity sha512-Rv+u8MLHNOdMjTAFeT3nCjHn2aGlx435FP/sDHNaRhDEMwyI/aB22Kj2qIN8R0cw3z28psEQLYwxVKLsKrMgWg== + +eventsource@^1.0.7: + version "1.0.7" + resolved 
"https://registry.yarnpkg.com/eventsource/-/eventsource-1.0.7.tgz#8fbc72c93fcd34088090bc0a4e64f4b5cee6d8d0" + integrity sha512-4Ln17+vVT0k8aWq+t/bF5arcS3EpT9gYtW66EPacdj/mAFevznsnyoHLPy2BA8gbIQeIHoPsvwmfBftfcG//BQ== + dependencies: + original "^1.0.0" + +evp_bytestokey@^1.0.0, evp_bytestokey@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz#7fcbdb198dc71959432efe13842684e0525acb02" + integrity sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA== + dependencies: + md5.js "^1.3.4" + safe-buffer "^5.1.1" + +execa@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/execa/-/execa-0.7.0.tgz#944becd34cc41ee32a63a9faf27ad5a65fc59777" + integrity sha1-lEvs00zEHuMqY6n68nrVpl/Fl3c= + dependencies: + cross-spawn "^5.0.1" + get-stream "^3.0.0" + is-stream "^1.1.0" + npm-run-path "^2.0.0" + p-finally "^1.0.0" + signal-exit "^3.0.0" + strip-eof "^1.0.0" + +execa@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/execa/-/execa-1.0.0.tgz#c6236a5bb4df6d6f15e88e7f017798216749ddd8" + integrity sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA== + dependencies: + cross-spawn "^6.0.0" + get-stream "^4.0.0" + is-stream "^1.1.0" + npm-run-path "^2.0.0" + p-finally "^1.0.0" + signal-exit "^3.0.0" + strip-eof "^1.0.0" + +execa@^2.0.3: + version "2.1.0" + resolved "https://registry.yarnpkg.com/execa/-/execa-2.1.0.tgz#e5d3ecd837d2a60ec50f3da78fd39767747bbe99" + integrity sha512-Y/URAVapfbYy2Xp/gb6A0E7iR8xeqOCXsuuaoMn7A5PzrXUK84E1gyiEfq0wQd/GHA6GsoHWwhNq8anb0mleIw== + dependencies: + cross-spawn "^7.0.0" + get-stream "^5.0.0" + is-stream "^2.0.0" + merge-stream "^2.0.0" + npm-run-path "^3.0.0" + onetime "^5.1.0" + p-finally "^2.0.0" + signal-exit "^3.0.2" + strip-final-newline "^2.0.0" + +execa@^3.3.0, execa@^3.4.0: + version "3.4.0" + resolved "https://registry.yarnpkg.com/execa/-/execa-3.4.0.tgz#c08ed4550ef65d858fac269ffc8572446f37eb89" + integrity sha512-r9vdGQk4bmCuK1yKQu1KTwcT2zwfWdbdaXfCtAh+5nU/4fSX+JAb7vZGvI5naJrQlvONrEB20jeruESI69530g== + dependencies: + cross-spawn "^7.0.0" + get-stream "^5.0.0" + human-signals "^1.1.1" + is-stream "^2.0.0" + merge-stream "^2.0.0" + npm-run-path "^4.0.0" + onetime "^5.1.0" + p-finally "^2.0.0" + signal-exit "^3.0.2" + strip-final-newline "^2.0.0" + +expand-brackets@^2.1.4: + version "2.1.4" + resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-2.1.4.tgz#b77735e315ce30f6b6eff0f83b04151a22449622" + integrity sha1-t3c14xXOMPa27/D4OwQVGiJEliI= + dependencies: + debug "^2.3.3" + define-property "^0.2.5" + extend-shallow "^2.0.1" + posix-character-classes "^0.1.0" + regex-not "^1.0.0" + snapdragon "^0.8.1" + to-regex "^3.0.1" + +express-logging@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/express-logging/-/express-logging-1.1.1.tgz#62839618cbab5bb3610f1a1c1485352fe9d26c2a" + integrity sha1-YoOWGMurW7NhDxocFIU1L+nSbCo= + dependencies: + on-headers "^1.0.0" + +express@^4.16.3, express@^4.17.1: + version "4.17.1" + resolved "https://registry.yarnpkg.com/express/-/express-4.17.1.tgz#4491fc38605cf51f8629d39c2b5d026f98a4c134" + integrity sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g== + dependencies: + accepts "~1.3.7" + array-flatten "1.1.1" + body-parser "1.19.0" + content-disposition "0.5.3" + content-type "~1.0.4" + cookie "0.4.0" + cookie-signature "1.0.6" + debug "2.6.9" + depd "~1.1.2" + encodeurl "~1.0.2" + 
escape-html "~1.0.3" + etag "~1.8.1" + finalhandler "~1.1.2" + fresh "0.5.2" + merge-descriptors "1.0.1" + methods "~1.1.2" + on-finished "~2.3.0" + parseurl "~1.3.3" + path-to-regexp "0.1.7" + proxy-addr "~2.0.5" + qs "6.7.0" + range-parser "~1.2.1" + safe-buffer "5.1.2" + send "0.17.1" + serve-static "1.14.1" + setprototypeof "1.1.1" + statuses "~1.5.0" + type-is "~1.6.18" + utils-merge "1.0.1" + vary "~1.1.2" + +ext-list@^2.0.0: + version "2.2.2" + resolved "https://registry.yarnpkg.com/ext-list/-/ext-list-2.2.2.tgz#0b98e64ed82f5acf0f2931babf69212ef52ddd37" + integrity sha512-u+SQgsubraE6zItfVA0tBuCBhfU9ogSRnsvygI7wht9TS510oLkBRXBsqopeUG/GBOIQyKZO9wjTqIu/sf5zFA== + dependencies: + mime-db "^1.28.0" + +ext-name@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/ext-name/-/ext-name-5.0.0.tgz#70781981d183ee15d13993c8822045c506c8f0a6" + integrity sha512-yblEwXAbGv1VQDmow7s38W77hzAgJAO50ztBLMcUyUBfxv1HC+LGwtiEN+Co6LtlqT/5uwVOxsD4TNIilWhwdQ== + dependencies: + ext-list "^2.0.0" + sort-keys-length "^1.0.0" + +extend-shallow@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-2.0.1.tgz#51af7d614ad9a9f610ea1bafbb989d6b1c56890f" + integrity sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8= + dependencies: + is-extendable "^0.1.0" + +extend-shallow@^3.0.0, extend-shallow@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-3.0.2.tgz#26a71aaf073b39fb2127172746131c2704028db8" + integrity sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg= + dependencies: + assign-symbols "^1.0.0" + is-extendable "^1.0.1" + +extend@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" + integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g== + +external-editor@^3.0.3: + version "3.1.0" + resolved "https://registry.yarnpkg.com/external-editor/-/external-editor-3.1.0.tgz#cb03f740befae03ea4d283caed2741a83f335495" + integrity sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew== + dependencies: + chardet "^0.7.0" + iconv-lite "^0.4.24" + tmp "^0.0.33" + +extglob@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/extglob/-/extglob-2.0.4.tgz#ad00fe4dc612a9232e8718711dc5cb5ab0285543" + integrity sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw== + dependencies: + array-unique "^0.3.2" + define-property "^1.0.0" + expand-brackets "^2.1.4" + extend-shallow "^2.0.1" + fragment-cache "^0.2.1" + regex-not "^1.0.0" + snapdragon "^0.8.1" + to-regex "^3.0.1" + +extract-stack@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/extract-stack/-/extract-stack-1.0.0.tgz#b97acaf9441eea2332529624b732fc5a1c8165fa" + integrity sha1-uXrK+UQe6iMyUpYktzL8WhyBZfo= + +extract-stack@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/extract-stack/-/extract-stack-2.0.0.tgz#11367bc865bfcd9bc0db3123e5edb57786f11f9b" + integrity sha512-AEo4zm+TenK7zQorGK1f9mJ8L14hnTDi2ZQPR+Mub1NX8zimka1mXpV5LpH8x9HoUmFSHZCfLHqWvp0Y4FxxzQ== + +fast-deep-equal@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-1.1.0.tgz#c053477817c86b51daa853c81e059b733d023614" + integrity sha1-wFNHeBfIa1HaqFPIHgWbcz0CNhQ= + +fast-deep-equal@^3.1.1: + version "3.1.3" + resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" + integrity 
sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== + +fast-diff@^1.1.2: + version "1.2.0" + resolved "https://registry.yarnpkg.com/fast-diff/-/fast-diff-1.2.0.tgz#73ee11982d86caaf7959828d519cfe927fac5f03" + integrity sha512-xJuoT5+L99XlZ8twedaRf6Ax2TgQVxvgZOYoPKqZufmJib0tL2tegPBOZb1pVNgIhlqDlA0eO0c3wBvQcmzx4w== + +fast-equals@^1.6.0: + version "1.6.3" + resolved "https://registry.yarnpkg.com/fast-equals/-/fast-equals-1.6.3.tgz#84839a1ce20627c463e1892f2ae316380c81b459" + integrity sha512-4WKW0AL5+WEqO0zWavAfYGY1qwLsBgE//DN4TTcVEN2UlINgkv9b3vm2iHicoenWKSX9mKWmGOsU/iI5IST7pQ== + +fast-glob@^2.0.2, fast-glob@^2.2.6: + version "2.2.7" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-2.2.7.tgz#6953857c3afa475fff92ee6015d52da70a4cd39d" + integrity sha512-g1KuQwHOZAmOZMuBtHdxDtju+T2RT8jgCC9aANsbpdiDDTSnjgfuVsIBNKbUeJI3oKMRExcfNDtJl4OhbffMsw== + dependencies: + "@mrmlnc/readdir-enhanced" "^2.2.1" + "@nodelib/fs.stat" "^1.1.2" + glob-parent "^3.1.0" + is-glob "^4.0.0" + merge2 "^1.2.3" + micromatch "^3.1.10" + +fast-glob@^3.0.3, fast-glob@^3.1.1: + version "3.2.4" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.2.4.tgz#d20aefbf99579383e7f3cc66529158c9b98554d3" + integrity sha512-kr/Oo6PX51265qeuCYsyGypiO5uJFgBS0jksyG7FUeCyQzNwYnzrNIMR1NXfkZXsMYXYLRAHgISHBz8gQcxKHQ== + dependencies: + "@nodelib/fs.stat" "^2.0.2" + "@nodelib/fs.walk" "^1.2.3" + glob-parent "^5.1.0" + merge2 "^1.3.0" + micromatch "^4.0.2" + picomatch "^2.2.1" + +fast-json-stable-stringify@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" + integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== + +fast-levenshtein@^2.0.6, fast-levenshtein@~2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" + integrity sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc= + +fast-safe-stringify@^2.0.4, fast-safe-stringify@^2.0.7: + version "2.0.7" + resolved "https://registry.yarnpkg.com/fast-safe-stringify/-/fast-safe-stringify-2.0.7.tgz#124aa885899261f68aedb42a7c080de9da608743" + integrity sha512-Utm6CdzT+6xsDk2m8S6uL8VHxNwI6Jub+e9NYTcAms28T84pTa25GJQV9j0CY0N1rM8hK4x6grpF2BQf+2qwVA== + +fast-stringify@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/fast-stringify/-/fast-stringify-1.1.2.tgz#f109b792d54343aec271b47882598d279402401d" + integrity sha512-SfslXjiH8km0WnRiuPfpUKwlZjW5I878qsOm+2x8x3TgqmElOOLh1rgJFb+PolNdNRK3r8urEefqx0wt7vx1dA== + +fast-url-parser@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/fast-url-parser/-/fast-url-parser-1.1.3.tgz#f4af3ea9f34d8a271cf58ad2b3759f431f0b318d" + integrity sha1-9K8+qfNNiicc9YrSs3WfQx8LMY0= + dependencies: + punycode "^1.3.2" + +fastq@^1.6.0: + version "1.8.0" + resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.8.0.tgz#550e1f9f59bbc65fe185cb6a9b4d95357107f481" + integrity sha512-SMIZoZdLh/fgofivvIkmknUXyPnvxRE3DhtZ5Me3Mrsk5gyPL42F0xr51TdRXskBxHfMp+07bcYzfsYEsSQA9Q== + dependencies: + reusify "^1.0.4" + +faye-websocket@^0.10.0: + version "0.10.0" + resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.10.0.tgz#4e492f8d04dfb6f89003507f6edbf2d501e7c6f4" + integrity sha1-TkkvjQTftviQA1B/btvy1QHnxvQ= + dependencies: + websocket-driver ">=0.5.1" + +faye-websocket@~0.11.1: + version "0.11.3" + resolved 
"https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.11.3.tgz#5c0e9a8968e8912c286639fde977a8b209f2508e" + integrity sha512-D2y4bovYpzziGgbHYtGCMjlJM36vAl/y+xUyn1C+FVx8szd1E+86KwVw6XvYSzOP8iMpm1X0I4xJD+QtUb36OA== + dependencies: + websocket-driver ">=0.5.1" + +fbjs@^0.8.0: + version "0.8.17" + resolved "https://registry.yarnpkg.com/fbjs/-/fbjs-0.8.17.tgz#c4d598ead6949112653d6588b01a5cdcd9f90fdd" + integrity sha1-xNWY6taUkRJlPWWIsBpc3Nn5D90= + dependencies: + core-js "^1.0.0" + isomorphic-fetch "^2.1.1" + loose-envify "^1.0.0" + object-assign "^4.1.0" + promise "^7.1.1" + setimmediate "^1.0.5" + ua-parser-js "^0.7.18" + +fd-slicer@~1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/fd-slicer/-/fd-slicer-1.1.0.tgz#25c7c89cb1f9077f8891bbe61d8f390eae256f1e" + integrity sha1-JcfInLH5B3+IkbvmHY85Dq4lbx4= + dependencies: + pend "~1.2.0" + +fecha@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/fecha/-/fecha-4.2.0.tgz#3ffb6395453e3f3efff850404f0a59b6747f5f41" + integrity sha512-aN3pcx/DSmtyoovUudctc8+6Hl4T+hI9GBBHLjA76jdZl7+b1sgh5g4k+u/GL3dTy1/pnYzKp69FpJ0OicE3Wg== + +figgy-pudding@^3.5.1: + version "3.5.2" + resolved "https://registry.yarnpkg.com/figgy-pudding/-/figgy-pudding-3.5.2.tgz#b4eee8148abb01dcf1d1ac34367d59e12fa61d6e" + integrity sha512-0btnI/H8f2pavGMN8w40mlSKOfTK2SVJmBfBeVIj3kNw0swwgzyRq0d5TJVOwodFmtvpPeWPN/MCcfuWF0Ezbw== + +figures@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/figures/-/figures-2.0.0.tgz#3ab1a2d2a62c8bfb431a0c94cb797a2fce27c962" + integrity sha1-OrGi0qYsi/tDGgyUy3l6L84nyWI= + dependencies: + escape-string-regexp "^1.0.5" + +figures@^3.0.0, figures@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/figures/-/figures-3.2.0.tgz#625c18bd293c604dc4a8ddb2febf0c88341746af" + integrity sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg== + dependencies: + escape-string-regexp "^1.0.5" + +file-loader@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/file-loader/-/file-loader-6.0.0.tgz#97bbfaab7a2460c07bcbd72d3a6922407f67649f" + integrity sha512-/aMOAYEFXDdjG0wytpTL5YQLfZnnTmLNjn+AIrJ/6HVnTfDqLsVKUUwkDf4I4kgex36BvjuXEn/TX9B/1ESyqQ== + dependencies: + loader-utils "^2.0.0" + schema-utils "^2.6.5" + +file-size@0.0.5: + version "0.0.5" + resolved "https://registry.yarnpkg.com/file-size/-/file-size-0.0.5.tgz#057d43c3a3ed735da3f90d6052ab380f1e6d5e3b" + integrity sha1-BX1Dw6Ptc12j+Q1gUqs4Dx5tXjs= + +file-type@^3.8.0: + version "3.9.0" + resolved "https://registry.yarnpkg.com/file-type/-/file-type-3.9.0.tgz#257a078384d1db8087bc449d107d52a52672b9e9" + integrity sha1-JXoHg4TR24CHvESdEH1SpSZyuek= + +file-type@^4.2.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/file-type/-/file-type-4.4.0.tgz#1b600e5fca1fbdc6e80c0a70c71c8dba5f7906c5" + integrity sha1-G2AOX8ofvcboDApwxxyNul95BsU= + +file-type@^5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/file-type/-/file-type-5.2.0.tgz#2ddbea7c73ffe36368dfae49dc338c058c2b8ad6" + integrity sha1-LdvqfHP/42No365J3DOMBYwritY= + +file-type@^6.1.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/file-type/-/file-type-6.2.0.tgz#e50cd75d356ffed4e306dc4f5bcf52a79903a919" + integrity sha512-YPcTBDV+2Tm0VqjybVd32MHdlEGAtuxS3VAYsumFokDSMG+ROT5wawGlnHDoz7bfMcMDt9hxuXvXwoKUx2fkOg== + +file-type@^8.1.0: + version "8.1.0" + resolved "https://registry.yarnpkg.com/file-type/-/file-type-8.1.0.tgz#244f3b7ef641bbe0cca196c7276e4b332399f68c" + integrity 
sha512-qyQ0pzAy78gVoJsmYeNgl8uH8yKhr1lVhW7JbzJmnlRi0I4R2eEDEJZVKG8agpDnLpacwNbDhLNG/LMdxHD2YQ== + +file-uri-to-path@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz#553a7b8446ff6f684359c445f1e37a05dacc33dd" + integrity sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw== + +filename-reserved-regex@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/filename-reserved-regex/-/filename-reserved-regex-2.0.0.tgz#abf73dfab735d045440abfea2d91f389ebbfa229" + integrity sha1-q/c9+rc10EVECr/qLZHzieu/oik= + +filenamify@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/filenamify/-/filenamify-2.1.0.tgz#88faf495fb1b47abfd612300002a16228c677ee9" + integrity sha512-ICw7NTT6RsDp2rnYKVd8Fu4cr6ITzGy3+u4vUujPkabyaz+03F24NWEX7fs5fp+kBonlaqPH8fAO2NM+SXt/JA== + dependencies: + filename-reserved-regex "^2.0.0" + strip-outer "^1.0.0" + trim-repeated "^1.0.0" + +filesize@6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/filesize/-/filesize-6.0.1.tgz#f850b509909c7c86f7e450ea19006c31c2ed3d2f" + integrity sha512-u4AYWPgbI5GBhs6id1KdImZWn5yfyFrrQ8OWZdN7ZMfA8Bf4HcO0BGo9bmUIEV8yrp8I1xVfJ/dn90GtFNNJcg== + +filesize@^3.6.1: + version "3.6.1" + resolved "https://registry.yarnpkg.com/filesize/-/filesize-3.6.1.tgz#090bb3ee01b6f801a8a8be99d31710b3422bb317" + integrity sha512-7KjR1vv6qnicaPMi1iiTcI85CyYwRO/PSFCu6SvqL8jN2Wjt/NIYQTFtFs7fSDCYOstUkEWIQGFUg5YZQfjlcg== + +fill-range@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-4.0.0.tgz#d544811d428f98eb06a63dc402d2403c328c38f7" + integrity sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc= + dependencies: + extend-shallow "^2.0.1" + is-number "^3.0.0" + repeat-string "^1.6.1" + to-regex-range "^2.1.0" + +fill-range@^7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" + integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== + dependencies: + to-regex-range "^5.0.1" + +filter-obj@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/filter-obj/-/filter-obj-2.0.1.tgz#34d9f0536786f072df7aeac3a8bda1c6e767aec6" + integrity sha512-yDEp513p7+iLdFHWBVdZFnRiOYwg8ZqmpaAiZCMjzqsbo7tCS4Qm4ulXOht337NGzkukKa9u3W4wqQ9tQPm3Ug== + +finalhandler@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.1.2.tgz#b7e7d000ffd11938d0fdb053506f6ebabe9f587d" + integrity sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA== + dependencies: + debug "2.6.9" + encodeurl "~1.0.2" + escape-html "~1.0.3" + on-finished "~2.3.0" + parseurl "~1.3.3" + statuses "~1.5.0" + unpipe "~1.0.0" + +find-cache-dir@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-2.1.0.tgz#8d0f94cd13fe43c6c7c261a0d86115ca918c05f7" + integrity sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ== + dependencies: + commondir "^1.0.1" + make-dir "^2.0.0" + pkg-dir "^3.0.0" + +find-cache-dir@^3.0.0, find-cache-dir@^3.3.1: + version "3.3.1" + resolved "https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-3.3.1.tgz#89b33fad4a4670daa94f855f7fbe31d6d84fe880" + integrity sha512-t2GDMt3oGC/v+BMwzmllWDuJF/xcDtE5j/fCGbqDD7OLuJkj0cfh1YSA5VKPvwMeLFLNDBkwOKZ2X85jGLVftQ== + dependencies: + commondir "^1.0.1" + make-dir "^3.0.2" + pkg-dir "^4.1.0" + 
+find-root@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/find-root/-/find-root-1.1.0.tgz#abcfc8ba76f708c42a97b3d685b7e9450bfb9ce4" + integrity sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng== + +find-up@4.1.0, find-up@^4.0.0, find-up@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-4.1.0.tgz#97afe7d6cdc0bc5928584b7c8d7b16e8a9aa5d19" + integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw== + dependencies: + locate-path "^5.0.0" + path-exists "^4.0.0" + +find-up@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-2.1.0.tgz#45d1b7e506c717ddd482775a2b77920a3c0c57a7" + integrity sha1-RdG35QbHF93UgndaK3eSCjwMV6c= + dependencies: + locate-path "^2.0.0" + +find-up@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-3.0.0.tgz#49169f1d7993430646da61ecc5ae355c21c97b73" + integrity sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg== + dependencies: + locate-path "^3.0.0" + +flatten@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/flatten/-/flatten-1.0.3.tgz#c1283ac9f27b368abc1e36d1ff7b04501a30356b" + integrity sha512-dVsPA/UwQ8+2uoFe5GHtiBMu48dWLTdsuEd7CKGlZlD78r1TTWBvDuFaFGKCo/ZfEr95Uk56vZoX86OsHkUeIg== + +flush-write-stream@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/flush-write-stream/-/flush-write-stream-1.1.1.tgz#8dd7d873a1babc207d94ead0c2e0e44276ebf2e8" + integrity sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w== + dependencies: + inherits "^2.0.3" + readable-stream "^2.3.6" + +flush-write-stream@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/flush-write-stream/-/flush-write-stream-2.0.0.tgz#6f58e776154f5eefacff92a6e5a681c88ac50f7c" + integrity sha512-uXClqPxT4xW0lcdSBheb2ObVU+kuqUk3Jk64EwieirEXZx9XUrVwp/JuBfKAWaM4T5Td/VL7QLDWPXp/MvGm/g== + dependencies: + inherits "^2.0.3" + readable-stream "^3.1.1" + +fn.name@1.x.x: + version "1.1.0" + resolved "https://registry.yarnpkg.com/fn.name/-/fn.name-1.1.0.tgz#26cad8017967aea8731bc42961d04a3d5988accc" + integrity sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw== + +folder-walker@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/folder-walker/-/folder-walker-3.2.0.tgz#98e00e59773f43416a6dcf0926d4c9436f65121d" + integrity sha512-VjAQdSLsl6AkpZNyrQJfO7BXLo4chnStqb055bumZMbRUPpVuPN3a4ktsnRCmrFZjtMlYLkyXiR5rAs4WOpC4Q== + dependencies: + from2 "^2.1.0" + +follow-redirects@^1.0.0: + version "1.12.0" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.12.0.tgz#ff0ccf85cf2c867c481957683b5f91b75b25e240" + integrity sha512-JgawlbfBQKjbKegPn8vUsvJqplE7KHJuhGO4yPcb+ZOIYKSr+xobMVlfRBToZwZUUxy7lFiKBdFNloz9ui368Q== + +for-in@^0.1.3: + version "0.1.8" + resolved "https://registry.yarnpkg.com/for-in/-/for-in-0.1.8.tgz#d8773908e31256109952b1fdb9b3fa867d2775e1" + integrity sha1-2Hc5COMSVhCZUrH9ubP6hn0ndeE= + +for-in@^1.0.1, for-in@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80" + integrity sha1-gQaNKVqBQuwKxybG4iAMMPttXoA= + +for-own@^0.1.3: + version "0.1.5" + resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce" + integrity sha1-UmXGgaTylNq78XyVCbZ2OqhFEM4= + dependencies: + 
for-in "^1.0.1" + +foreach@^2.0.4: + version "2.0.5" + resolved "https://registry.yarnpkg.com/foreach/-/foreach-2.0.5.tgz#0bee005018aeb260d0a3af3ae658dd0136ec1b99" + integrity sha1-C+4AUBiusmDQo6865ljdATbsG5k= + +fork-ts-checker-webpack-plugin@3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-3.1.1.tgz#a1642c0d3e65f50c2cc1742e9c0a80f441f86b19" + integrity sha512-DuVkPNrM12jR41KM2e+N+styka0EgLkTnXmNcXdgOM37vtGeY+oCBK/Jx0hzSeEU6memFCtWb4htrHPMDfwwUQ== + dependencies: + babel-code-frame "^6.22.0" + chalk "^2.4.1" + chokidar "^3.3.0" + micromatch "^3.1.10" + minimatch "^3.0.4" + semver "^5.6.0" + tapable "^1.0.0" + worker-rpc "^0.1.0" + +form-data@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-3.0.0.tgz#31b7e39c85f1355b7139ee0c647cf0de7f83c682" + integrity sha512-CKMFDglpbMi6PyN+brwB9Q/GOw0eAnsrEZDgcsH5Krhz5Od/haKHAX0NmQfha2zPPz0JpWzA7GJHGSnvCRLWsg== + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.8" + mime-types "^2.1.12" + +format-util@^1.0.3: + version "1.0.5" + resolved "https://registry.yarnpkg.com/format-util/-/format-util-1.0.5.tgz#1ffb450c8a03e7bccffe40643180918cc297d271" + integrity sha512-varLbTj0e0yVyRpqQhuWV+8hlePAgaoFRhNFj50BNjEIrw1/DphHSObtqwskVCPWNgzwPoQrZAbfa/SBiicNeg== + +forwarded@~0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.1.2.tgz#98c23dab1175657b8c0573e8ceccd91b0ff18c84" + integrity sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ= + +fragment-cache@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/fragment-cache/-/fragment-cache-0.2.1.tgz#4290fad27f13e89be7f33799c6bc5a0abfff0d19" + integrity sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk= + dependencies: + map-cache "^0.2.2" + +fresh@0.5.2: + version "0.5.2" + resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7" + integrity sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac= + +from2-array@0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/from2-array/-/from2-array-0.0.4.tgz#eafc16b65f6e2719bcd57fdc1869005ac1332cd6" + integrity sha1-6vwWtl9uJxm81X/cGGkAWsEzLNY= + dependencies: + from2 "^2.0.3" + +from2@^2.0.3, from2@^2.1.0, from2@^2.1.1: + version "2.3.0" + resolved "https://registry.yarnpkg.com/from2/-/from2-2.3.0.tgz#8bfb5502bde4a4d36cfdeea007fcca21d7e382af" + integrity sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8= + dependencies: + inherits "^2.0.1" + readable-stream "^2.0.0" + +fs-constants@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs-constants/-/fs-constants-1.0.0.tgz#6be0de9be998ce16af8afc24497b9ee9b7ccd9ad" + integrity sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow== + +fs-extra@^7.0.0, fs-extra@^7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-7.0.1.tgz#4f189c44aa123b895f722804f55ea23eadc348e9" + integrity sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw== + dependencies: + graceful-fs "^4.1.2" + jsonfile "^4.0.0" + universalify "^0.1.0" + +fs-extra@^8.1.0: + version "8.1.0" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-8.1.0.tgz#49d43c45a88cd9677668cb7be1b46efdb8d2e1c0" + integrity sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g== + dependencies: + graceful-fs "^4.2.0" + jsonfile "^4.0.0" + universalify "^0.1.0" + +fs-extra@^9.0.1: + version "9.0.1" + resolved 
"https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.0.1.tgz#910da0062437ba4c39fedd863f1675ccfefcb9fc" + integrity sha512-h2iAoN838FqAFJY2/qVpzFXy+EBxfVE220PalAqQLDVsFOHLJrZvut5puAbCdNv6WJk+B8ihI+k0c7JK5erwqQ== + dependencies: + at-least-node "^1.0.0" + graceful-fs "^4.2.0" + jsonfile "^6.0.1" + universalify "^1.0.0" + +fs-minipass@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-2.1.0.tgz#7f5036fdbf12c63c169190cbe4199c852271f9fb" + integrity sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg== + dependencies: + minipass "^3.0.0" + +fs-write-stream-atomic@^1.0.8: + version "1.0.10" + resolved "https://registry.yarnpkg.com/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz#b47df53493ef911df75731e70a9ded0189db40c9" + integrity sha1-tH31NJPvkR33VzHnCp3tAYnbQMk= + dependencies: + graceful-fs "^4.1.2" + iferr "^0.1.5" + imurmurhash "^0.1.4" + readable-stream "1 || 2" + +fs.realpath@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" + integrity sha1-FQStJSMVjKpA20onh8sBQRmU6k8= + +fsevents@^1.2.7: + version "1.2.13" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-1.2.13.tgz#f325cb0455592428bcf11b383370ef70e3bfcc38" + integrity sha512-oWb1Z6mkHIskLzEJ/XWX0srkpkTQ7vaopMQkyaEIoq0fmtFVxOthb8cCxeT+p3ynTdkk/RZwbgG4brR5BeWECw== + dependencies: + bindings "^1.5.0" + nan "^2.12.1" + +fsevents@~2.1.2: + version "2.1.3" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.1.3.tgz#fb738703ae8d2f9fe900c33836ddebee8b97f23e" + integrity sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ== + +function-bind@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" + integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== + +fuzzy@^0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/fuzzy/-/fuzzy-0.1.3.tgz#4c76ec2ff0ac1a36a9dccf9a00df8623078d4ed8" + integrity sha1-THbsL/CsGjap3M+aAN+GIweNTtg= + +gensync@^1.0.0-beta.1: + version "1.0.0-beta.1" + resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.1.tgz#58f4361ff987e5ff6e1e7a210827aa371eaac269" + integrity sha512-r8EC6NO1sngH/zdD9fiRDLdcgnbayXah+mLgManTaIZJqEC1MZstmnox8KpnI2/fxQwrp5OpCOYWLp4rBl4Jcg== + +get-amd-module-type@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/get-amd-module-type/-/get-amd-module-type-3.0.0.tgz#bb334662fa04427018c937774570de495845c288" + integrity sha512-99Q7COuACPfVt18zH9N4VAMyb81S6TUgJm2NgV6ERtkh9VIkAaByZkW530wl3lLN5KTtSrK9jVLxYsoP5hQKsw== + dependencies: + ast-module-types "^2.3.2" + node-source-walk "^4.0.0" + +get-caller-file@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.3.tgz#f978fa4c90d1dfe7ff2d6beda2a515e713bdcf4a" + integrity sha512-3t6rVToeoZfYSGd8YoLFR2DJkiQrIiUrGcjvFX2mDw3bn6k2OtwHN0TNCLbBO+w8qTvimhDkv+LSscbJY1vE6w== + +get-caller-file@^2.0.1: + version "2.0.5" + resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" + integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== + +get-own-enumerable-property-symbols@^3.0.0: + version "3.0.2" + resolved 
"https://registry.yarnpkg.com/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz#b5fde77f22cbe35f390b4e089922c50bce6ef664" + integrity sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g== + +get-port@^5.1.0: + version "5.1.1" + resolved "https://registry.yarnpkg.com/get-port/-/get-port-5.1.1.tgz#0469ed07563479de6efb986baf053dcd7d4e3193" + integrity sha512-g/Q1aTSDOxFpchXC4i8ZWvxA1lnPqx/JHqcpIw0/LX9T8x/GBbi6YnlN5nhaKIFkT8oFsscUKgDJYxfwfS6QsQ== + +get-proxy@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/get-proxy/-/get-proxy-2.1.0.tgz#349f2b4d91d44c4d4d4e9cba2ad90143fac5ef93" + integrity sha512-zmZIaQTWnNQb4R4fJUEp/FC51eZsc6EkErspy3xtIYStaq8EB/hDIWipxsal+E8rz0qD7f2sL/NA9Xee4RInJw== + dependencies: + npm-conf "^1.1.0" + +get-stream@3.0.0, get-stream@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-3.0.0.tgz#8e943d1358dc37555054ecbe2edb05aa174ede14" + integrity sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ= + +get-stream@^2.2.0: + version "2.3.1" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-2.3.1.tgz#5f38f93f346009666ee0150a054167f91bdd95de" + integrity sha1-Xzj5PzRgCWZu4BUKBUFn+Rvdld4= + dependencies: + object-assign "^4.0.1" + pinkie-promise "^2.0.0" + +get-stream@^4.0.0, get-stream@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-4.1.0.tgz#c1b255575f3dc21d59bfc79cd3d2b46b1c3a54b5" + integrity sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w== + dependencies: + pump "^3.0.0" + +get-stream@^5.0.0, get-stream@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-5.1.0.tgz#01203cdc92597f9b909067c3e656cc1f4d3c4dc9" + integrity sha512-EXr1FOzrzTfGeL0gQdeFEvOMm2mzMOglyiOXSTpPC+iAjAKftbr3jpCMWynogwYnM+eSj9sHGc6wjIcDvYiygw== + dependencies: + pump "^3.0.0" + +get-value@^2.0.3, get-value@^2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/get-value/-/get-value-2.0.6.tgz#dc15ca1c672387ca76bd37ac0a395ba2042a2c28" + integrity sha1-3BXKHGcjh8p2vTesCjlbogQqLCg= + +gh-release-fetch@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/gh-release-fetch/-/gh-release-fetch-1.0.3.tgz#0740c753ac30100bff8340788e421136b9e62009" + integrity sha512-Av+27/G9dJT5iPjFGZcHbjKJcTO1FrGIFQ3e8//9PJtNbK1QKbtm++R/rn8+OXH5ebtMGtXcVdyZrQLyf16i7g== + dependencies: + "@types/download" "^6.2.4" + "@types/mkdirp" "^0.5.2" + "@types/node-fetch" "^2.1.6" + "@types/semver" "^5.5.0" + download "^7.1.0" + mkdirp "^0.5.1" + node-fetch "^2.3.0" + semver "^5.6.0" + +git-repo-info@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/git-repo-info/-/git-repo-info-2.1.1.tgz#220ffed8cbae74ef8a80e3052f2ccb5179aed058" + integrity sha512-8aCohiDo4jwjOwma4FmYFd3i97urZulL8XL24nIPxuE+GZnfsAyy/g2Shqx6OjUiFKUXZM+Yy+KHnOmmA3FVcg== + +gitconfiglocal@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/gitconfiglocal/-/gitconfiglocal-2.1.0.tgz#07c28685c55cc5338b27b5acbcfe34aeb92e43d1" + integrity sha512-qoerOEliJn3z+Zyn1HW2F6eoYJqKwS6MgC9cztTLUB/xLWX8gD/6T60pKn4+t/d6tP7JlybI7Z3z+I572CR/Vg== + dependencies: + ini "^1.3.2" + +github-slugger@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/github-slugger/-/github-slugger-1.3.0.tgz#9bd0a95c5efdfc46005e82a906ef8e2a059124c9" + integrity sha512-gwJScWVNhFYSRDvURk/8yhcFBee6aFjye2a7Lhb2bUyRulpIoek9p0I9Kt7PT67d/nUlZbFu8L9RLiA0woQN8Q== + dependencies: + emoji-regex ">=6.0.0 
<=6.1.1" + +glob-parent@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-3.1.0.tgz#9e6af6299d8d3bd2bd40430832bd113df906c5ae" + integrity sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4= + dependencies: + is-glob "^3.1.0" + path-dirname "^1.0.0" + +glob-parent@^5.1.0, glob-parent@~5.1.0: + version "5.1.1" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.1.tgz#b6c1ef417c4e5663ea498f1c45afac6916bbc229" + integrity sha512-FnI+VGOpnlGHWZxthPGR+QhR78fuiK0sNLkHQv+bL9fQi57lNNdquIbna/WrfROrolq8GK5Ek6BiMwqL/voRYQ== + dependencies: + is-glob "^4.0.1" + +glob-to-regexp@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz#8c5a1494d2066c570cc3bfe4496175acc4d502ab" + integrity sha1-jFoUlNIGbFcMw7/kSWF1rMTVAqs= + +glob@^7.0.0, glob@^7.0.3, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4, glob@^7.1.6: + version "7.1.6" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.6.tgz#141f33b81a7c2492e125594307480c46679278a6" + integrity sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA== + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.4" + once "^1.3.0" + path-is-absolute "^1.0.0" + +global-cache-dir@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/global-cache-dir/-/global-cache-dir-1.0.1.tgz#2c0820b43bae8a6ef8adf96fd23ec6bbf52dd13c" + integrity sha512-wYGh6O3Xkx1LsMXQpObr/uu3PsFpbWhpbslgn9Xq52rbDZ6YOwJcQtU5R4lSEQgCDtXLItV9EH5X1F/VnBTAlw== + dependencies: + cachedir "^2.2.0" + make-dir "^3.0.0" + path-exists "^4.0.0" + +global-dirs@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/global-dirs/-/global-dirs-0.1.1.tgz#b319c0dd4607f353f3be9cca4c72fc148c49f445" + integrity sha1-sxnA3UYH81PzvpzKTHL8FIxJ9EU= + dependencies: + ini "^1.3.4" + +global-dirs@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/global-dirs/-/global-dirs-2.0.1.tgz#acdf3bb6685bcd55cb35e8a052266569e9469201" + integrity sha512-5HqUqdhkEovj2Of/ms3IeS/EekcO54ytHRLV4PEY2rhRwrHXLQjeVEES0Lhka0xwNDtGYn58wyC4s5+MHsOO6A== + dependencies: + ini "^1.3.5" + +global-modules@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/global-modules/-/global-modules-2.0.0.tgz#997605ad2345f27f51539bea26574421215c7780" + integrity sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A== + dependencies: + global-prefix "^3.0.0" + +global-prefix@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/global-prefix/-/global-prefix-3.0.0.tgz#fc85f73064df69f50421f47f883fe5b913ba9b97" + integrity sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg== + dependencies: + ini "^1.3.5" + kind-of "^6.0.2" + which "^1.3.1" + +globals@^11.1.0: + version "11.12.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" + integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== + +globby@8.0.2: + version "8.0.2" + resolved "https://registry.yarnpkg.com/globby/-/globby-8.0.2.tgz#5697619ccd95c5275dbb2d6faa42087c1a941d8d" + integrity sha512-yTzMmKygLp8RUpG1Ymu2VXPSJQZjNAZPD4ywgYEaG7e4tBJeUQBO8OpXrf1RCNcEs5alsoJYPAMiIHP0cmeC7w== + dependencies: + array-union "^1.0.1" + dir-glob "2.0.0" + fast-glob "^2.0.2" + glob "^7.1.2" + ignore "^3.3.5" + pify "^3.0.0" + slash "^1.0.0" + +globby@^10.0.1: + version "10.0.2" + resolved 
"https://registry.yarnpkg.com/globby/-/globby-10.0.2.tgz#277593e745acaa4646c3ab411289ec47a0392543" + integrity sha512-7dUi7RvCoT/xast/o/dLN53oqND4yk0nsHkhRgn9w65C4PofCLOoJ39iSOg+qVDdWQPIEj+eszMHQ+aLVwwQSg== + dependencies: + "@types/glob" "^7.1.1" + array-union "^2.1.0" + dir-glob "^3.0.1" + fast-glob "^3.0.3" + glob "^7.1.3" + ignore "^5.1.1" + merge2 "^1.2.3" + slash "^3.0.0" + +globby@^11.0.1: + version "11.0.1" + resolved "https://registry.yarnpkg.com/globby/-/globby-11.0.1.tgz#9a2bf107a068f3ffeabc49ad702c79ede8cfd357" + integrity sha512-iH9RmgwCmUJHi2z5o2l3eTtGBtXek1OYlHrbcxOYugyHLmAsZrPj43OtHThd62Buh/Vv6VyCBD2bdyWcGNQqoQ== + dependencies: + array-union "^2.1.0" + dir-glob "^3.0.1" + fast-glob "^3.1.1" + ignore "^5.1.4" + merge2 "^1.3.0" + slash "^3.0.0" + +globby@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-6.1.0.tgz#f5a6d70e8395e21c858fb0489d64df02424d506c" + integrity sha1-9abXDoOV4hyFj7BInWTfAkJNUGw= + dependencies: + array-union "^1.0.1" + glob "^7.0.3" + object-assign "^4.0.1" + pify "^2.0.0" + pinkie-promise "^2.0.0" + +globby@^7.1.1: + version "7.1.1" + resolved "https://registry.yarnpkg.com/globby/-/globby-7.1.1.tgz#fb2ccff9401f8600945dfada97440cca972b8680" + integrity sha1-+yzP+UAfhgCUXfral0QMypcrhoA= + dependencies: + array-union "^1.0.1" + dir-glob "^2.0.0" + glob "^7.1.2" + ignore "^3.3.5" + pify "^3.0.0" + slash "^1.0.0" + +globby@^9.2.0: + version "9.2.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-9.2.0.tgz#fd029a706c703d29bdd170f4b6db3a3f7a7cb63d" + integrity sha512-ollPHROa5mcxDEkwg6bPt3QbEf4pDQSNtd6JPL1YvOvAo/7/0VAm9TccUeoTmarjPw4pfUthSCqcyfNB1I3ZSg== + dependencies: + "@types/glob" "^7.1.1" + array-union "^1.0.2" + dir-glob "^2.2.2" + fast-glob "^2.2.6" + glob "^7.1.3" + ignore "^4.0.3" + pify "^4.0.1" + slash "^2.0.0" + +gonzales-pe@^4.2.3: + version "4.3.0" + resolved "https://registry.yarnpkg.com/gonzales-pe/-/gonzales-pe-4.3.0.tgz#fe9dec5f3c557eead09ff868c65826be54d067b3" + integrity sha512-otgSPpUmdWJ43VXyiNgEYE4luzHCL2pz4wQ0OnDluC6Eg4Ko3Vexy/SrSynglw/eR+OhkzmqFCZa/OFa/RgAOQ== + dependencies: + minimist "^1.2.5" + +good-listener@^1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/good-listener/-/good-listener-1.2.2.tgz#d53b30cdf9313dffb7dc9a0d477096aa6d145c50" + integrity sha1-1TswzfkxPf+33JoNR3CWqm0UXFA= + dependencies: + delegate "^3.1.2" + +got@^6.7.1: + version "6.7.1" + resolved "https://registry.yarnpkg.com/got/-/got-6.7.1.tgz#240cd05785a9a18e561dc1b44b41c763ef1e8db0" + integrity sha1-JAzQV4WpoY5WHcG0S0HHY+8ejbA= + dependencies: + create-error-class "^3.0.0" + duplexer3 "^0.1.4" + get-stream "^3.0.0" + is-redirect "^1.0.0" + is-retry-allowed "^1.0.0" + is-stream "^1.0.0" + lowercase-keys "^1.0.0" + safe-buffer "^5.0.1" + timed-out "^4.0.0" + unzip-response "^2.0.1" + url-parse-lax "^1.0.0" + +got@^8.3.1: + version "8.3.2" + resolved "https://registry.yarnpkg.com/got/-/got-8.3.2.tgz#1d23f64390e97f776cac52e5b936e5f514d2e937" + integrity sha512-qjUJ5U/hawxosMryILofZCkm3C84PLJS/0grRIpjAwu+Lkxxj5cxeCU25BG0/3mDSpXKTyZr8oh8wIgLaH0QCw== + dependencies: + "@sindresorhus/is" "^0.7.0" + cacheable-request "^2.1.1" + decompress-response "^3.3.0" + duplexer3 "^0.1.4" + get-stream "^3.0.0" + into-stream "^3.1.0" + is-retry-allowed "^1.1.0" + isurl "^1.0.0-alpha5" + lowercase-keys "^1.0.0" + mimic-response "^1.0.0" + p-cancelable "^0.4.0" + p-timeout "^2.0.1" + pify "^3.0.0" + safe-buffer "^5.1.1" + timed-out "^4.0.1" + url-parse-lax "^3.0.0" + url-to-options "^1.0.1" + +got@^9.6.0: + version "9.6.0" + 
resolved "https://registry.yarnpkg.com/got/-/got-9.6.0.tgz#edf45e7d67f99545705de1f7bbeeeb121765ed85" + integrity sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q== + dependencies: + "@sindresorhus/is" "^0.14.0" + "@szmarczak/http-timer" "^1.1.2" + cacheable-request "^6.0.0" + decompress-response "^3.3.0" + duplexer3 "^0.1.4" + get-stream "^4.1.0" + lowercase-keys "^1.0.1" + mimic-response "^1.0.1" + p-cancelable "^1.0.0" + to-readable-stream "^1.0.0" + url-parse-lax "^3.0.0" + +graceful-fs@^4.1.10, graceful-fs@^4.1.11, graceful-fs@^4.1.15, graceful-fs@^4.1.2, graceful-fs@^4.1.3, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.2: + version "4.2.4" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.4.tgz#2256bde14d3632958c465ebc96dc467ca07a29fb" + integrity sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw== + +grapheme-splitter@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/grapheme-splitter/-/grapheme-splitter-1.0.4.tgz#9cf3a665c6247479896834af35cf1dbb4400767e" + integrity sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ== + +gray-matter@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/gray-matter/-/gray-matter-4.0.2.tgz#9aa379e3acaf421193fce7d2a28cebd4518ac454" + integrity sha512-7hB/+LxrOjq/dd8APlK0r24uL/67w7SkYnfwhNFwg/VDIGWGmduTDYf3WNstLW2fbbmRwrDGCVSJ2isuf2+4Hw== + dependencies: + js-yaml "^3.11.0" + kind-of "^6.0.2" + section-matter "^1.0.0" + strip-bom-string "^1.0.0" + +gud@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/gud/-/gud-1.0.0.tgz#a489581b17e6a70beca9abe3ae57de7a499852c0" + integrity sha512-zGEOVKFM5sVPPrYs7J5/hYEw2Pof8KCyOwyhG8sAF26mCAeUFAcYPu1mwB7hhpIP29zOIBaDqwuHdLp0jvZXjw== + +gzip-size@5.1.1, gzip-size@^5.0.0: + version "5.1.1" + resolved "https://registry.yarnpkg.com/gzip-size/-/gzip-size-5.1.1.tgz#cb9bee692f87c0612b232840a873904e4c135274" + integrity sha512-FNHi6mmoHvs1mxZAds4PpdCS6QG8B4C1krxJsMutgxl5t3+GlRTzzI3NEkifXx2pVsOvJdOGSmIgDhQ55FwdPA== + dependencies: + duplexer "^0.1.1" + pify "^4.0.1" + +handle-thing@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/handle-thing/-/handle-thing-2.0.1.tgz#857f79ce359580c340d43081cc648970d0bb234e" + integrity sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg== + +has-ansi@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-0.1.0.tgz#84f265aae8c0e6a88a12d7022894b7568894c62e" + integrity sha1-hPJlqujA5qiKEtcCKJS3VoiUxi4= + dependencies: + ansi-regex "^0.2.0" + +has-ansi@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91" + integrity sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE= + dependencies: + ansi-regex "^2.0.0" + +has-flag@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-2.0.0.tgz#e8207af1cc7b30d446cc70b734b5e8be18f88d51" + integrity sha1-6CB68cx7MNRGzHC3NLXovhj4jVE= + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + integrity sha1-tdRU3CGZriJWmfNGfloH87lVuv0= + +has-flag@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" + integrity 
sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== + +has-glob@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-glob/-/has-glob-1.0.0.tgz#9aaa9eedbffb1ba3990a7b0010fb678ee0081207" + integrity sha1-mqqe7b/7G6OZCnsAEPtnjuAIEgc= + dependencies: + is-glob "^3.0.0" + +has-symbol-support-x@^1.4.1: + version "1.4.2" + resolved "https://registry.yarnpkg.com/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz#1409f98bc00247da45da67cee0a36f282ff26455" + integrity sha512-3ToOva++HaW+eCpgqZrCfN51IPB+7bJNVT6CUATzueB5Heb8o6Nam0V3HG5dlDvZU1Gn5QLcbahiKw/XVk5JJw== + +has-symbols@^1.0.0, has-symbols@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.1.tgz#9f5214758a44196c406d9bd76cebf81ec2dd31e8" + integrity sha512-PLcsoqu++dmEIZB+6totNFKq/7Do+Z0u4oT0zKOJNl3lYK6vGwwu2hjHs+68OEZbTjiUE9bgOABXbP/GvrS0Kg== + +has-to-string-tag-x@^1.2.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz#a045ab383d7b4b2012a00148ab0aa5f290044d4d" + integrity sha512-vdbKfmw+3LoOYVr+mtxHaX5a96+0f3DljYd8JOqvOLsf5mw2Otda2qCDT9qRqLAhrjyQ0h7ual5nOiASpsGNFw== + dependencies: + has-symbol-support-x "^1.4.1" + +has-value@^0.3.1: + version "0.3.1" + resolved "https://registry.yarnpkg.com/has-value/-/has-value-0.3.1.tgz#7b1f58bada62ca827ec0a2078025654845995e1f" + integrity sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8= + dependencies: + get-value "^2.0.3" + has-values "^0.1.4" + isobject "^2.0.0" + +has-value@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-value/-/has-value-1.0.0.tgz#18b281da585b1c5c51def24c930ed29a0be6b177" + integrity sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc= + dependencies: + get-value "^2.0.6" + has-values "^1.0.0" + isobject "^3.0.0" + +has-values@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/has-values/-/has-values-0.1.4.tgz#6d61de95d91dfca9b9a02089ad384bff8f62b771" + integrity sha1-bWHeldkd/Km5oCCJrThL/49it3E= + +has-values@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-values/-/has-values-1.0.0.tgz#95b0b63fec2146619a6fe57fe75628d5a39efe4f" + integrity sha1-lbC2P+whRmGab+V/51Yo1aOe/k8= + dependencies: + is-number "^3.0.0" + kind-of "^4.0.0" + +has-yarn@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/has-yarn/-/has-yarn-2.1.0.tgz#137e11354a7b5bf11aa5cb649cf0c6f3ff2b2e77" + integrity sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw== + +has@^1.0.0, has@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796" + integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw== + dependencies: + function-bind "^1.1.1" + +hash-base@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/hash-base/-/hash-base-3.1.0.tgz#55c381d9e06e1d2997a883b4a3fddfe7f0d3af33" + integrity sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA== + dependencies: + inherits "^2.0.4" + readable-stream "^3.6.0" + safe-buffer "^5.2.0" + +hash.js@^1.0.0, hash.js@^1.0.3: + version "1.1.7" + resolved "https://registry.yarnpkg.com/hash.js/-/hash.js-1.1.7.tgz#0babca538e8d4ee4a0f8988d68866537a003cf42" + integrity sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA== + dependencies: + inherits "^2.0.3" + minimalistic-assert "^1.0.1" + +hasha@^5.0.0: + version "5.2.0" + 
resolved "https://registry.yarnpkg.com/hasha/-/hasha-5.2.0.tgz#33094d1f69c40a4a6ac7be53d5fe3ff95a269e0c" + integrity sha512-2W+jKdQbAdSIrggA8Q35Br8qKadTrqCTC8+XZvBWepKDK6m9XkX6Iz1a2yh2KP01kzAR/dpuMeUnocoLYDcskw== + dependencies: + is-stream "^2.0.0" + type-fest "^0.8.0" + +hast-to-hyperscript@^9.0.0: + version "9.0.0" + resolved "https://registry.yarnpkg.com/hast-to-hyperscript/-/hast-to-hyperscript-9.0.0.tgz#768fb557765fe28749169c885056417342d71e83" + integrity sha512-NJvMYU3GlMLs7hN3CRbsNlMzusVNkYBogVWDGybsuuVQ336gFLiD+q9qtFZT2meSHzln3pNISZWTASWothMSMg== + dependencies: + "@types/unist" "^2.0.3" + comma-separated-tokens "^1.0.0" + property-information "^5.3.0" + space-separated-tokens "^1.0.0" + style-to-object "^0.3.0" + unist-util-is "^4.0.0" + web-namespaces "^1.0.0" + +hast-util-from-parse5@^5.0.0: + version "5.0.3" + resolved "https://registry.yarnpkg.com/hast-util-from-parse5/-/hast-util-from-parse5-5.0.3.tgz#3089dc0ee2ccf6ec8bc416919b51a54a589e097c" + integrity sha512-gOc8UB99F6eWVWFtM9jUikjN7QkWxB3nY0df5Z0Zq1/Nkwl5V4hAAsl0tmwlgWl/1shlTF8DnNYLO8X6wRV9pA== + dependencies: + ccount "^1.0.3" + hastscript "^5.0.0" + property-information "^5.0.0" + web-namespaces "^1.1.2" + xtend "^4.0.1" + +hast-util-from-parse5@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/hast-util-from-parse5/-/hast-util-from-parse5-6.0.0.tgz#b38793c81e1a99f5fd592a4a88fc2731dccd0f30" + integrity sha512-3ZYnfKenbbkhhNdmOQqgH10vnvPivTdsOJCri+APn0Kty+nRkDHArnaX9Hiaf8H+Ig+vkNptL+SRY/6RwWJk1Q== + dependencies: + "@types/parse5" "^5.0.0" + ccount "^1.0.0" + hastscript "^5.0.0" + property-information "^5.0.0" + vfile "^4.0.0" + web-namespaces "^1.0.0" + +hast-util-parse-selector@^2.0.0: + version "2.2.4" + resolved "https://registry.yarnpkg.com/hast-util-parse-selector/-/hast-util-parse-selector-2.2.4.tgz#60c99d0b519e12ab4ed32e58f150ec3f61ed1974" + integrity sha512-gW3sxfynIvZApL4L07wryYF4+C9VvH3AUi7LAnVXV4MneGEgwOByXvFo18BgmTWnm7oHAe874jKbIB1YhHSIzA== + +hast-util-raw@6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/hast-util-raw/-/hast-util-raw-6.0.0.tgz#49a38f5107d483f83a139709f2f705f22e7e7d32" + integrity sha512-IQo6tv3bMMKxk53DljswliucCJOQxaZFCuKEJ7X80249dmJ1nA9LtOnnylsLlqTG98NjQ+iGcoLAYo9q5FRhRg== + dependencies: + "@types/hast" "^2.0.0" + hast-util-from-parse5 "^6.0.0" + hast-util-to-parse5 "^6.0.0" + html-void-elements "^1.0.0" + parse5 "^6.0.0" + unist-util-position "^3.0.0" + vfile "^4.0.0" + web-namespaces "^1.0.0" + xtend "^4.0.0" + zwitch "^1.0.0" + +hast-util-to-parse5@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/hast-util-to-parse5/-/hast-util-to-parse5-6.0.0.tgz#1ec44650b631d72952066cea9b1445df699f8479" + integrity sha512-Lu5m6Lgm/fWuz8eWnrKezHtVY83JeRGaNQ2kn9aJgqaxvVkFCZQBEhgodZUDUvoodgyROHDb3r5IxAEdl6suJQ== + dependencies: + hast-to-hyperscript "^9.0.0" + property-information "^5.0.0" + web-namespaces "^1.0.0" + xtend "^4.0.0" + zwitch "^1.0.0" + +hastscript@^5.0.0: + version "5.1.2" + resolved "https://registry.yarnpkg.com/hastscript/-/hastscript-5.1.2.tgz#bde2c2e56d04c62dd24e8c5df288d050a355fb8a" + integrity sha512-WlztFuK+Lrvi3EggsqOkQ52rKbxkXL3RwB6t5lwoa8QLMemoWfBuL43eDrwOamJyR7uKQKdmKYaBH1NZBiIRrQ== + dependencies: + comma-separated-tokens "^1.0.0" + hast-util-parse-selector "^2.0.0" + property-information "^5.0.0" + space-separated-tokens "^1.0.0" + +he@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/he/-/he-1.2.0.tgz#84ae65fa7eafb165fddb61566ae14baf05664f0f" + integrity 
sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw== + +hex-color-regex@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/hex-color-regex/-/hex-color-regex-1.1.0.tgz#4c06fccb4602fe2602b3c93df82d7e7dbf1a8a8e" + integrity sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ== + +history@^4.9.0: + version "4.10.1" + resolved "https://registry.yarnpkg.com/history/-/history-4.10.1.tgz#33371a65e3a83b267434e2b3f3b1b4c58aad4cf3" + integrity sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew== + dependencies: + "@babel/runtime" "^7.1.2" + loose-envify "^1.2.0" + resolve-pathname "^3.0.0" + tiny-invariant "^1.0.2" + tiny-warning "^1.0.0" + value-equal "^1.0.1" + +hmac-drbg@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/hmac-drbg/-/hmac-drbg-1.0.1.tgz#d2745701025a6c775a6c545793ed502fc0c649a1" + integrity sha1-0nRXAQJabHdabFRXk+1QL8DGSaE= + dependencies: + hash.js "^1.0.3" + minimalistic-assert "^1.0.0" + minimalistic-crypto-utils "^1.0.1" + +hoist-non-react-statics@^3.1.0: + version "3.3.2" + resolved "https://registry.yarnpkg.com/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz#ece0acaf71d62c2969c2ec59feff42a4b1a85b45" + integrity sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw== + dependencies: + react-is "^16.7.0" + +hoopy@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/hoopy/-/hoopy-0.1.4.tgz#609207d661100033a9a9402ad3dea677381c1b1d" + integrity sha512-HRcs+2mr52W0K+x8RzcLzuPPmVIKMSv97RGHy0Ea9y/mpcaK+xTrjICA04KAHi4GRzxliNqNJEFYWHghy3rSfQ== + +hosted-git-info@^2.1.4: + version "2.8.8" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.8.8.tgz#7539bd4bc1e0e0a895815a2e0262420b12858488" + integrity sha512-f/wzC2QaWBs7t9IYqB4T3sR1xviIViXJRJTWBlx2Gf3g0Xi5vI7Yy4koXQ1c9OYDGHN9sBy1DQ2AB8fqZBWhUg== + +hpack.js@^2.1.6: + version "2.1.6" + resolved "https://registry.yarnpkg.com/hpack.js/-/hpack.js-2.1.6.tgz#87774c0949e513f42e84575b3c45681fade2a0b2" + integrity sha1-h3dMCUnlE/QuhFdbPEVoH63ioLI= + dependencies: + inherits "^2.0.1" + obuf "^1.0.0" + readable-stream "^2.0.1" + wbuf "^1.1.0" + +hsl-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/hsl-regex/-/hsl-regex-1.0.0.tgz#d49330c789ed819e276a4c0d272dffa30b18fe6e" + integrity sha1-1JMwx4ntgZ4nakwNJy3/owsY/m4= + +hsla-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/hsla-regex/-/hsla-regex-1.0.0.tgz#c1ce7a3168c8c6614033a4b5f7877f3b225f9c38" + integrity sha1-wc56MWjIxmFAM6S194d/OyJfnDg= + +html-comment-regex@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/html-comment-regex/-/html-comment-regex-1.1.2.tgz#97d4688aeb5c81886a364faa0cad1dda14d433a7" + integrity sha512-P+M65QY2JQ5Y0G9KKdlDpo0zK+/OHptU5AaBwUfAIDJZk1MYf32Frm84EcOytfJE0t5JvkAnKlmjsXDnWzCJmQ== + +html-entities@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/html-entities/-/html-entities-1.3.1.tgz#fb9a1a4b5b14c5daba82d3e34c6ae4fe701a0e44" + integrity sha512-rhE/4Z3hIhzHAUKbW8jVcCyuT5oJCXXqhN/6mXXVCpzTmvJnoH2HL/bt3EZ6p55jbFJBeAe1ZNpL5BugLujxNA== + +html-minifier-terser@^5.0.1, html-minifier-terser@^5.0.5: + version "5.1.1" + resolved "https://registry.yarnpkg.com/html-minifier-terser/-/html-minifier-terser-5.1.1.tgz#922e96f1f3bb60832c2634b79884096389b1f054" + integrity 
sha512-ZPr5MNObqnV/T9akshPKbVgyOqLmy+Bxo7juKCfTfnjNniTAMdy4hz21YQqoofMBJD2kdREaqPPdThoR78Tgxg== + dependencies: + camel-case "^4.1.1" + clean-css "^4.2.3" + commander "^4.1.1" + he "^1.2.0" + param-case "^3.0.3" + relateurl "^0.2.7" + terser "^4.6.3" + +html-tags@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/html-tags/-/html-tags-3.1.0.tgz#7b5e6f7e665e9fb41f30007ed9e0d41e97fb2140" + integrity sha512-1qYz89hW3lFDEazhjW0yVAV87lw8lVkrJocr72XmBkMKsoSVJCQx3W8BXsC7hO2qAt8BoVjYjtAcZ9perqGnNg== + +html-void-elements@^1.0.0: + version "1.0.5" + resolved "https://registry.yarnpkg.com/html-void-elements/-/html-void-elements-1.0.5.tgz#ce9159494e86d95e45795b166c2021c2cfca4483" + integrity sha512-uE/TxKuyNIcx44cIWnjr/rfIATDH7ZaOMmstu0CwhFG1Dunhlp4OC6/NMbhiwoq5BpW0ubi303qnEk/PZj614w== + +html-webpack-plugin@^4.0.4: + version "4.3.0" + resolved "https://registry.yarnpkg.com/html-webpack-plugin/-/html-webpack-plugin-4.3.0.tgz#53bf8f6d696c4637d5b656d3d9863d89ce8174fd" + integrity sha512-C0fzKN8yQoVLTelcJxZfJCE+aAvQiY2VUf3UuKrR4a9k5UMWYOtpDLsaXwATbcVCnI05hUS7L9ULQHWLZhyi3w== + dependencies: + "@types/html-minifier-terser" "^5.0.0" + "@types/tapable" "^1.0.5" + "@types/webpack" "^4.41.8" + html-minifier-terser "^5.0.1" + loader-utils "^1.2.3" + lodash "^4.17.15" + pretty-error "^2.1.1" + tapable "^1.1.3" + util.promisify "1.0.0" + +htmlparser2@^3.3.0, htmlparser2@^3.9.1: + version "3.10.1" + resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-3.10.1.tgz#bd679dc3f59897b6a34bb10749c855bb53a9392f" + integrity sha512-IgieNijUMbkDovyoKObU1DUhm1iwNYE/fuifEoEHfd1oZKZDaONBSkal7Y01shxsM49R4XaMdGez3WnF9UfiCQ== + dependencies: + domelementtype "^1.3.1" + domhandler "^2.3.0" + domutils "^1.5.1" + entities "^1.1.1" + inherits "^2.0.1" + readable-stream "^3.1.1" + +http-cache-semantics@3.8.1: + version "3.8.1" + resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-3.8.1.tgz#39b0e16add9b605bf0a9ef3d9daaf4843b4cacd2" + integrity sha512-5ai2iksyV8ZXmnZhHH4rWPoxxistEexSi5936zIQ1bnNTW5VnA85B6P/VpXiRM017IgRvb2kKo1a//y+0wSp3w== + +http-cache-semantics@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz#49e91c5cbf36c9b94bcfcd71c23d5249ec74e390" + integrity sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ== + +http-call@^5.2.2: + version "5.3.0" + resolved "https://registry.yarnpkg.com/http-call/-/http-call-5.3.0.tgz#4ded815b13f423de176eb0942d69c43b25b148db" + integrity sha512-ahwimsC23ICE4kPl9xTBjKB4inbRaeLyZeRunC/1Jy/Z6X8tv22MEAjK+KBOMSVLaqXPTTmd8638waVIKLGx2w== + dependencies: + content-type "^1.0.4" + debug "^4.1.1" + is-retry-allowed "^1.1.0" + is-stream "^2.0.0" + parse-json "^4.0.0" + tunnel-agent "^0.6.0" + +http-deceiver@^1.2.7: + version "1.2.7" + resolved "https://registry.yarnpkg.com/http-deceiver/-/http-deceiver-1.2.7.tgz#fa7168944ab9a519d337cb0bec7284dc3e723d87" + integrity sha1-+nFolEq5pRnTN8sL7HKE3D5yPYc= + +http-errors@1.7.2: + version "1.7.2" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.7.2.tgz#4f5029cf13239f31036e5b2e55292bcfbcc85c8f" + integrity sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg== + dependencies: + depd "~1.1.2" + inherits "2.0.3" + setprototypeof "1.1.1" + statuses ">= 1.5.0 < 2" + toidentifier "1.0.0" + +http-errors@1.7.3, http-errors@~1.7.2: + version "1.7.3" + resolved 
"https://registry.yarnpkg.com/http-errors/-/http-errors-1.7.3.tgz#6c619e4f9c60308c38519498c14fbb10aacebb06" + integrity sha512-ZTTX0MWrsQ2ZAhA1cejAwDLycFsd7I7nVtnkT3Ol0aqodaKW+0CTZDQ1uBv5whptCnc8e8HeRRJxRs0kmm/Qfw== + dependencies: + depd "~1.1.2" + inherits "2.0.4" + setprototypeof "1.1.1" + statuses ">= 1.5.0 < 2" + toidentifier "1.0.0" + +http-errors@~1.6.2: + version "1.6.3" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.3.tgz#8b55680bb4be283a0b5bf4ea2e38580be1d9320d" + integrity sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0= + dependencies: + depd "~1.1.2" + inherits "2.0.3" + setprototypeof "1.1.0" + statuses ">= 1.4.0 < 2" + +http-errors@~1.8.0: + version "1.8.0" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.8.0.tgz#75d1bbe497e1044f51e4ee9e704a62f28d336507" + integrity sha512-4I8r0C5JDhT5VkvI47QktDW75rNlGVsUf/8hzjCC/wkWI/jdTRmBb9aI7erSG82r1bjKY3F6k28WnsVxB1C73A== + dependencies: + depd "~1.1.2" + inherits "2.0.4" + setprototypeof "1.2.0" + statuses ">= 1.5.0 < 2" + toidentifier "1.0.0" + +http-parser-js@>=0.5.1: + version "0.5.2" + resolved "https://registry.yarnpkg.com/http-parser-js/-/http-parser-js-0.5.2.tgz#da2e31d237b393aae72ace43882dd7e270a8ff77" + integrity sha512-opCO9ASqg5Wy2FNo7A0sxy71yGbbkJJXLdgMK04Tcypw9jr2MgWbyubb0+WdmDmGnFflO7fRbqbaihh/ENDlRQ== + +http-proxy-middleware@0.19.1: + version "0.19.1" + resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-0.19.1.tgz#183c7dc4aa1479150306498c210cdaf96080a43a" + integrity sha512-yHYTgWMQO8VvwNS22eLLloAkvungsKdKTLO8AJlftYIKNfJr3GK3zK0ZCfzDDGUBttdGc8xFy1mCitvNKQtC3Q== + dependencies: + http-proxy "^1.17.0" + is-glob "^4.0.0" + lodash "^4.17.11" + micromatch "^3.1.10" + +http-proxy-middleware@^0.21.0: + version "0.21.0" + resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-0.21.0.tgz#c6b1ca05174b5fbc57bee9485ffa0fa2f0dabeb0" + integrity sha512-4Arcl5QQ6pRMRJmtM1WVHKHkFAQn5uvw83XuNeqnMTOikDiCoTxv5/vdudhKQsF+1mtaAawrK2SEB1v2tYecdQ== + dependencies: + "@types/http-proxy" "^1.17.3" + http-proxy "^1.18.0" + is-glob "^4.0.1" + lodash "^4.17.15" + micromatch "^4.0.2" + +http-proxy@^1.17.0, http-proxy@^1.18.0: + version "1.18.1" + resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.18.1.tgz#401541f0534884bbf95260334e72f88ee3976549" + integrity sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ== + dependencies: + eventemitter3 "^4.0.0" + follow-redirects "^1.0.0" + requires-port "^1.0.0" + +http2-client@^1.2.5: + version "1.3.3" + resolved "https://registry.yarnpkg.com/http2-client/-/http2-client-1.3.3.tgz#90fc15d646cca86956b156d07c83947d57d659a9" + integrity sha512-nUxLymWQ9pzkzTmir24p2RtsgruLmhje7lH3hLX1IpwvyTg77fW+1brenPPP3USAR+rQ36p5sTA/x7sjCJVkAA== + +https-browserify@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/https-browserify/-/https-browserify-1.0.0.tgz#ec06c10e0a34c0f2faf199f7fd7fc78fffd03c73" + integrity sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM= + +human-signals@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-1.1.1.tgz#c5b1cd14f50aeae09ab6c59fe63ba3395fe4dfa3" + integrity sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw== + +hyperlinker@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/hyperlinker/-/hyperlinker-1.0.0.tgz#23dc9e38a206b208ee49bc2d6c8ef47027df0c0e" + integrity 
sha512-Ty8UblRWFEcfSuIaajM34LdPXIhbs1ajEX/BBPv24J+enSVaEVY63xQ6lTO9VRYS5LAoghIG0IDJ+p+IPzKUQQ== + +iconv-lite@0.4.24, iconv-lite@^0.4.24: + version "0.4.24" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b" + integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== + dependencies: + safer-buffer ">= 2.1.2 < 3" + +iconv-lite@^0.6.2: + version "0.6.2" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.6.2.tgz#ce13d1875b0c3a674bd6a04b7f76b01b1b6ded01" + integrity sha512-2y91h5OpQlolefMPmUlivelittSWy0rP+oYVpn6A7GwVHNE8AWzoYOBNmlwks3LobaJxgHCYZAnyNo2GgpNRNQ== + dependencies: + safer-buffer ">= 2.1.2 < 3.0.0" + +icss-utils@^4.0.0, icss-utils@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/icss-utils/-/icss-utils-4.1.1.tgz#21170b53789ee27447c2f47dd683081403f9a467" + integrity sha512-4aFq7wvWyMHKgxsH8QQtGpvbASCf+eM3wPRLI6R+MgAnTCZ6STYsRvttLvRWK0Nfif5piF394St3HeJDaljGPA== + dependencies: + postcss "^7.0.14" + +ieee754@1.1.13, ieee754@^1.1.4: + version "1.1.13" + resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.1.13.tgz#ec168558e95aa181fd87d37f55c32bbcb6708b84" + integrity sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg== + +iferr@^0.1.5: + version "0.1.5" + resolved "https://registry.yarnpkg.com/iferr/-/iferr-0.1.5.tgz#c60eed69e6d8fdb6b3104a1fcbca1c192dc5b501" + integrity sha1-xg7taebY/bazEEofy8ocGS3FtQE= + +ignore-walk@^3.0.1: + version "3.0.3" + resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-3.0.3.tgz#017e2447184bfeade7c238e4aefdd1e8f95b1e37" + integrity sha512-m7o6xuOaT1aqheYHKf8W6J5pYH85ZI9w077erOzLje3JsB1gkafkAhHHY19dqjulgIZHFm32Cp5uNZgcQqdJKw== + dependencies: + minimatch "^3.0.4" + +ignore@^3.3.5: + version "3.3.10" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-3.3.10.tgz#0a97fb876986e8081c631160f8f9f389157f0043" + integrity sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug== + +ignore@^4.0.3: + version "4.0.6" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-4.0.6.tgz#750e3db5862087b4737ebac8207ffd1ef27b25fc" + integrity sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg== + +ignore@^5.1.1, ignore@^5.1.4: + version "5.1.8" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.1.8.tgz#f150a8b50a34289b33e22f5889abd4d8016f0e57" + integrity sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw== + +immer@1.10.0: + version "1.10.0" + resolved "https://registry.yarnpkg.com/immer/-/immer-1.10.0.tgz#bad67605ba9c810275d91e1c2a47d4582e98286d" + integrity sha512-O3sR1/opvCDGLEVcvrGTMtLac8GJ5IwZC4puPrLuRj3l7ICKvkmA0vGuU9OW8mV9WIBRnaxp5GJh9IEAaNOoYg== + +import-cwd@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/import-cwd/-/import-cwd-2.1.0.tgz#aa6cf36e722761285cb371ec6519f53e2435b0a9" + integrity sha1-qmzzbnInYShcs3HsZRn1PiQ1sKk= + dependencies: + import-from "^2.1.0" + +import-fresh@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-2.0.0.tgz#d81355c15612d386c61f9ddd3922d4304822a546" + integrity sha1-2BNVwVYS04bGH53dOSLUMEgipUY= + dependencies: + caller-path "^2.0.0" + resolve-from "^3.0.0" + +import-fresh@^3.1.0, import-fresh@^3.2.1: + version "3.2.1" + resolved 
"https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.2.1.tgz#633ff618506e793af5ac91bf48b72677e15cbe66" + integrity sha512-6e1q1cnWP2RXD9/keSkxHScg508CdXqXWgWBaETNhyuBFz+kUZlKboh+ISK+bU++DmbHimVBrOz/zzPe0sZ3sQ== + dependencies: + parent-module "^1.0.0" + resolve-from "^4.0.0" + +import-from@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/import-from/-/import-from-2.1.0.tgz#335db7f2a7affd53aaa471d4b8021dee36b7f3b1" + integrity sha1-M1238qev/VOqpHHUuAId7ja387E= + dependencies: + resolve-from "^3.0.0" + +import-lazy@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/import-lazy/-/import-lazy-2.1.0.tgz#05698e3d45c88e8d7e9d92cb0584e77f096f3e43" + integrity sha1-BWmOPUXIjo1+nZLLBYTnfwlvPkM= + +import-local@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/import-local/-/import-local-2.0.0.tgz#55070be38a5993cf18ef6db7e961f5bee5c5a09d" + integrity sha512-b6s04m3O+s3CGSbqDIyP4R6aAwAeYlVq9+WUWep6iHa8ETRf9yei1U48C5MmfJmV9AiLYYBKPMq/W+/WRpQmCQ== + dependencies: + pkg-dir "^3.0.0" + resolve-cwd "^2.0.0" + +imurmurhash@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" + integrity sha1-khi5srkoojixPcT7a21XbyMUU+o= + +indent-string@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-3.2.0.tgz#4a5fd6d27cc332f37e5419a504dbb837105c9289" + integrity sha1-Sl/W0nzDMvN+VBmlBNu4NxBckok= + +indent-string@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-4.0.0.tgz#624f8f4497d619b2d9768531d58f4122854d7251" + integrity sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg== + +indexes-of@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/indexes-of/-/indexes-of-1.0.1.tgz#f30f716c8e2bd346c7b67d3df3915566a7c05607" + integrity sha1-8w9xbI4r00bHtn0985FVZqfAVgc= + +infer-owner@^1.0.3, infer-owner@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/infer-owner/-/infer-owner-1.0.4.tgz#c4cefcaa8e51051c2a40ba2ce8a3d27295af9467" + integrity sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A== + +infima@0.2.0-alpha.12: + version "0.2.0-alpha.12" + resolved "https://registry.yarnpkg.com/infima/-/infima-0.2.0-alpha.12.tgz#6b4a0ba9756262e4f1af2c60feb4bc0ffd9b9e21" + integrity sha512-in5n36oE2sdiB/1rzuzdmKyuNRMVUO9P+qUidUG8leHeDU+WMQ7oTP7MXSqtAAxduiPb7HHi0/ptQLLUr/ge4w== + +inflight@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + integrity sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk= + dependencies: + once "^1.3.0" + wrappy "1" + +inherits@2, inherits@2.0.4, inherits@^2.0.0, inherits@^2.0.1, inherits@^2.0.3, inherits@^2.0.4, inherits@~2.0.1, inherits@~2.0.3: + version "2.0.4" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" + integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== + +inherits@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.1.tgz#b17d08d326b4423e568eff719f91b0b1cbdf69f1" + integrity sha1-sX0I0ya0Qj5Wjv9xn5GwscvfafE= + +inherits@2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" + integrity sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4= + +ini@^1.3.2, 
ini@^1.3.4, ini@^1.3.5, ini@~1.3.0: + version "1.3.5" + resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.5.tgz#eee25f56db1c9ec6085e0c22778083f596abf927" + integrity sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw== + +inline-style-parser@0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/inline-style-parser/-/inline-style-parser-0.1.1.tgz#ec8a3b429274e9c0a1f1c4ffa9453a7fef72cea1" + integrity sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q== + +inquirer-autocomplete-prompt@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/inquirer-autocomplete-prompt/-/inquirer-autocomplete-prompt-1.0.2.tgz#3f2548f73dd12f0a541be055ea9c8c7aedeb42bf" + integrity sha512-vNmAhhrOQwPnUm4B9kz1UB7P98rVF1z8txnjp53r40N0PBCuqoRWqjg3Tl0yz0UkDg7rEUtZ2OZpNc7jnOU9Zw== + dependencies: + ansi-escapes "^3.0.0" + chalk "^2.0.0" + figures "^2.0.0" + run-async "^2.3.0" + +inquirer@7.0.4: + version "7.0.4" + resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-7.0.4.tgz#99af5bde47153abca23f5c7fc30db247f39da703" + integrity sha512-Bu5Td5+j11sCkqfqmUTiwv+tWisMtP0L7Q8WrqA2C/BbBhy1YTdFrvjjlrKq8oagA/tLQBski2Gcx/Sqyi2qSQ== + dependencies: + ansi-escapes "^4.2.1" + chalk "^2.4.2" + cli-cursor "^3.1.0" + cli-width "^2.0.0" + external-editor "^3.0.3" + figures "^3.0.0" + lodash "^4.17.15" + mute-stream "0.0.8" + run-async "^2.2.0" + rxjs "^6.5.3" + string-width "^4.1.0" + strip-ansi "^5.1.0" + through "^2.3.6" + +inquirer@^6.5.1: + version "6.5.2" + resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-6.5.2.tgz#ad50942375d036d327ff528c08bd5fab089928ca" + integrity sha512-cntlB5ghuB0iuO65Ovoi8ogLHiWGs/5yNrtUcKjFhSSiVeAIVpD7koaSU9RM8mpXw5YDi9RdYXGQMaOURB7ycQ== + dependencies: + ansi-escapes "^3.2.0" + chalk "^2.4.2" + cli-cursor "^2.1.0" + cli-width "^2.0.0" + external-editor "^3.0.3" + figures "^2.0.0" + lodash "^4.17.12" + mute-stream "0.0.7" + run-async "^2.2.0" + rxjs "^6.4.0" + string-width "^2.1.0" + strip-ansi "^5.1.0" + through "^2.3.6" + +inquirer@^7.2.0: + version "7.3.3" + resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-7.3.3.tgz#04d176b2af04afc157a83fd7c100e98ee0aad003" + integrity sha512-JG3eIAj5V9CwcGvuOmoo6LB9kbAYT8HXffUl6memuszlwDC/qvFAJw49XJ5NROSFNPxp3iQg1GqkFhaY/CR0IA== + dependencies: + ansi-escapes "^4.2.1" + chalk "^4.1.0" + cli-cursor "^3.1.0" + cli-width "^3.0.0" + external-editor "^3.0.3" + figures "^3.0.0" + lodash "^4.17.19" + mute-stream "0.0.8" + run-async "^2.4.0" + rxjs "^6.6.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + through "^2.3.6" + +internal-ip@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/internal-ip/-/internal-ip-4.3.0.tgz#845452baad9d2ca3b69c635a137acb9a0dad0907" + integrity sha512-S1zBo1D6zcsyuC6PMmY5+55YMILQ9av8lotMx447Bq6SAgo/sDK6y6uUKmuYhW7eacnIhFfsPmCNYdDzsnnDCg== + dependencies: + default-gateway "^4.2.0" + ipaddr.js "^1.9.0" + +interpret@^1.0.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/interpret/-/interpret-1.4.0.tgz#665ab8bc4da27a774a40584e812e3e0fa45b1a1e" + integrity sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA== + +into-stream@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/into-stream/-/into-stream-3.1.0.tgz#96fb0a936c12babd6ff1752a17d05616abd094c6" + integrity sha1-lvsKk2wSur1v8XUqF9BWFqvQlMY= + dependencies: + from2 "^2.1.1" + p-is-promise "^1.1.0" + +invariant@^2.2.2, invariant@^2.2.4: + version "2.2.4" + resolved 
"https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6" + integrity sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA== + dependencies: + loose-envify "^1.0.0" + +invert-kv@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-2.0.0.tgz#7393f5afa59ec9ff5f67a27620d11c226e3eec02" + integrity sha512-wPVv/y/QQ/Uiirj/vh3oP+1Ww+AWehmi1g5fFWGPF6IpCBCDVrhgHRMvrLfdYcwDh3QJbGXDW4JAuzxElLSqKA== + +ip-regex@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-2.1.0.tgz#fa78bf5d2e6913c911ce9f819ee5146bb6d844e9" + integrity sha1-+ni/XS5pE8kRzp+BnuUUa7bYROk= + +ip@^1.1.0, ip@^1.1.5: + version "1.1.5" + resolved "https://registry.yarnpkg.com/ip/-/ip-1.1.5.tgz#bdded70114290828c0a039e72ef25f5aaec4354a" + integrity sha1-vd7XARQpCCjAoDnnLvJfWq7ENUo= + +ipaddr.js@1.9.1, ipaddr.js@^1.9.0: + version "1.9.1" + resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz#bff38543eeb8984825079ff3a2a8e6cbd46781b3" + integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g== + +is-absolute-url@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-absolute-url/-/is-absolute-url-2.1.0.tgz#50530dfb84fcc9aa7dbe7852e83a37b93b9f2aa6" + integrity sha1-UFMN+4T8yap9vnhS6Do3uTufKqY= + +is-absolute-url@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/is-absolute-url/-/is-absolute-url-3.0.3.tgz#96c6a22b6a23929b11ea0afb1836c36ad4a5d698" + integrity sha512-opmNIX7uFnS96NtPmhWQgQx6/NYFgsUXYMllcfzwWKUMwfo8kku1TvE6hkNcH+Q1ts5cMVrsY7j0bxXQDciu9Q== + +is-accessor-descriptor@^0.1.6: + version "0.1.6" + resolved "https://registry.yarnpkg.com/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz#a9e12cb3ae8d876727eeef3843f8a0897b5c98d6" + integrity sha1-qeEss66Nh2cn7u84Q/igiXtcmNY= + dependencies: + kind-of "^3.0.2" + +is-accessor-descriptor@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz#169c2f6d3df1f992618072365c9b0ea1f6878656" + integrity sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ== + dependencies: + kind-of "^6.0.0" + +is-alphabetical@1.0.4, is-alphabetical@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-alphabetical/-/is-alphabetical-1.0.4.tgz#9e7d6b94916be22153745d184c298cbf986a686d" + integrity sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg== + +is-alphanumeric@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-alphanumeric/-/is-alphanumeric-1.0.0.tgz#4a9cef71daf4c001c1d81d63d140cf53fd6889f4" + integrity sha1-Spzvcdr0wAHB2B1j0UDPU/1oifQ= + +is-alphanumerical@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz#7eb9a2431f855f6b1ef1a78e326df515696c4dbf" + integrity sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A== + dependencies: + is-alphabetical "^1.0.0" + is-decimal "^1.0.0" + +is-arguments@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-arguments/-/is-arguments-1.0.4.tgz#3faf966c7cba0ff437fb31f6250082fcf0448cf3" + integrity sha512-xPh0Rmt8NE65sNzvyUmWgI1tz3mKq74lGA0mL8LYZcoIzKOzDh6HmrYm3d18k60nHerC8A9Km8kYu87zfSFnLA== + +is-arrayish@^0.2.1: + version "0.2.1" + resolved 
"https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + integrity sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0= + +is-arrayish@^0.3.1: + version "0.3.2" + resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.3.2.tgz#4574a2ae56f7ab206896fb431eaeed066fdf8f03" + integrity sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ== + +is-binary-path@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-1.0.1.tgz#75f16642b480f187a711c814161fd3a4a7655898" + integrity sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg= + dependencies: + binary-extensions "^1.0.0" + +is-binary-path@~2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-2.1.0.tgz#ea1f7f3b80f064236e83470f86c09c254fb45b09" + integrity sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw== + dependencies: + binary-extensions "^2.0.0" + +is-buffer@^1.0.2, is-buffer@^1.1.5: + version "1.1.6" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be" + integrity sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w== + +is-buffer@^2.0.0: + version "2.0.4" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-2.0.4.tgz#3e572f23c8411a5cfd9557c849e3665e0b290623" + integrity sha512-Kq1rokWXOPXWuaMAqZiJW4XxsmD9zGx9q4aePabbn3qCRGedtH7Cm+zV8WETitMfu1wdh+Rvd6w5egwSngUX2A== + +is-callable@^1.1.4, is-callable@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.2.0.tgz#83336560b54a38e35e3a2df7afd0454d691468bb" + integrity sha512-pyVD9AaGLxtg6srb2Ng6ynWJqkHU9bEM087AKck0w8QwDarTfNcpIYoU8x8Hv2Icm8u6kFJM18Dag8lyqGkviw== + +is-ci@^1.0.10: + version "1.2.1" + resolved "https://registry.yarnpkg.com/is-ci/-/is-ci-1.2.1.tgz#e3779c8ee17fccf428488f6e281187f2e632841c" + integrity sha512-s6tfsaQaQi3JNciBH6shVqEDvhGut0SUXr31ag8Pd8BBbVVlcGfWhpPmEOoM6RJ5TFhbypvf5yyRw/VXW1IiWg== + dependencies: + ci-info "^1.5.0" + +is-ci@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-ci/-/is-ci-2.0.0.tgz#6bc6334181810e04b5c22b3d589fdca55026404c" + integrity sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w== + dependencies: + ci-info "^2.0.0" + +is-color-stop@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-color-stop/-/is-color-stop-1.1.0.tgz#cfff471aee4dd5c9e158598fbe12967b5cdad345" + integrity sha1-z/9HGu5N1cnhWFmPvhKWe1za00U= + dependencies: + css-color-names "^0.0.4" + hex-color-regex "^1.1.0" + hsl-regex "^1.0.0" + hsla-regex "^1.0.0" + rgb-regex "^1.0.1" + rgba-regex "^1.0.0" + +is-data-descriptor@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz#0b5ee648388e2c860282e793f1856fec3f301b56" + integrity sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y= + dependencies: + kind-of "^3.0.2" + +is-data-descriptor@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz#d84876321d0e7add03990406abbbbd36ba9268c7" + integrity sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ== + dependencies: + kind-of "^6.0.0" + +is-date-object@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.2.tgz#bda736f2cd8fd06d32844e7743bfa7494c3bfd7e" + integrity 
sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g== + +is-decimal@^1.0.0, is-decimal@^1.0.2: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-decimal/-/is-decimal-1.0.4.tgz#65a3a5958a1c5b63a706e1b333d7cd9f630d3fa5" + integrity sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw== + +is-descriptor@^0.1.0: + version "0.1.6" + resolved "https://registry.yarnpkg.com/is-descriptor/-/is-descriptor-0.1.6.tgz#366d8240dde487ca51823b1ab9f07a10a78251ca" + integrity sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg== + dependencies: + is-accessor-descriptor "^0.1.6" + is-data-descriptor "^0.1.4" + kind-of "^5.0.0" + +is-descriptor@^1.0.0, is-descriptor@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-descriptor/-/is-descriptor-1.0.2.tgz#3b159746a66604b04f8c81524ba365c5f14d86ec" + integrity sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg== + dependencies: + is-accessor-descriptor "^1.0.0" + is-data-descriptor "^1.0.0" + kind-of "^6.0.2" + +is-directory@^0.3.1: + version "0.3.1" + resolved "https://registry.yarnpkg.com/is-directory/-/is-directory-0.3.1.tgz#61339b6f2475fc772fd9c9d83f5c8575dc154ae1" + integrity sha1-YTObbyR1/Hcv2cnYP1yFddwVSuE= + +is-docker@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-docker/-/is-docker-1.1.0.tgz#f04374d4eee5310e9a8e113bf1495411e46176a1" + integrity sha1-8EN01O7lMQ6ajhE78UlUEeRhdqE= + +is-docker@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-docker/-/is-docker-2.0.0.tgz#2cb0df0e75e2d064fe1864c37cdeacb7b2dcf25b" + integrity sha512-pJEdRugimx4fBMra5z2/5iRdZ63OhYV0vr0Dwm5+xtW4D1FvRkB8hamMIhnWfyJeDdyr/aa7BDyNbtG38VxgoQ== + +is-extendable@^0.1.0, is-extendable@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89" + integrity sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik= + +is-extendable@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-1.0.1.tgz#a7470f9e426733d81bd81e1155264e3a3507cab4" + integrity sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA== + dependencies: + is-plain-object "^2.0.4" + +is-extglob@^2.1.0, is-extglob@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" + integrity sha1-qIwCU1eR8C7TfHahueqXc8gz+MI= + +is-fullwidth-code-point@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb" + integrity sha1-754xOG8DGn8NZDr4L95QxFfvAMs= + dependencies: + number-is-nan "^1.0.0" + +is-fullwidth-code-point@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" + integrity sha1-o7MKXE8ZkYMWeqq5O+764937ZU8= + +is-fullwidth-code-point@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d" + integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg== + +is-glob@^3.0.0, is-glob@^3.1.0: + version "3.1.0" + resolved 
"https://registry.yarnpkg.com/is-glob/-/is-glob-3.1.0.tgz#7ba5ae24217804ac70707b96922567486cc3e84a" + integrity sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo= + dependencies: + is-extglob "^2.1.0" + +is-glob@^4.0.0, is-glob@^4.0.1, is-glob@~4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.1.tgz#7567dbe9f2f5e2467bc77ab83c4a29482407a5dc" + integrity sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg== + dependencies: + is-extglob "^2.1.1" + +is-hexadecimal@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz#cc35c97588da4bd49a8eedd6bc4082d44dcb23a7" + integrity sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw== + +is-installed-globally@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/is-installed-globally/-/is-installed-globally-0.1.0.tgz#0dfd98f5a9111716dd535dda6492f67bf3d25a80" + integrity sha1-Df2Y9akRFxbdU13aZJL2e/PSWoA= + dependencies: + global-dirs "^0.1.0" + is-path-inside "^1.0.0" + +is-installed-globally@^0.3.1: + version "0.3.2" + resolved "https://registry.yarnpkg.com/is-installed-globally/-/is-installed-globally-0.3.2.tgz#fd3efa79ee670d1187233182d5b0a1dd00313141" + integrity sha512-wZ8x1js7Ia0kecP/CHM/3ABkAmujX7WPvQk6uu3Fly/Mk44pySulQpnHG46OMjHGXApINnV4QhY3SWnECO2z5g== + dependencies: + global-dirs "^2.0.1" + is-path-inside "^3.0.1" + +is-natural-number@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/is-natural-number/-/is-natural-number-4.0.1.tgz#ab9d76e1db4ced51e35de0c72ebecf09f734cde8" + integrity sha1-q5124dtM7VHjXeDHLr7PCfc0zeg= + +is-npm@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-npm/-/is-npm-1.0.0.tgz#f2fb63a65e4905b406c86072765a1a4dc793b9f4" + integrity sha1-8vtjpl5JBbQGyGBydloaTceTufQ= + +is-npm@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/is-npm/-/is-npm-4.0.0.tgz#c90dd8380696df87a7a6d823c20d0b12bbe3c84d" + integrity sha512-96ECIfh9xtDDlPylNPXhzjsykHsMJZ18ASpaWzQyBr4YRTcVjUvzaHayDAES2oU/3KpljhHUjtSRNiDwi0F0ig== + +is-number@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-3.0.0.tgz#24fd6201a4782cf50561c810276afc7d12d71195" + integrity sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU= + dependencies: + kind-of "^3.0.2" + +is-number@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b" + integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng== + +is-obj@^1.0.0, is-obj@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-1.0.1.tgz#3e4729ac1f5fde025cd7d83a896dab9f4f67db0f" + integrity sha1-PkcprB9f3gJc19g6iW2rn09n2w8= + +is-obj@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-2.0.0.tgz#473fb05d973705e3fd9620545018ca8e22ef4982" + integrity sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w== + +is-object@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-object/-/is-object-1.0.1.tgz#8952688c5ec2ffd6b03ecc85e769e02903083470" + integrity sha1-iVJojF7C/9awPsyF52ngKQMINHA= + +is-path-cwd@^2.0.0, is-path-cwd@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/is-path-cwd/-/is-path-cwd-2.2.0.tgz#67d43b82664a7b5191fd9119127eb300048a9fdb" + integrity 
sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ== + +is-path-in-cwd@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-path-in-cwd/-/is-path-in-cwd-2.1.0.tgz#bfe2dca26c69f397265a4009963602935a053acb" + integrity sha512-rNocXHgipO+rvnP6dk3zI20RpOtrAM/kzbB258Uw5BWr3TpXi861yzjo16Dn4hUox07iw5AyeMLHWsujkjzvRQ== + dependencies: + is-path-inside "^2.1.0" + +is-path-inside@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-1.0.1.tgz#8ef5b7de50437a3fdca6b4e865ef7aa55cb48036" + integrity sha1-jvW33lBDej/cprToZe96pVy0gDY= + dependencies: + path-is-inside "^1.0.1" + +is-path-inside@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-2.1.0.tgz#7c9810587d659a40d27bcdb4d5616eab059494b2" + integrity sha512-wiyhTzfDWsvwAW53OBWF5zuvaOGlZ6PwYxAbPVDhpm+gM09xKQGjBq/8uYN12aDvMxnAnq3dxTyoSoRNmg5YFg== + dependencies: + path-is-inside "^1.0.2" + +is-path-inside@^3.0.1: + version "3.0.2" + resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-3.0.2.tgz#f5220fc82a3e233757291dddc9c5877f2a1f3017" + integrity sha512-/2UGPSgmtqwo1ktx8NDHjuPwZWmHhO+gj0f93EkhLB5RgW9RZevWYYlIkS6zePc6U2WpOdQYIwHe9YC4DWEBVg== + +is-plain-obj@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" + integrity sha1-caUMhCnfync8kqOQpKA7OfzVHT4= + +is-plain-obj@^2.0.0, is-plain-obj@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-2.1.0.tgz#45e42e37fccf1f40da8e5f76ee21515840c09287" + integrity sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA== + +is-plain-object@^2.0.1, is-plain-object@^2.0.3, is-plain-object@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677" + integrity sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og== + dependencies: + isobject "^3.0.1" + +is-plain-object@^4.0.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-4.1.1.tgz#1a14d6452cbd50790edc7fdaa0aed5a40a35ebb5" + integrity sha512-5Aw8LLVsDlZsETVMhoMXzqsXwQqr/0vlnBYzIXJbYo2F4yYlhLHs+Ez7Bod7IIQKWkJbJfxrWD7pA1Dw1TKrwA== + +is-redirect@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-redirect/-/is-redirect-1.0.0.tgz#1d03dded53bd8db0f30c26e4f95d36fc7c87dc24" + integrity sha1-HQPd7VO9jbDzDCbk+V02/HyH3CQ= + +is-regex@^1.0.4, is-regex@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.1.0.tgz#ece38e389e490df0dc21caea2bd596f987f767ff" + integrity sha512-iI97M8KTWID2la5uYXlkbSDQIg4F6o1sYboZKKTDpnDQMLtUL86zxhgDet3Q2SriaYsyGqZ6Mn2SjbRKeLHdqw== + dependencies: + has-symbols "^1.0.1" + +is-regexp@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-regexp/-/is-regexp-1.0.0.tgz#fd2d883545c46bac5a633e7b9a09e87fa2cb5069" + integrity sha1-/S2INUXEa6xaYz57mgnof6LLUGk= + +is-resolvable@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-resolvable/-/is-resolvable-1.1.0.tgz#fb18f87ce1feb925169c9a407c19318a3206ed88" + integrity sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg== + +is-retry-allowed@^1.0.0, is-retry-allowed@^1.1.0: + version "1.2.0" + resolved 
"https://registry.yarnpkg.com/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz#d778488bd0a4666a3be8a1482b9f2baafedea8b4" + integrity sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg== + +is-root@2.1.0, is-root@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-root/-/is-root-2.1.0.tgz#809e18129cf1129644302a4f8544035d51984a9c" + integrity sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg== + +is-stream@^1.0.0, is-stream@^1.0.1, is-stream@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44" + integrity sha1-EtSj3U5o4Lec6428hBc66A2RykQ= + +is-stream@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.0.tgz#bde9c32680d6fae04129d6ac9d921ce7815f78e3" + integrity sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw== + +is-svg@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-svg/-/is-svg-3.0.0.tgz#9321dbd29c212e5ca99c4fa9794c714bcafa2f75" + integrity sha512-gi4iHK53LR2ujhLVVj+37Ykh9GLqYHX6JOVXbLAucaG/Cqw9xwdFOjDM2qeifLs1sF1npXXFvDu0r5HNgCMrzQ== + dependencies: + html-comment-regex "^1.1.0" + +is-symbol@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.3.tgz#38e1014b9e6329be0de9d24a414fd7441ec61937" + integrity sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ== + dependencies: + has-symbols "^1.0.1" + +is-typedarray@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" + integrity sha1-5HnICFjfDBsR3dppQPlgEfzaSpo= + +is-url@^1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/is-url/-/is-url-1.2.4.tgz#04a4df46d28c4cff3d73d01ff06abeb318a1aa52" + integrity sha512-ITvGim8FhRiYe4IQ5uHSkj7pVaPDrCTkNd3yq3cV7iZAcJdHTUMPMEHcqSOy9xZ9qFenQCvi+2wjH9a1nXqHww== + +is-what@^3.3.1: + version "3.10.0" + resolved "https://registry.yarnpkg.com/is-what/-/is-what-3.10.0.tgz#5fee88ee7105c373c5b7c9324f345ad7e9554327" + integrity sha512-U4RYCXNOmATQHlOPlOCHCfXyKEFIPqvyaKDqYRuLbD6EYKcTTfc3YXkAYjzOVxO3zt34L+Wh2feIyWrYiZ7kng== + +is-whitespace-character@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-whitespace-character/-/is-whitespace-character-1.0.4.tgz#0858edd94a95594c7c9dd0b5c174ec6e45ee4aa7" + integrity sha512-SDweEzfIZM0SJV0EUga669UTKlmL0Pq8Lno0QDQsPnvECB3IM2aP0gdx5TrU0A01MAPfViaZiI2V1QMZLaKK5w== + +is-windows@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-windows/-/is-windows-1.0.2.tgz#d1850eb9791ecd18e6182ce12a30f396634bb19d" + integrity sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA== + +is-word-character@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-word-character/-/is-word-character-1.0.4.tgz#ce0e73216f98599060592f62ff31354ddbeb0230" + integrity sha512-5SMO8RVennx3nZrqtKwCGyyetPE9VDba5ugvKLaD4KopPG5kR4mQ7tNt/r7feL5yt5h3lpuBbIUmCOG2eSzXHA== + +is-wsl@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-1.1.0.tgz#1f16e4aa22b04d1336b66188a66af3c600c3a66d" + integrity sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0= + +is-wsl@^2.1.1, is-wsl@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-2.2.0.tgz#74a4c76e77ca9fd3f932f290c17ea326cd157271" + integrity 
sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww== + dependencies: + is-docker "^2.0.0" + +is-yarn-global@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/is-yarn-global/-/is-yarn-global-0.3.0.tgz#d502d3382590ea3004893746754c89139973e232" + integrity sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw== + +isarray@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf" + integrity sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8= + +isarray@1.0.0, isarray@^1.0.0, isarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" + integrity sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE= + +iserror@0.0.2, iserror@^0.0.2: + version "0.0.2" + resolved "https://registry.yarnpkg.com/iserror/-/iserror-0.0.2.tgz#bd53451fe2f668b9f2402c1966787aaa2c7c0bf5" + integrity sha1-vVNFH+L2aLnyQCwZZnh6qix8C/U= + +isexe@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" + integrity sha1-6PvzdNxVb/iUehDcsFctYz8s+hA= + +isobject@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89" + integrity sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk= + dependencies: + isarray "1.0.0" + +isobject@^3.0.0, isobject@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df" + integrity sha1-TkMekrEalzFjaqH5yNHMvP2reN8= + +isomorphic-fetch@^2.1.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/isomorphic-fetch/-/isomorphic-fetch-2.2.1.tgz#611ae1acf14f5e81f729507472819fe9733558a9" + integrity sha1-YRrhrPFPXoH3KVB0coGf6XM1WKk= + dependencies: + node-fetch "^1.0.1" + whatwg-fetch ">=0.10.0" + +isurl@^1.0.0-alpha5: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isurl/-/isurl-1.0.0.tgz#b27f4f49f3cdaa3ea44a0a5b7f3462e6edc39d67" + integrity sha512-1P/yWsxPlDtn7QeRD+ULKQPaIaN6yF368GZ2vDfv0AL0NwpStafjWCDDdn0k8wgFMWpVAqG7oJhxHnlud42i9w== + dependencies: + has-to-string-tag-x "^1.2.0" + is-object "^1.0.1" + +jest-get-type@^24.9.0: + version "24.9.0" + resolved "https://registry.yarnpkg.com/jest-get-type/-/jest-get-type-24.9.0.tgz#1684a0c8a50f2e4901b6644ae861f579eed2ef0e" + integrity sha512-lUseMzAley4LhIcpSP9Jf+fTrQ4a1yHQwLNeeVa2cEmbCGeoZAtYPOIv8JaxLD/sUpKxetKGP+gsHl8f8TSj8Q== + +jest-validate@^24.9.0: + version "24.9.0" + resolved "https://registry.yarnpkg.com/jest-validate/-/jest-validate-24.9.0.tgz#0775c55360d173cd854e40180756d4ff52def8ab" + integrity sha512-HPIt6C5ACwiqSiwi+OfSSHbK8sG7akG8eATl+IPKaeIjtPOeBUd/g3J7DghugzxrGjI93qS/+RPKe1H6PqvhRQ== + dependencies: + "@jest/types" "^24.9.0" + camelcase "^5.3.1" + chalk "^2.0.1" + jest-get-type "^24.9.0" + leven "^3.1.0" + pretty-format "^24.9.0" + +jest-worker@^25.4.0: + version "25.5.0" + resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-25.5.0.tgz#2611d071b79cea0f43ee57a3d118593ac1547db1" + integrity sha512-/dsSmUkIy5EBGfv/IjjqmFxrNAUpBERfGs1oHROyD7yxjG/w+t0GOJDX8O1k32ySmd7+a5IhnJU2qQFcJ4n1vw== + dependencies: + merge-stream "^2.0.0" + supports-color "^7.0.0" + +jmespath@0.15.0: + version "0.15.0" + resolved "https://registry.yarnpkg.com/jmespath/-/jmespath-0.15.0.tgz#a3f222a9aae9f966f5d27c796510e28091764217" + integrity sha1-o/Iiqarp+Wb10nx5ZRDigJF2Qhc= + +js-string-escape@^1.0.1: + 
version "1.0.1" + resolved "https://registry.yarnpkg.com/js-string-escape/-/js-string-escape-1.0.1.tgz#e2625badbc0d67c7533e9edc1068c587ae4137ef" + integrity sha1-4mJbrbwNZ8dTPp7cEGjFh65BN+8= + +"js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" + integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== + +js-tokens@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.2.tgz#9866df395102130e38f7f996bceb65443209c25b" + integrity sha1-mGbfOVECEw449/mWvOtlRDIJwls= + +js-yaml@^3.11.0, js-yaml@^3.12.1, js-yaml@^3.13.1, js-yaml@^3.14.0: + version "3.14.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.0.tgz#a7a34170f26a21bb162424d8adacb4113a69e482" + integrity sha512-/4IbIeHcD9VMHFqDR/gQ7EdZdLimOvW2DdcxFjdyyZ9NsbS+ccrXqVWDtab/lRl5AlUqmpBx8EhPaWR+OtY17A== + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +jsesc@^2.5.1: + version "2.5.2" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" + integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA== + +jsesc@~0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d" + integrity sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0= + +json-buffer@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.0.tgz#5b1f397afc75d677bde8bcfc0e47e1f9a3d9a898" + integrity sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg= + +json-parse-better-errors@^1.0.1, json-parse-better-errors@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz#bb867cfb3450e69107c131d1c514bab3dc8bcaa9" + integrity sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw== + +json-pointer@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/json-pointer/-/json-pointer-0.6.0.tgz#8e500550a6aac5464a473377da57aa6cc22828d7" + integrity sha1-jlAFUKaqxUZKRzN32leqbMIoKNc= + dependencies: + foreach "^2.0.4" + +json-schema-ref-parser@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/json-schema-ref-parser/-/json-schema-ref-parser-6.1.0.tgz#30af34aeab5bee0431da805dac0eb21b574bf63d" + integrity sha512-pXe9H1m6IgIpXmE5JSb8epilNTGsmTb2iPohAXpOdhqGFbQjNeHHsZxU+C8w6T81GZxSPFLeUoqDJmzxx5IGuw== + dependencies: + call-me-maybe "^1.0.1" + js-yaml "^3.12.1" + ono "^4.0.11" + +json-schema-traverse@^0.3.0: + version "0.3.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz#349a6d44c53a51de89b40805c5d5e59b417d3340" + integrity sha1-NJptRMU6Ud6JtAgFxdXlm0F9M0A= + +json-schema-traverse@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" + integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== + +json-to-ast@^2.0.3: + version "2.1.0" + resolved "https://registry.yarnpkg.com/json-to-ast/-/json-to-ast-2.1.0.tgz#041a9fcd03c0845036acb670d29f425cea4faaf9" + integrity sha512-W9Lq347r8tA1DfMvAGn9QNcgYm4Wm7Yc+k8e6vezpMnRT+NHbtlxgNBXRVjXe9YM6eTn6+p/MKOlV/aABJcSnQ== + dependencies: + code-error-fragment "0.0.230" + grapheme-splitter "^1.0.4" + +json3@^3.3.2: + version 
"3.3.3" + resolved "https://registry.yarnpkg.com/json3/-/json3-3.3.3.tgz#7fc10e375fc5ae42c4705a5cc0aa6f62be305b81" + integrity sha512-c7/8mbUsKigAbLkD5B010BK4D9LZm7A1pNItkEwiUZRpIN66exu/e7YQWysGun+TRKaJp8MhemM+VkfWv42aCA== + +json5@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.1.tgz#779fb0018604fa854eacbf6252180d83543e3dbe" + integrity sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow== + dependencies: + minimist "^1.2.0" + +json5@^2.1.2: + version "2.1.3" + resolved "https://registry.yarnpkg.com/json5/-/json5-2.1.3.tgz#c9b0f7fa9233bfe5807fe66fcf3a5617ed597d43" + integrity sha512-KXPvOm8K9IJKFM0bmdn8QXh7udDh1g/giieX0NLCaMnb4hEiVFqnop2ImTXCc5e0/oHz3LTqmHGtExn5hfMkOA== + dependencies: + minimist "^1.2.5" + +jsonfile@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-4.0.0.tgz#8771aae0799b64076b76640fca058f9c10e33ecb" + integrity sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss= + optionalDependencies: + graceful-fs "^4.1.6" + +jsonfile@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-6.0.1.tgz#98966cba214378c8c84b82e085907b40bf614179" + integrity sha512-jR2b5v7d2vIOust+w3wtFKZIfpC2pnRmFAhAC/BuweZFQR8qZzxH1OyrQ10HmdVYiXWkYUqPVsz91cG7EL2FBg== + dependencies: + universalify "^1.0.0" + optionalDependencies: + graceful-fs "^4.1.6" + +jsonpointer@^4.0.1: + version "4.1.0" + resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-4.1.0.tgz#501fb89986a2389765ba09e6053299ceb4f2c2cc" + integrity sha512-CXcRvMyTlnR53xMcKnuMzfCA5i/nfblTnnr74CZb6C4vG39eu6w51t7nKmU5MfLfbTgGItliNyjO/ciNPDqClg== + +junk@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/junk/-/junk-3.1.0.tgz#31499098d902b7e98c5d9b9c80f43457a88abfa1" + integrity sha512-pBxcB3LFc8QVgdggvZWyeys+hnrNWg4OcZIU/1X59k5jQdLBlCsYGRQaz234SqoRLTCgMH00fY0xRJH+F9METQ== + +jwt-decode@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/jwt-decode/-/jwt-decode-2.2.0.tgz#7d86bd56679f58ce6a84704a657dd392bba81a79" + integrity sha1-fYa9VmefWM5qhHBKZX3TkruoGnk= + +keep-func-props@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/keep-func-props/-/keep-func-props-3.0.1.tgz#2aa8b6f421a7e979b071dbfe747d2003a135ee34" + integrity sha512-5AsrYCiCHIUxuw/G2r7xcoTW/NTf5IFwAe1fkwf2ifM/KZzEojaTylh1Pppu60oEixww1rfcWJaRGLi3eAJsrQ== + dependencies: + mimic-fn "^3.1.0" + +keyv@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/keyv/-/keyv-3.0.0.tgz#44923ba39e68b12a7cec7df6c3268c031f2ef373" + integrity sha512-eguHnq22OE3uVoSYG0LVWNP+4ppamWr9+zWBe1bsNcovIMy6huUJFPgy4mGwCd/rnl3vOLGW1MTlu4c57CT1xA== + dependencies: + json-buffer "3.0.0" + +keyv@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/keyv/-/keyv-3.1.0.tgz#ecc228486f69991e49e9476485a5be1e8fc5c4d9" + integrity sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA== + dependencies: + json-buffer "3.0.0" + +killable@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/killable/-/killable-1.0.1.tgz#4c8ce441187a061c7474fb87ca08e2a638194892" + integrity sha512-LzqtLKlUwirEUyl/nicirVmNiPvYs7l5n8wOPP7fyJVpUPkvCnW/vuiXGpylGUlnPDnB7311rARzAt3Mhswpjg== + +kind-of@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-2.0.1.tgz#018ec7a4ce7e3a86cb9141be519d24c8faa981b5" + integrity sha1-AY7HpM5+OobLkUG+UZ0kyPqpgbU= + dependencies: + is-buffer "^1.0.2" + +kind-of@^3.0.2, kind-of@^3.0.3, kind-of@^3.2.0: + version "3.2.2" + 
resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.2.tgz#31ea21a734bab9bbb0f32466d893aea51e4a3c64" + integrity sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ= + dependencies: + is-buffer "^1.1.5" + +kind-of@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-4.0.0.tgz#20813df3d712928b207378691a45066fae72dd57" + integrity sha1-IIE989cSkosgc3hpGkUGb65y3Vc= + dependencies: + is-buffer "^1.1.5" + +kind-of@^5.0.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-5.1.0.tgz#729c91e2d857b7a419a1f9aa65685c4c33f5845d" + integrity sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw== + +kind-of@^6.0.0, kind-of@^6.0.2: + version "6.0.3" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.3.tgz#07c05034a6c349fa06e24fa35aa76db4580ce4dd" + integrity sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw== + +kuler@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/kuler/-/kuler-2.0.0.tgz#e2c570a3800388fb44407e851531c1d670b061b3" + integrity sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A== + +lambda-local@^1.7.1: + version "1.7.3" + resolved "https://registry.yarnpkg.com/lambda-local/-/lambda-local-1.7.3.tgz#3f7e8e4893184ddf95a731004ee49b75089bf534" + integrity sha512-T+iwIkuQT0JvTQhvNBTikLhpEJk3ovNoC33niE4QNmYOUrCOdo86PcPkgppOZl+NJXXHebdPHDJ40zqBJ9VMzg== + dependencies: + aws-sdk "^2.689.0" + commander "^5.1.0" + dotenv "^8.2.0" + winston "^3.2.1" + +last-call-webpack-plugin@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/last-call-webpack-plugin/-/last-call-webpack-plugin-3.0.0.tgz#9742df0e10e3cf46e5c0381c2de90d3a7a2d7555" + integrity sha512-7KI2l2GIZa9p2spzPIVZBYyNKkN+e/SQPpnjlTiPhdbDW3F86tdKKELxKpzJ5sgU19wQWsACULZmpTPYHeWO5w== + dependencies: + lodash "^4.17.5" + webpack-sources "^1.1.0" + +latest-version@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/latest-version/-/latest-version-3.1.0.tgz#a205383fea322b33b5ae3b18abee0dc2f356ee15" + integrity sha1-ogU4P+oyKzO1rjsYq+4NwvNW7hU= + dependencies: + package-json "^4.0.0" + +latest-version@^5.0.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/latest-version/-/latest-version-5.1.0.tgz#119dfe908fe38d15dfa43ecd13fa12ec8832face" + integrity sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA== + dependencies: + package-json "^6.3.0" + +lazy-cache@^0.2.3: + version "0.2.7" + resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-0.2.7.tgz#7feddf2dcb6edb77d11ef1d117ab5ffdf0ab1b65" + integrity sha1-f+3fLctu23fRHvHRF6tf/fCrG2U= + +lazy-cache@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e" + integrity sha1-odePw6UEdMuAhF07O24dpJpEbo4= + +lazystream@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/lazystream/-/lazystream-1.0.0.tgz#f6995fe0f820392f61396be89462407bb77168e4" + integrity sha1-9plf4PggOS9hOWvolGJAe7dxaOQ= + dependencies: + readable-stream "^2.0.5" + +lcid@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/lcid/-/lcid-2.0.0.tgz#6ef5d2df60e52f82eb228a4c373e8d1f397253cf" + integrity sha512-avPEb8P8EGnwXKClwsNUgryVjllcRqtMYa49NTsbQagYuT1DcXnl1915oxWjoyGrXR6zH/Y0Zc96xWsPcoDKeA== + dependencies: + invert-kv "^2.0.0" + +leven@^3.1.0: + version "3.1.0" + resolved 
"https://registry.yarnpkg.com/leven/-/leven-3.1.0.tgz#77891de834064cccba82ae7842bb6b14a13ed7f2" + integrity sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A== + +levenary@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/levenary/-/levenary-1.1.1.tgz#842a9ee98d2075aa7faeedbe32679e9205f46f77" + integrity sha512-mkAdOIt79FD6irqjYSs4rdbnlT5vRonMEvBVPVb3XmevfS8kgRXwfes0dhPdEtzTWD/1eNE/Bm/G1iRt6DcnQQ== + dependencies: + leven "^3.1.0" + +levn@~0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee" + integrity sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4= + dependencies: + prelude-ls "~1.1.2" + type-check "~0.3.2" + +lines-and-columns@^1.1.6: + version "1.1.6" + resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.1.6.tgz#1c00c743b433cd0a4e80758f7b64a57440d9ff00" + integrity sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA= + +load-json-file@^5.2.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-5.3.0.tgz#4d3c1e01fa1c03ea78a60ac7af932c9ce53403f3" + integrity sha512-cJGP40Jc/VXUsp8/OrnyKyTZ1y6v/dphm3bioS+RrKXjK2BB6wHUd6JptZEFDGgGahMT+InnZO5i1Ei9mpC8Bw== + dependencies: + graceful-fs "^4.1.15" + parse-json "^4.0.0" + pify "^4.0.1" + strip-bom "^3.0.0" + type-fest "^0.3.0" + +loader-runner@^2.4.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/loader-runner/-/loader-runner-2.4.0.tgz#ed47066bfe534d7e84c4c7b9998c2a75607d9357" + integrity sha512-Jsmr89RcXGIwivFY21FcRrisYZfvLMTWx5kOLc+JTxtpBOG6xML0vzbc6SEQG2FO9/4Fc3wW4LVcB5DmGflaRw== + +loader-utils@1.2.3: + version "1.2.3" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-1.2.3.tgz#1ff5dc6911c9f0a062531a4c04b609406108c2c7" + integrity sha512-fkpz8ejdnEMG3s37wGL07iSBDg99O9D5yflE9RGNH3hRdx9SOwYfnGYdZOUIZitN8E+E2vkq3MUMYMvPYl5ZZA== + dependencies: + big.js "^5.2.2" + emojis-list "^2.0.0" + json5 "^1.0.1" + +loader-utils@^1.1.0, loader-utils@^1.2.3, loader-utils@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-1.4.0.tgz#c579b5e34cb34b1a74edc6c1fb36bfa371d5a613" + integrity sha512-qH0WSMBtn/oHuwjy/NucEgbx5dbxxnxup9s4PVXJUDHZBQY+s0NWA9rJf53RBnQZxfch7euUui7hpoAPvALZdA== + dependencies: + big.js "^5.2.2" + emojis-list "^3.0.0" + json5 "^1.0.1" + +loader-utils@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-2.0.0.tgz#e4cace5b816d425a166b5f097e10cd12b36064b0" + integrity sha512-rP4F0h2RaWSvPEkD7BLDFQnvSf+nK+wr3ESUjNTyAGobqrijmW92zc+SO6d4p4B1wh7+B/Jg1mkQe5NYUEHtHQ== + dependencies: + big.js "^5.2.2" + emojis-list "^3.0.0" + json5 "^2.1.2" + +locate-path@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-2.0.0.tgz#2b568b265eec944c6d9c0de9c3dbbbca0354cd8e" + integrity sha1-K1aLJl7slExtnA3pw9u7ygNUzY4= + dependencies: + p-locate "^2.0.0" + path-exists "^3.0.0" + +locate-path@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-3.0.0.tgz#dbec3b3ab759758071b58fe59fc41871af21400e" + integrity sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A== + dependencies: + p-locate "^3.0.0" + path-exists "^3.0.0" + +locate-path@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-5.0.0.tgz#1afba396afd676a6d42504d0a67a3a7eb9f62aa0" + integrity 
sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g== + dependencies: + p-locate "^4.1.0" + +lodash._reinterpolate@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz#0ccf2d89166af03b3663c796538b75ac6e114d9d" + integrity sha1-DM8tiRZq8Ds2Y8eWU4t1rG4RTZ0= + +lodash.assignin@^4.0.9: + version "4.2.0" + resolved "https://registry.yarnpkg.com/lodash.assignin/-/lodash.assignin-4.2.0.tgz#ba8df5fb841eb0a3e8044232b0e263a8dc6a28a2" + integrity sha1-uo31+4QesKPoBEIysOJjqNxqKKI= + +lodash.bind@^4.1.4: + version "4.2.1" + resolved "https://registry.yarnpkg.com/lodash.bind/-/lodash.bind-4.2.1.tgz#7ae3017e939622ac31b7d7d7dcb1b34db1690d35" + integrity sha1-euMBfpOWIqwxt9fX3LGzTbFpDTU= + +lodash.camelcase@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz#b28aa6288a2b9fc651035c7711f65ab6190331a6" + integrity sha1-soqmKIorn8ZRA1x3EfZathkDMaY= + +lodash.chunk@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/lodash.chunk/-/lodash.chunk-4.2.0.tgz#66e5ce1f76ed27b4303d8c6512e8d1216e8106bc" + integrity sha1-ZuXOH3btJ7QwPYxlEujRIW6BBrw= + +lodash.clonedeep@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz#e23f3f9c4f8fbdde872529c1071857a086e5ccef" + integrity sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8= + +lodash.debounce@^4.0.8: + version "4.0.8" + resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af" + integrity sha1-gteb/zCmfEAF/9XiUVMArZyk168= + +lodash.deburr@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/lodash.deburr/-/lodash.deburr-4.1.0.tgz#ddb1bbb3ef07458c0177ba07de14422cb033ff9b" + integrity sha1-3bG7s+8HRYwBd7oH3hRCLLAz/5s= + +lodash.defaults@^4.0.1, lodash.defaults@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/lodash.defaults/-/lodash.defaults-4.2.0.tgz#d09178716ffea4dde9e5fb7b37f6f0802274580c" + integrity sha1-0JF4cW/+pN3p5ft7N/bwgCJ0WAw= + +lodash.difference@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.difference/-/lodash.difference-4.5.0.tgz#9ccb4e505d486b91651345772885a2df27fd017c" + integrity sha1-nMtOUF1Ia5FlE0V3KIWi3yf9AXw= + +lodash.filter@^4.4.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/lodash.filter/-/lodash.filter-4.6.0.tgz#668b1d4981603ae1cc5a6fa760143e480b4c4ace" + integrity sha1-ZosdSYFgOuHMWm+nYBQ+SAtMSs4= + +lodash.flatmap@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.flatmap/-/lodash.flatmap-4.5.0.tgz#ef8cbf408f6e48268663345305c6acc0b778702e" + integrity sha1-74y/QI9uSCaGYzRTBcaswLd4cC4= + +lodash.flatten@^4.2.0, lodash.flatten@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/lodash.flatten/-/lodash.flatten-4.4.0.tgz#f31c22225a9632d2bbf8e4addbef240aa765a61f" + integrity sha1-8xwiIlqWMtK7+OSt2+8kCqdlph8= + +lodash.flattendeep@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz#fb030917f86a3134e5bc9bec0d69e0013ddfedb2" + integrity sha1-+wMJF/hqMTTlvJvsDWngAT3f7bI= + +lodash.foreach@^4.3.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.foreach/-/lodash.foreach-4.5.0.tgz#1a6a35eace401280c7f06dddec35165ab27e3e53" + integrity sha1-Gmo16s5AEoDH8G3d7DUWWrJ+PlM= + +lodash.get@^4.4.2: + version "4.4.2" + resolved 
"https://registry.yarnpkg.com/lodash.get/-/lodash.get-4.4.2.tgz#2d177f652fa31e939b4438d5341499dfa3825e99" + integrity sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk= + +lodash.groupby@^4.6.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/lodash.groupby/-/lodash.groupby-4.6.0.tgz#0b08a1dcf68397c397855c3239783832df7403d1" + integrity sha1-Cwih3PaDl8OXhVwyOXg4Mt90A9E= + +lodash.has@^4.5.2: + version "4.5.2" + resolved "https://registry.yarnpkg.com/lodash.has/-/lodash.has-4.5.2.tgz#d19f4dc1095058cccbe2b0cdf4ee0fe4aa37c862" + integrity sha1-0Z9NwQlQWMzL4rDN9O4P5Ko3yGI= + +lodash.isempty@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/lodash.isempty/-/lodash.isempty-4.4.0.tgz#6f86cbedd8be4ec987be9aaf33c9684db1b31e7e" + integrity sha1-b4bL7di+TsmHvpqvM8loTbGzHn4= + +lodash.isequal@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.isequal/-/lodash.isequal-4.5.0.tgz#415c4478f2bcc30120c22ce10ed3226f7d3e18e0" + integrity sha1-QVxEePK8wwEgwizhDtMib30+GOA= + +lodash.islength@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/lodash.islength/-/lodash.islength-4.0.1.tgz#4e9868d452575d750affd358c979543dc20ed577" + integrity sha1-Tpho1FJXXXUK/9NYyXlUPcIO1Xc= + +lodash.isobject@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/lodash.isobject/-/lodash.isobject-3.0.2.tgz#3c8fb8d5b5bf4bf90ae06e14f2a530a4ed935e1d" + integrity sha1-PI+41bW/S/kK4G4U8qUwpO2TXh0= + +lodash.isplainobject@^4.0.6: + version "4.0.6" + resolved "https://registry.yarnpkg.com/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz#7c526a52d89b45c45cc690b88163be0497f550cb" + integrity sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs= + +lodash.isstring@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/lodash.isstring/-/lodash.isstring-4.0.1.tgz#d527dfb5456eca7cc9bb95d5daeaf88ba54a5451" + integrity sha1-1SfftUVuynzJu5XV2ur4i6VKVFE= + +lodash.kebabcase@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/lodash.kebabcase/-/lodash.kebabcase-4.1.1.tgz#8489b1cb0d29ff88195cceca448ff6d6cc295c36" + integrity sha1-hImxyw0p/4gZXM7KRI/21swpXDY= + +lodash.map@^4.4.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/lodash.map/-/lodash.map-4.6.0.tgz#771ec7839e3473d9c4cde28b19394c3562f4f6d3" + integrity sha1-dx7Hg540c9nEzeKLGTlMNWL09tM= + +lodash.memoize@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/lodash.memoize/-/lodash.memoize-4.1.2.tgz#bcc6c49a42a2840ed997f323eada5ecd182e0bfe" + integrity sha1-vMbEmkKihA7Zl/Mj6tpezRguC/4= + +lodash.merge@^4.4.0, lodash.merge@^4.6.1, lodash.merge@^4.6.2: + version "4.6.2" + resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.2.tgz#558aa53b43b661e1925a0afdfa36a9a1085fe57a" + integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ== + +lodash.padstart@^4.6.1: + version "4.6.1" + resolved "https://registry.yarnpkg.com/lodash.padstart/-/lodash.padstart-4.6.1.tgz#d2e3eebff0d9d39ad50f5cbd1b52a7bce6bb611b" + integrity sha1-0uPuv/DZ05rVD1y9G1KnvOa7YRs= + +lodash.pick@^4.2.1, lodash.pick@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/lodash.pick/-/lodash.pick-4.4.0.tgz#52f05610fff9ded422611441ed1fc123a03001b3" + integrity sha1-UvBWEP/53tQiYRRB7R/BI6AwAbM= + +lodash.pickby@^4.6.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/lodash.pickby/-/lodash.pickby-4.6.0.tgz#7dea21d8c18d7703a27c704c15d3b84a67e33aff" + integrity sha1-feoh2MGNdwOifHBMFdO4SmfjOv8= + +lodash.reduce@^4.4.0: + version "4.6.0" + resolved 
"https://registry.yarnpkg.com/lodash.reduce/-/lodash.reduce-4.6.0.tgz#f1ab6b839299ad48f784abbf476596f03b914d3b" + integrity sha1-8atrg5KZrUj3hKu/R2WW8DuRTTs= + +lodash.reject@^4.4.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/lodash.reject/-/lodash.reject-4.6.0.tgz#80d6492dc1470864bbf583533b651f42a9f52415" + integrity sha1-gNZJLcFHCGS79YNTO2UfQqn1JBU= + +lodash.sample@^4.2.1: + version "4.2.1" + resolved "https://registry.yarnpkg.com/lodash.sample/-/lodash.sample-4.2.1.tgz#5e4291b0c753fa1abeb0aab8fb29df1b66f07f6d" + integrity sha1-XkKRsMdT+hq+sKq4+ynfG2bwf20= + +lodash.set@^4.3.2: + version "4.3.2" + resolved "https://registry.yarnpkg.com/lodash.set/-/lodash.set-4.3.2.tgz#d8757b1da807dde24816b0d6a84bea1a76230b23" + integrity sha1-2HV7HagH3eJIFrDWqEvqGnYjCyM= + +lodash.snakecase@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/lodash.snakecase/-/lodash.snakecase-4.1.1.tgz#39d714a35357147837aefd64b5dcbb16becd8f8d" + integrity sha1-OdcUo1NXFHg3rv1ktdy7Fr7Nj40= + +lodash.some@^4.4.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/lodash.some/-/lodash.some-4.6.0.tgz#1bb9f314ef6b8baded13b549169b2a945eb68e4d" + integrity sha1-G7nzFO9ri63tE7VJFpsqlF62jk0= + +lodash.sortby@^4.6.0, lodash.sortby@^4.7.0: + version "4.7.0" + resolved "https://registry.yarnpkg.com/lodash.sortby/-/lodash.sortby-4.7.0.tgz#edd14c824e2cc9c1e0b0a1b42bb5210516a42438" + integrity sha1-7dFMgk4sycHgsKG0K7UhBRakJDg= + +lodash.template@^4.4.0, lodash.template@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.template/-/lodash.template-4.5.0.tgz#f976195cf3f347d0d5f52483569fe8031ccce8ab" + integrity sha512-84vYFxIkmidUiFxidA/KjjH9pAycqW+h980j7Fuz5qxRtO9pgB7MDFTdys1N7A5mcucRiDyEq4fusljItR1T/A== + dependencies: + lodash._reinterpolate "^3.0.0" + lodash.templatesettings "^4.0.0" + +lodash.templatesettings@^4.0.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/lodash.templatesettings/-/lodash.templatesettings-4.2.0.tgz#e481310f049d3cf6d47e912ad09313b154f0fb33" + integrity sha512-stgLz+i3Aa9mZgnjr/O+v9ruKZsPsndy7qPZOchbqk2cnTU1ZaldKK+v7m54WoKIyxiuMZTKT2H81F8BeAc3ZQ== + dependencies: + lodash._reinterpolate "^3.0.0" + +lodash.toarray@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/lodash.toarray/-/lodash.toarray-4.4.0.tgz#24c4bfcd6b2fba38bfd0594db1179d8e9b656561" + integrity sha1-JMS/zWsvuji/0FlNsRedjptlZWE= + +lodash.transform@^4.6.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/lodash.transform/-/lodash.transform-4.6.0.tgz#12306422f63324aed8483d3f38332b5f670547a0" + integrity sha1-EjBkIvYzJK7YSD0/ODMrX2cFR6A= + +lodash.union@^4.6.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/lodash.union/-/lodash.union-4.6.0.tgz#48bb5088409f16f1821666641c44dd1aaae3cd88" + integrity sha1-SLtQiECfFvGCFmZkHETdGqrjzYg= + +lodash.uniq@4.5.0, lodash.uniq@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773" + integrity sha1-0CJTc662Uq3BvILklFM5qEJ1R3M= + +lodash@^4.17.11, lodash@^4.17.13, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.17.5: + version "4.17.15" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.15.tgz#b447f6670a0455bbfeedd11392eff330ea097548" + integrity sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A== + +lodash@^4.17.12, lodash@^4.17.19, lodash@^4.5.2: + version "4.17.19" + resolved 
"https://registry.yarnpkg.com/lodash/-/lodash-4.17.19.tgz#e48ddedbe30b3321783c5b4301fbd353bc1e4a4b" + integrity sha512-JNvd8XER9GQX0v2qJgsaN/mzFCNA5BRe/j8JN9d+tWyGLSodKQHKFicdwNYzWwI3wjRnaKPsGj1XkBjx/F96DQ== + +log-process-errors@^5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/log-process-errors/-/log-process-errors-5.1.2.tgz#5d0f195309d9c725a010587527ade00db1fe1646" + integrity sha512-s4kmYHrzj543xUAIxc/cpmoiGZcbFwKRqqwO49DbgH+hFoSTswi0sYZuJKjUUc73b49MRPQGl0CNl8cx98/Wtg== + dependencies: + chalk "^3.0.0-beta.2" + figures "^3.0.0" + filter-obj "^2.0.1" + jest-validate "^24.9.0" + map-obj "^4.1.0" + moize "^5.4.4" + supports-color "^7.1.0" + +log-symbols@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-2.2.0.tgz#5740e1c5d6f0dfda4ad9323b5332107ef6b4c40a" + integrity sha512-VeIAFslyIerEJLXHziedo2basKbMKtTw3vfn5IzG0XTjhAVEJyNHnL2p7vc+wBDSdQuUpNw3M2u6xb9QsAY5Eg== + dependencies: + chalk "^2.0.1" + +logform@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/logform/-/logform-2.2.0.tgz#40f036d19161fc76b68ab50fdc7fe495544492f2" + integrity sha512-N0qPlqfypFx7UHNn4B3lzS/b0uLqt2hmuoa+PpuXNYgozdJYAyauF5Ky0BWVjrxDlMWiT3qN4zPq3vVAfZy7Yg== + dependencies: + colors "^1.2.1" + fast-safe-stringify "^2.0.4" + fecha "^4.2.0" + ms "^2.1.1" + triple-beam "^1.3.0" + +loglevel@^1.6.8: + version "1.6.8" + resolved "https://registry.yarnpkg.com/loglevel/-/loglevel-1.6.8.tgz#8a25fb75d092230ecd4457270d80b54e28011171" + integrity sha512-bsU7+gc9AJ2SqpzxwU3+1fedl8zAntbtC5XYlt3s2j1hJcn2PsXSmgN8TaLG/J1/2mod4+cE/3vNL70/c1RNCA== + +longest-streak@^2.0.1: + version "2.0.4" + resolved "https://registry.yarnpkg.com/longest-streak/-/longest-streak-2.0.4.tgz#b8599957da5b5dab64dee3fe316fa774597d90e4" + integrity sha512-vM6rUVCVUJJt33bnmHiZEvr7wPT78ztX7rojL+LW51bHtLh6HTjx84LA5W4+oa6aKEJA7jJu5LR6vQRBpA5DVg== + +loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.2.0, loose-envify@^1.3.1, loose-envify@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" + integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q== + dependencies: + js-tokens "^3.0.0 || ^4.0.0" + +lower-case@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/lower-case/-/lower-case-2.0.1.tgz#39eeb36e396115cc05e29422eaea9e692c9408c7" + integrity sha512-LiWgfDLLb1dwbFQZsSglpRj+1ctGnayXz3Uv0/WO8n558JycT5fg6zkNcnW0G68Nn0aEldTFeEfmjCfmqry/rQ== + dependencies: + tslib "^1.10.0" + +lowercase-keys@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.0.tgz#4e3366b39e7f5457e35f1324bdf6f88d0bfc7306" + integrity sha1-TjNms55/VFfjXxMkvfb4jQv8cwY= + +lowercase-keys@^1.0.0, lowercase-keys@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.1.tgz#6f9e30b47084d971a7c820ff15a6c5167b74c26f" + integrity sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA== + +lowercase-keys@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-2.0.0.tgz#2603e78b7b4b0006cbca2fbcc8a3202558ac9479" + integrity sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA== + +lru-cache@^4.0.1: + version "4.1.5" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.1.5.tgz#8bbe50ea85bed59bc9e33dcab8235ee9bcf443cd" + integrity 
sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g== + dependencies: + pseudomap "^1.0.2" + yallist "^2.1.2" + +lru-cache@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" + integrity sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w== + dependencies: + yallist "^3.0.2" + +lunr@2.3.8: + version "2.3.8" + resolved "https://registry.yarnpkg.com/lunr/-/lunr-2.3.8.tgz#a8b89c31f30b5a044b97d2d28e2da191b6ba2072" + integrity sha512-oxMeX/Y35PNFuZoHp+jUj5OSEmLCaIH4KTFJh7a93cHBoFmpw2IoPs22VIz7vyO2YUnx2Tn9dzIwO2P/4quIRg== + +macos-release@^2.2.0: + version "2.4.1" + resolved "https://registry.yarnpkg.com/macos-release/-/macos-release-2.4.1.tgz#64033d0ec6a5e6375155a74b1a1eba8e509820ac" + integrity sha512-H/QHeBIN1fIGJX517pvK8IEK53yQOW7YcEI55oYtgjDdoCQQz7eJS94qt5kNrscReEyuD/JcdFCm2XBEcGOITg== + +magic-string@^0.25.1, magic-string@^0.25.2: + version "0.25.7" + resolved "https://registry.yarnpkg.com/magic-string/-/magic-string-0.25.7.tgz#3f497d6fd34c669c6798dcb821f2ef31f5445051" + integrity sha512-4CrMT5DOHTDk4HYDlzmwu4FVCcIYI8gauveasrdCu2IKIFOJ3f0v/8MDGJCDL9oD2ppz/Av1b0Nj345H9M+XIA== + dependencies: + sourcemap-codec "^1.4.4" + +make-dir@^1.0.0, make-dir@^1.2.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-1.3.0.tgz#79c1033b80515bd6d24ec9933e860ca75ee27f0c" + integrity sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ== + dependencies: + pify "^3.0.0" + +make-dir@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-2.1.0.tgz#5f0310e18b8be898cc07009295a30ae41e91e6f5" + integrity sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA== + dependencies: + pify "^4.0.1" + semver "^5.6.0" + +make-dir@^3.0.0, make-dir@^3.0.2, make-dir@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-3.1.0.tgz#415e967046b3a7f1d185277d84aa58203726a13f" + integrity sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw== + dependencies: + semver "^6.0.0" + +map-age-cleaner@^0.1.1: + version "0.1.3" + resolved "https://registry.yarnpkg.com/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz#7d583a7306434c055fe474b0f45078e6e1b4b92a" + integrity sha512-bJzx6nMoP6PDLPBFmg7+xRKeFZvFboMrGlxmNj9ClvX53KrmvM5bXFXEWjbz4cz1AFn+jWJ9z/DJSz7hrs0w3w== + dependencies: + p-defer "^1.0.0" + +map-cache@^0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/map-cache/-/map-cache-0.2.2.tgz#c32abd0bd6525d9b051645bb4f26ac5dc98a0dbf" + integrity sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8= + +map-obj@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/map-obj/-/map-obj-4.1.0.tgz#b91221b542734b9f14256c0132c897c5d7256fd5" + integrity sha512-glc9y00wgtwcDmp7GaE/0b0OnxpNJsVf3ael/An6Fe2Q51LLwN1er6sdomLRzz5h0+yMpiYLhWYF5R7HeqVd4g== + +map-visit@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/map-visit/-/map-visit-1.0.0.tgz#ecdca8f13144e660f1b5bd41f12f3479d98dfb8f" + integrity sha1-7Nyo8TFE5mDxtb1B8S80edmN+48= + dependencies: + object-visit "^1.0.0" + +mark.js@^8.11.1: + version "8.11.1" + resolved "https://registry.yarnpkg.com/mark.js/-/mark.js-8.11.1.tgz#180f1f9ebef8b0e638e4166ad52db879beb2ffc5" + integrity sha1-GA8fnr74sOY45BZq1S24eb6y/8U= + +markdown-escapes@^1.0.0: + version "1.0.4" + resolved 
"https://registry.yarnpkg.com/markdown-escapes/-/markdown-escapes-1.0.4.tgz#c95415ef451499d7602b91095f3c8e8975f78535" + integrity sha512-8z4efJYk43E0upd0NbVXwgSTQs6cT3T06etieCMEg7dRbzCbxUCK/GHlX8mhHRDcp+OLlHkPKsvqQTCvsRl2cg== + +markdown-table@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/markdown-table/-/markdown-table-2.0.0.tgz#194a90ced26d31fe753d8b9434430214c011865b" + integrity sha512-Ezda85ToJUBhM6WGaG6veasyym+Tbs3cMAw/ZhOPqXiYsr0jgocBV3j3nx+4lk47plLlIqjwuTm/ywVI+zjJ/A== + dependencies: + repeat-string "^1.0.0" + +marked@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/marked/-/marked-0.7.0.tgz#b64201f051d271b1edc10a04d1ae9b74bb8e5c0e" + integrity sha512-c+yYdCZJQrsRjTPhUx7VKkApw9bwDkNbHUKo1ovgcfDjb2kc8rLuRbIFyXL5WOEUwzSSKo3IXpph2K6DqB/KZg== + +maxstache-stream@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/maxstache-stream/-/maxstache-stream-1.0.4.tgz#9c7f5cab7e5fdd2d90da86143b4e9631ea328040" + integrity sha1-nH9cq35f3S2Q2oYUO06WMeoygEA= + dependencies: + maxstache "^1.0.0" + pump "^1.0.0" + split2 "^1.0.0" + through2 "^2.0.0" + +maxstache@^1.0.0: + version "1.0.7" + resolved "https://registry.yarnpkg.com/maxstache/-/maxstache-1.0.7.tgz#2231d5180ba783d5ecfc31c45fedac7ae4276984" + integrity sha1-IjHVGAung9Xs/DHEX+2seuQnaYQ= + +md5-hex@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/md5-hex/-/md5-hex-2.0.0.tgz#d0588e9f1c74954492ecd24ac0ac6ce997d92e33" + integrity sha1-0FiOnxx0lUSS7NJKwKxs6ZfZLjM= + dependencies: + md5-o-matic "^0.1.1" + +md5-o-matic@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/md5-o-matic/-/md5-o-matic-0.1.1.tgz#822bccd65e117c514fab176b25945d54100a03c3" + integrity sha1-givM1l4RfFFPqxdrJZRdVBAKA8M= + +md5.js@^1.3.4: + version "1.3.5" + resolved "https://registry.yarnpkg.com/md5.js/-/md5.js-1.3.5.tgz#b5d07b8e3216e3e27cd728d72f70d1e6a342005f" + integrity sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg== + dependencies: + hash-base "^3.0.0" + inherits "^2.0.1" + safe-buffer "^5.1.2" + +mdast-squeeze-paragraphs@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/mdast-squeeze-paragraphs/-/mdast-squeeze-paragraphs-4.0.0.tgz#7c4c114679c3bee27ef10b58e2e015be79f1ef97" + integrity sha512-zxdPn69hkQ1rm4J+2Cs2j6wDEv7O17TfXTJ33tl/+JPIoEmtV9t2ZzBM5LPHE8QlHsmVD8t3vPKCyY3oH+H8MQ== + dependencies: + unist-util-remove "^2.0.0" + +mdast-util-compact@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/mdast-util-compact/-/mdast-util-compact-2.0.1.tgz#cabc69a2f43103628326f35b1acf735d55c99490" + integrity sha512-7GlnT24gEwDrdAwEHrU4Vv5lLWrEer4KOkAiKT9nYstsTad7Oc1TwqT2zIMKRdZF7cTuaf+GA1E4Kv7jJh8mPA== + dependencies: + unist-util-visit "^2.0.0" + +mdast-util-definitions@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/mdast-util-definitions/-/mdast-util-definitions-3.0.1.tgz#06af6c49865fc63d6d7d30125569e2f7ae3d0a86" + integrity sha512-BAv2iUm/e6IK/b2/t+Fx69EL/AGcq/IG2S+HxHjDJGfLJtd6i9SZUS76aC9cig+IEucsqxKTR0ot3m933R3iuA== + dependencies: + unist-util-visit "^2.0.0" + +mdast-util-heading-range@^2.0.1: + version "2.1.4" + resolved "https://registry.yarnpkg.com/mdast-util-heading-range/-/mdast-util-heading-range-2.1.4.tgz#152d1c71affb6172b1bbf5fee01072bf1b058709" + integrity sha512-ea+YwoFQZiwSf5TLlk9qtKb0AUKsn1oCzdskn2SXsHylA/vW9ZxmMzuCNsFi9siWW1WS1/JSOipX2brUwisIHA== + dependencies: + mdast-util-to-string "^1.0.0" + +mdast-util-to-hast@9.1.0: + version "9.1.0" + resolved 
"https://registry.yarnpkg.com/mdast-util-to-hast/-/mdast-util-to-hast-9.1.0.tgz#6ef121dd3cd3b006bf8650b1b9454da0faf79ffe" + integrity sha512-Akl2Vi9y9cSdr19/Dfu58PVwifPXuFt1IrHe7l+Crme1KvgUT+5z+cHLVcQVGCiNTZZcdqjnuv9vPkGsqWytWA== + dependencies: + "@types/mdast" "^3.0.0" + "@types/unist" "^2.0.3" + collapse-white-space "^1.0.0" + detab "^2.0.0" + mdast-util-definitions "^3.0.0" + mdurl "^1.0.0" + trim-lines "^1.0.0" + unist-builder "^2.0.0" + unist-util-generated "^1.0.0" + unist-util-position "^3.0.0" + unist-util-visit "^2.0.0" + +mdast-util-to-string@^1.0.0, mdast-util-to-string@^1.0.2, mdast-util-to-string@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/mdast-util-to-string/-/mdast-util-to-string-1.1.0.tgz#27055500103f51637bd07d01da01eb1967a43527" + integrity sha512-jVU0Nr2B9X3MU4tSK7JP1CMkSvOj7X5l/GboG1tKRw52lLF1x2Ju92Ms9tNetCcbfX3hzlM73zYo2NKkWSfF/A== + +mdn-data@2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-2.0.4.tgz#699b3c38ac6f1d728091a64650b65d388502fd5b" + integrity sha512-iV3XNKw06j5Q7mi6h+9vbx23Tv7JkjEVgKHW4pimwyDGWm0OIQntJJ+u1C6mg6mK1EaTv42XQ7w76yuzH7M2cA== + +mdn-data@2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-2.0.6.tgz#852dc60fcaa5daa2e8cf6c9189c440ed3e042978" + integrity sha512-rQvjv71olwNHgiTbfPZFkJtjNMciWgswYeciZhtvWLO8bmX3TnhyA62I6sTWOyZssWHJJjY6/KiWwqQsWWsqOA== + +mdurl@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/mdurl/-/mdurl-1.0.1.tgz#fe85b2ec75a59037f2adfec100fd6c601761152e" + integrity sha1-/oWy7HWlkDfyrf7BAP1sYBdhFS4= + +media-typer@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" + integrity sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g= + +mem@^4.0.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/mem/-/mem-4.3.0.tgz#461af497bc4ae09608cdb2e60eefb69bff744178" + integrity sha512-qX2bG48pTqYRVmDB37rn/6PT7LcR8T7oAX3bf99u1Tt1nzxYfxkgqDwUwolPlXweM0XzBOBFzSx4kfp7KP1s/w== + dependencies: + map-age-cleaner "^0.1.1" + mimic-fn "^2.0.0" + p-is-promise "^2.0.0" + +memoize-one@^5.0.0, memoize-one@^5.1.1, memoize-one@~5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/memoize-one/-/memoize-one-5.1.1.tgz#047b6e3199b508eaec03504de71229b8eb1d75c0" + integrity sha512-HKeeBpWvqiVJD57ZUAsJNm71eHTykffzcLZVYWiVfQeI1rJtuEaS7hQiEpWfVVk18donPwJEcFKIkCmPJNOhHA== + +memory-fs@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/memory-fs/-/memory-fs-0.4.1.tgz#3a9a20b8462523e447cfbc7e8bb80ed667bfc552" + integrity sha1-OpoguEYlI+RHz7x+i7gO1me/xVI= + dependencies: + errno "^0.1.3" + readable-stream "^2.0.1" + +memory-fs@^0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/memory-fs/-/memory-fs-0.5.0.tgz#324c01288b88652966d161db77838720845a8e3c" + integrity sha512-jA0rdU5KoQMC0e6ppoNRtpp6vjFq6+NY7r8hywnC7V+1Xj/MtHwGIbB1QaK/dunyjWteJzmkpd7ooeWg10T7GA== + dependencies: + errno "^0.1.3" + readable-stream "^2.0.1" + +merge-anything@^2.2.4: + version "2.4.4" + resolved "https://registry.yarnpkg.com/merge-anything/-/merge-anything-2.4.4.tgz#6226b2ac3d3d3fc5fb9e8d23aa400df25f98fdf0" + integrity sha512-l5XlriUDJKQT12bH+rVhAHjwIuXWdAIecGwsYjv2LJo+dA1AeRTmeQS+3QBpO6lEthBMDi2IUMpLC1yyRvGlwQ== + dependencies: + is-what "^3.3.1" + +merge-deep@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/merge-deep/-/merge-deep-3.0.2.tgz#f39fa100a4f1bd34ff29f7d2bf4508fbb8d83ad2" + integrity 
sha512-T7qC8kg4Zoti1cFd8Cr0M+qaZfOwjlPDEdZIIPPB2JZctjaPM4fX+i7HOId69tAti2fvO6X5ldfYUONDODsrkA== + dependencies: + arr-union "^3.1.0" + clone-deep "^0.2.4" + kind-of "^3.0.2" + +merge-descriptors@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61" + integrity sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E= + +merge-stream@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" + integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w== + +merge2@^1.2.3, merge2@^1.3.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" + integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== + +methods@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee" + integrity sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4= + +micro-api-client@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/micro-api-client/-/micro-api-client-3.3.0.tgz#52dd567d322f10faffe63d19d4feeac4e4ffd215" + integrity sha512-y0y6CUB9RLVsy3kfgayU28746QrNMpSm9O/AYGNsBgOkJr/X/Jk0VLGoO8Ude7Bpa8adywzF+MzXNZRFRsNPhg== + +micro-memoize@^2.1.1: + version "2.1.2" + resolved "https://registry.yarnpkg.com/micro-memoize/-/micro-memoize-2.1.2.tgz#0787eeb1a12b4033a0fe162dfc9df4280291cee4" + integrity sha512-COjNutiFgnDHXZEIM/jYuZPwq2h8zMUeScf6Sh6so98a+REqdlpaNS7Cb2ffGfK5I+xfgoA3Rx49NGuNJTJq3w== + +microevent.ts@~0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/microevent.ts/-/microevent.ts-0.1.1.tgz#70b09b83f43df5172d0205a63025bce0f7357fa0" + integrity sha512-jo1OfR4TaEwd5HOrt5+tAZ9mqT4jmpNAusXtyfNzqVm9uiSYFZlKM1wYL4oU7azZW/PxQW53wM0S6OR1JHNa2g== + +micromatch@^3.1.10, micromatch@^3.1.4: + version "3.1.10" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-3.1.10.tgz#70859bc95c9840952f359a068a3fc49f9ecfac23" + integrity sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg== + dependencies: + arr-diff "^4.0.0" + array-unique "^0.3.2" + braces "^2.3.1" + define-property "^2.0.2" + extend-shallow "^3.0.2" + extglob "^2.0.4" + fragment-cache "^0.2.1" + kind-of "^6.0.2" + nanomatch "^1.2.9" + object.pick "^1.3.0" + regex-not "^1.0.0" + snapdragon "^0.8.1" + to-regex "^3.0.2" + +micromatch@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.2.tgz#4fcb0999bf9fbc2fcbdd212f6d629b9a56c39259" + integrity sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q== + dependencies: + braces "^3.0.1" + picomatch "^2.0.5" + +miller-rabin@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/miller-rabin/-/miller-rabin-4.0.1.tgz#f080351c865b0dc562a8462966daa53543c78a4d" + integrity sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA== + dependencies: + bn.js "^4.0.0" + brorand "^1.0.1" + +mime-db@1.44.0, "mime-db@>= 1.43.0 < 2", mime-db@^1.28.0: + version "1.44.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.44.0.tgz#fa11c5eb0aca1334b4233cb4d52f10c5a6272f92" + integrity sha512-/NOTfLrsPBVeH7YtFPgsVWveuL+4SjjYxaQ1xtM1KMFj7HdxlBlxeyNLzhyJVx7r4rZGJAZ/6lkKCitSc/Nmpg== + +mime-db@~1.33.0: + version "1.33.0" + resolved 
"https://registry.yarnpkg.com/mime-db/-/mime-db-1.33.0.tgz#a3492050a5cb9b63450541e39d9788d2272783db" + integrity sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ== + +mime-types@2.1.18: + version "2.1.18" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.18.tgz#6f323f60a83d11146f831ff11fd66e2fe5503bb8" + integrity sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ== + dependencies: + mime-db "~1.33.0" + +mime-types@^2.1.12, mime-types@^2.1.26, mime-types@~2.1.17, mime-types@~2.1.24: + version "2.1.27" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.27.tgz#47949f98e279ea53119f5722e0f34e529bec009f" + integrity sha512-JIhqnCasI9yD+SsmkquHBxTSEuZdQX5BuQnS2Vc7puQQQ+8yiP5AY5uWhpdv4YL4VM5c6iliiYWPgJ/nJQLp7w== + dependencies: + mime-db "1.44.0" + +mime@1.6.0, mime@^1.2.11: + version "1.6.0" + resolved "https://registry.yarnpkg.com/mime/-/mime-1.6.0.tgz#32cd9e5c64553bd58d19a568af452acff04981b1" + integrity sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg== + +mime@^2.4.4: + version "2.4.6" + resolved "https://registry.yarnpkg.com/mime/-/mime-2.4.6.tgz#e5b407c90db442f2beb5b162373d07b69affa4d1" + integrity sha512-RZKhC3EmpBchfTGBVb8fb+RL2cWyw/32lshnsETttkBAyAUXSGHxbEJWWRXc751DrIxG1q04b8QwMbAwkRPpUA== + +mimic-fn@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-1.2.0.tgz#820c86a39334640e99516928bd03fca88057d022" + integrity sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ== + +mimic-fn@^2.0.0, mimic-fn@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" + integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== + +mimic-fn@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-3.1.0.tgz#65755145bbf3e36954b949c16450427451d5ca74" + integrity sha512-Ysbi9uYW9hFyfrThdDEQuykN4Ey6BuwPD2kpI5ES/nFTDn/98yxYNLZJcgUAKPT/mcrLLKaGzJR9YVxJrIdASQ== + +mimic-response@^1.0.0, mimic-response@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-1.0.1.tgz#4923538878eef42063cb8a3e3b0798781487ab1b" + integrity sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ== + +mini-create-react-context@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/mini-create-react-context/-/mini-create-react-context-0.4.0.tgz#df60501c83151db69e28eac0ef08b4002efab040" + integrity sha512-b0TytUgFSbgFJGzJqXPKCFCBWigAjpjo+Fl7Vf7ZbKRDptszpppKxXH6DRXEABZ/gcEQczeb0iZ7JvL8e8jjCA== + dependencies: + "@babel/runtime" "^7.5.5" + tiny-warning "^1.0.3" + +mini-css-extract-plugin@^0.8.0: + version "0.8.2" + resolved "https://registry.yarnpkg.com/mini-css-extract-plugin/-/mini-css-extract-plugin-0.8.2.tgz#a875e169beb27c88af77dd962771c9eedc3da161" + integrity sha512-a3Y4of27Wz+mqK3qrcd3VhYz6cU0iW5x3Sgvqzbj+XmlrSizmvu8QQMl5oMYJjgHOC4iyt+w7l4umP+dQeW3bw== + dependencies: + loader-utils "^1.1.0" + normalize-url "1.9.1" + schema-utils "^1.0.0" + webpack-sources "^1.1.0" + +minimalistic-assert@^1.0.0, minimalistic-assert@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz#2e194de044626d4a10e7f7fbc00ce73e83e4d5c7" + integrity 
sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A== + +minimalistic-crypto-utils@^1.0.0, minimalistic-crypto-utils@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz#f6c00c1c0b082246e5c4d99dfb8c7c083b2b582a" + integrity sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo= + +minimatch@3.0.4, minimatch@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" + integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== + dependencies: + brace-expansion "^1.1.7" + +minimist@^1.2.0, minimist@^1.2.5: + version "1.2.5" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.5.tgz#67d66014b66a6a8aaa0c083c5fd58df4e4e97602" + integrity sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw== + +minipass-collect@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/minipass-collect/-/minipass-collect-1.0.2.tgz#22b813bf745dc6edba2576b940022ad6edc8c617" + integrity sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA== + dependencies: + minipass "^3.0.0" + +minipass-flush@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/minipass-flush/-/minipass-flush-1.0.5.tgz#82e7135d7e89a50ffe64610a787953c4c4cbb373" + integrity sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw== + dependencies: + minipass "^3.0.0" + +minipass-pipeline@^1.2.2: + version "1.2.3" + resolved "https://registry.yarnpkg.com/minipass-pipeline/-/minipass-pipeline-1.2.3.tgz#55f7839307d74859d6e8ada9c3ebe72cec216a34" + integrity sha512-cFOknTvng5vqnwOpDsZTWhNll6Jf8o2x+/diplafmxpuIymAjzoOolZG0VvQf3V2HgqzJNhnuKHYp2BqDgz8IQ== + dependencies: + minipass "^3.0.0" + +minipass@^3.0.0, minipass@^3.1.1: + version "3.1.3" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-3.1.3.tgz#7d42ff1f39635482e15f9cdb53184deebd5815fd" + integrity sha512-Mgd2GdMVzY+x3IJ+oHnVM+KG3lA5c8tnabyJKmHSaG2kAGpudxuOf8ToDkhumF7UzME7DecbQE9uOZhNm7PuJg== + dependencies: + yallist "^4.0.0" + +mississippi@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/mississippi/-/mississippi-3.0.0.tgz#ea0a3291f97e0b5e8776b363d5f0a12d94c67022" + integrity sha512-x471SsVjUtBRtcvd4BzKE9kFC+/2TeWgKCgw0bZcw1b9l2X3QX5vCWgF+KaZaYm87Ss//rHnWryupDrgLvmSkA== + dependencies: + concat-stream "^1.5.0" + duplexify "^3.4.2" + end-of-stream "^1.1.0" + flush-write-stream "^1.0.0" + from2 "^2.1.0" + parallel-transform "^1.1.0" + pump "^3.0.0" + pumpify "^1.3.3" + stream-each "^1.1.0" + through2 "^2.0.0" + +mixin-deep@^1.2.0: + version "1.3.2" + resolved "https://registry.yarnpkg.com/mixin-deep/-/mixin-deep-1.3.2.tgz#1120b43dc359a785dce65b55b82e257ccf479566" + integrity sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA== + dependencies: + for-in "^1.0.2" + is-extendable "^1.0.1" + +mixin-object@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/mixin-object/-/mixin-object-2.0.1.tgz#4fb949441dab182540f1fe035ba60e1947a5e57e" + integrity sha1-T7lJRB2rGCVA8f4DW6YOGUel5X4= + dependencies: + for-in "^0.1.3" + is-extendable "^0.1.1" + +mkdirp@^0.5.1, mkdirp@^0.5.3, mkdirp@~0.5.1: + version "0.5.5" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.5.tgz#d91cefd62d1436ca0f41620e251288d420099def" + integrity 
sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ== + dependencies: + minimist "^1.2.5" + +mobx-react-lite@^1.4.2: + version "1.5.2" + resolved "https://registry.yarnpkg.com/mobx-react-lite/-/mobx-react-lite-1.5.2.tgz#c4395b0568b9cb16f07669d8869cc4efa1b8656d" + integrity sha512-PyZmARqqWtpuQaAoHF5pKX7h6TKNLwq6vtovm4zZvG6sEbMRHHSqioGXSeQbpRmG8Kw8uln3q/W1yMO5IfL5Sg== + +mobx-react@6.1.5: + version "6.1.5" + resolved "https://registry.yarnpkg.com/mobx-react/-/mobx-react-6.1.5.tgz#66a6f67bfe845216abc05d3aea47ceec8e31e2dd" + integrity sha512-EfWoXmGE2CfozH4Xirb65+il1ynHFCmxBSUabMSf+511YfjVs6QRcCrHkiVw+Il8iWp1gIyfa9qKkUgbDA9/2w== + dependencies: + mobx-react-lite "^1.4.2" + +mobx@^4.3.1: + version "4.15.4" + resolved "https://registry.yarnpkg.com/mobx/-/mobx-4.15.4.tgz#644eac80bdd15793855194e764c475041101b406" + integrity sha512-nyuHPqmKnVOnbvkjR8OrijBtovxAHYC+JU8/qBqvBw4Dez/n+zzxqNHbZNFy7/07+wwc/Qz7JS9WSfy1LcYISA== + +module-definition@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/module-definition/-/module-definition-3.3.0.tgz#aae06d68c99c5f93841e59b8a4469b974956d4d4" + integrity sha512-HTplA9xwDzH67XJFC1YvZMUElWJD28DV0dUq7lhTs+JKJamUOWA/CcYWSlhW5amJO66uWtY7XdltT+LfX0wIVg== + dependencies: + ast-module-types "^2.6.0" + node-source-walk "^4.0.0" + +moize@^5.4.4, moize@^5.4.7: + version "5.4.7" + resolved "https://registry.yarnpkg.com/moize/-/moize-5.4.7.tgz#bffa28806441d9f5cf1c4158b67a29413c438e83" + integrity sha512-7PZH8QFJ51cIVtDv7wfUREBd3gL59JB0v/ARA3RI9zkSRa9LyGjS1Bdldii2J1/NQXRQ/3OOVOSdnZrCcVaZlw== + dependencies: + fast-equals "^1.6.0" + fast-stringify "^1.1.0" + micro-memoize "^2.1.1" + +move-concurrently@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/move-concurrently/-/move-concurrently-1.0.1.tgz#be2c005fda32e0b29af1f05d7c4b33214c701f92" + integrity sha1-viwAX9oy4LKa8fBdfEszIUxwH5I= + dependencies: + aproba "^1.1.1" + copy-concurrently "^1.0.0" + fs-write-stream-atomic "^1.0.8" + mkdirp "^0.5.1" + rimraf "^2.5.4" + run-queue "^1.0.3" + +move-file@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/move-file/-/move-file-1.2.0.tgz#789f92d276c62511d214b1b285aa16e015c2f2fc" + integrity sha512-USHrRmxzGowUWAGBbJPdFjHzEqtxDU03pLHY0Rfqgtnq+q8FOIs8wvkkf+Udmg77SJKs47y9sI0jJvQeYsmiCA== + dependencies: + cp-file "^6.1.0" + make-dir "^3.0.0" + path-exists "^3.0.0" + +ms@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + integrity sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g= + +ms@2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.1.tgz#30a5864eb3ebb0a66f2ebe6d727af06a09d86e0a" + integrity sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg== + +ms@^2.1.1: + version "2.1.2" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" + integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== + +multicast-dns-service-types@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/multicast-dns-service-types/-/multicast-dns-service-types-1.1.0.tgz#899f11d9686e5e05cb91b35d5f0e63b773cfc901" + integrity sha1-iZ8R2WhuXgXLkbNdXw5jt3PPyQE= + +multicast-dns@^6.0.1: + version "6.2.3" + resolved "https://registry.yarnpkg.com/multicast-dns/-/multicast-dns-6.2.3.tgz#a0ec7bd9055c4282f790c3c82f4e28db3b31b229" + integrity 
sha512-ji6J5enbMyGRHIAkAOu3WdV8nggqviKCEKtXcOqfphZZtQrmHKycfynJ2V7eVPUA4NhJ6V7Wf4TmGbTwKE9B6g== + dependencies: + dns-packet "^1.3.1" + thunky "^1.0.2" + +multiparty@^4.2.1: + version "4.2.2" + resolved "https://registry.yarnpkg.com/multiparty/-/multiparty-4.2.2.tgz#bee5fb5737247628d39dab4979ffd6d57bf60ef6" + integrity sha512-NtZLjlvsjcoGrzojtwQwn/Tm90aWJ6XXtPppYF4WmOk/6ncdwMMKggFY2NlRRN9yiCEIVxpOfPWahVEG2HAG8Q== + dependencies: + http-errors "~1.8.0" + safe-buffer "5.2.1" + uid-safe "2.1.5" + +mute-stream@0.0.7: + version "0.0.7" + resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.7.tgz#3075ce93bc21b8fab43e1bc4da7e8115ed1e7bab" + integrity sha1-MHXOk7whuPq0PhvE2n6BFe0ee6s= + +mute-stream@0.0.8: + version "0.0.8" + resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.8.tgz#1630c42b2251ff81e2a283de96a5497ea92e5e0d" + integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA== + +nan@^2.12.1: + version "2.14.1" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.14.1.tgz#d7be34dfa3105b91494c3147089315eff8874b01" + integrity sha512-isWHgVjnFjh2x2yuJ/tj3JbwoHu3UC2dX5G/88Cm24yB6YopVgxvBObDY7n5xW6ExmFhJpSEQqFPvq9zaXc8Jw== + +nanomatch@^1.2.9: + version "1.2.13" + resolved "https://registry.yarnpkg.com/nanomatch/-/nanomatch-1.2.13.tgz#b87a8aa4fc0de8fe6be88895b38983ff265bd119" + integrity sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA== + dependencies: + arr-diff "^4.0.0" + array-unique "^0.3.2" + define-property "^2.0.2" + extend-shallow "^3.0.2" + fragment-cache "^0.2.1" + is-windows "^1.0.2" + kind-of "^6.0.2" + object.pick "^1.3.0" + regex-not "^1.0.0" + snapdragon "^0.8.1" + to-regex "^3.0.1" + +natural-orderby@^2.0.1: + version "2.0.3" + resolved "https://registry.yarnpkg.com/natural-orderby/-/natural-orderby-2.0.3.tgz#8623bc518ba162f8ff1cdb8941d74deb0fdcc016" + integrity sha512-p7KTHxU0CUrcOXe62Zfrb5Z13nLvPhSWR/so3kFulUQU0sgUll2Z0LwpsLN351eOOD+hRGu/F1g+6xDfPeD++Q== + +negotiator@0.6.2: + version "0.6.2" + resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.2.tgz#feacf7ccf525a77ae9634436a64883ffeca346fb" + integrity sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw== + +neo-async@^2.5.0, neo-async@^2.6.1: + version "2.6.1" + resolved "https://registry.yarnpkg.com/neo-async/-/neo-async-2.6.1.tgz#ac27ada66167fa8849a6addd837f6b189ad2081c" + integrity sha512-iyam8fBuCUpWeKPGpaNMetEocMt364qkCsfL9JuhjXX6dRnguRVOfk2GZaDpPjcOKiiXCPINZC1GczQ7iTq3Zw== + +nested-error-stacks@^2.0.0, nested-error-stacks@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/nested-error-stacks/-/nested-error-stacks-2.1.0.tgz#0fbdcf3e13fe4994781280524f8b96b0cdff9c61" + integrity sha512-AO81vsIO1k1sM4Zrd6Hu7regmJN1NSiAja10gc4bX3F0wd+9rQmcuHQaHVQCYIEC8iFXnE+mavh23GOt7wBgug== + +netlify-cli@^2.59.0: + version "2.59.0" + resolved "https://registry.yarnpkg.com/netlify-cli/-/netlify-cli-2.59.0.tgz#06449323f0f404165c67c86bf6965b6bd1c81450" + integrity sha512-cRY8dPcJ0ybsYiEVyaSFPbpEUIiY6fiUCJj4L2Vxey1BVZDduOV1AhU3t5Cb4FftMACrxdCWMY0dZ4yhFIT1iQ== + dependencies: + "@netlify/build" "^3.1.2" + "@netlify/config" "^2.0.6" + "@netlify/zip-it-and-ship-it" "^1.3.6" + "@oclif/command" "^1.5.18" + "@oclif/config" "^1.13.2" + "@oclif/errors" "^1.1.2" + "@oclif/parser" "^3.8.4" + "@oclif/plugin-help" "^2.2.0" + "@oclif/plugin-not-found" "^1.1.4" + "@oclif/plugin-plugins" "^1.7.8" + "@octokit/rest" "^16.28.1" + ansi-styles "^3.2.1" + 
ascii-table "0.0.9" + body-parser "^1.19.0" + boxen "^4.1.0" + chalk "^2.4.2" + chokidar "^3.0.2" + ci-info "^2.0.0" + clean-deep "^3.0.2" + cli-spinners "^1.3.1" + cli-ux "^5.2.1" + concordance "^4.0.0" + configstore "^5.0.0" + content-type "^1.0.4" + cookie "^0.4.0" + copy-template-dir "^1.4.0" + debug "^4.1.1" + dot-prop "^5.1.0" + dotenv "^8.2.0" + envinfo "^7.3.1" + execa "^2.0.3" + express "^4.17.1" + express-logging "^1.1.1" + find-up "^3.0.0" + fs-extra "^8.1.0" + fuzzy "^0.1.3" + get-port "^5.1.0" + gh-release-fetch "^1.0.3" + git-repo-info "^2.1.0" + gitconfiglocal "^2.1.0" + http-proxy "^1.18.0" + http-proxy-middleware "^0.21.0" + inquirer "^6.5.1" + inquirer-autocomplete-prompt "^1.0.1" + is-docker "^1.1.0" + jwt-decode "^2.2.0" + lambda-local "^1.7.1" + lodash.debounce "^4.0.8" + lodash.get "^4.4.2" + lodash.isempty "^4.4.0" + lodash.isequal "^4.5.0" + lodash.isobject "^3.0.2" + lodash.merge "^4.6.2" + lodash.pick "^4.4.0" + lodash.sample "^4.2.1" + lodash.snakecase "^4.1.1" + log-symbols "^2.2.0" + make-dir "^3.0.0" + minimist "^1.2.5" + multiparty "^4.2.1" + netlify "^4.3.7" + netlify-redirect-parser "^2.5.0" + netlify-redirector "^0.2.0" + node-fetch "^2.6.0" + npm-packlist "^1.4.4" + open "^6.4.0" + ora "^3.4.0" + p-wait-for "^2.0.0" + parse-github-url "^1.0.2" + parse-gitignore "^1.0.1" + precinct "^6.1.2" + prettyjson "^1.2.1" + random-item "^1.0.0" + raw-body "^2.4.1" + read-pkg-up "^6.0.0" + require-package-name "^2.0.1" + resolve "^1.12.0" + safe-join "^0.1.3" + static-server "^2.2.1" + strip-ansi-control-characters "^2.0.0" + update-notifier "^2.5.0" + uuid "^3.3.3" + wait-port "^0.2.2" + which "^2.0.2" + winston "^3.2.1" + wrap-ansi "^6.0.0" + write-file-atomic "^3.0.0" + +netlify-redirect-parser@^2.5.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/netlify-redirect-parser/-/netlify-redirect-parser-2.5.0.tgz#9110c567f2c4bbc5dacf34dc8976b2b77ca303b6" + integrity sha512-pF8BiOr3Pa4kQLLiOu53I0d30EIUDM0DYqYvCQmKD96cMX2qLh/QsxT0Zh18IrL5a0IWQ236/o76lTe0yEEw6w== + dependencies: + "@netlify/config" "^0.11.5" + lodash.isplainobject "^4.0.6" + +netlify-redirector@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/netlify-redirector/-/netlify-redirector-0.2.0.tgz#2037bf92520459277c9ae17f46e4df56104cd787" + integrity sha512-5SSUu++MXvE/tik90Hx7lzISBHCl5k4TqpVeTuBEoHp5K7uWitY7c3MjPNiY3kB83GSZiTNLbuIY7bo6mpyU3Q== + +netlify@^4.1.7, netlify@^4.3.10, netlify@^4.3.7: + version "4.3.10" + resolved "https://registry.yarnpkg.com/netlify/-/netlify-4.3.10.tgz#ba07df396807f4313ee76e42ccedf7bb4d0e109a" + integrity sha512-sBnOQYaihYTA70VUL3ks7ydH25ccklhXwm9/7fPtnVgTRt/yvGpJMylHzf6XT+JGGidwYRYxfcMn5qGbdjMrxw== + dependencies: + "@netlify/open-api" "^0.15.0" + "@netlify/zip-it-and-ship-it" "^1.3.9" + backoff "^2.5.0" + clean-deep "^3.3.0" + filter-obj "^2.0.1" + flush-write-stream "^2.0.0" + folder-walker "^3.2.0" + from2-array "0.0.4" + hasha "^5.0.0" + lodash.camelcase "^4.3.0" + lodash.flatten "^4.4.0" + lodash.get "^4.4.2" + lodash.set "^4.3.2" + micro-api-client "^3.3.0" + node-fetch "^2.2.0" + p-map "^3.0.0" + p-wait-for "^3.1.0" + parallel-transform "^1.1.0" + pump "^3.0.0" + qs "^6.9.3" + rimraf "^3.0.2" + tempy "^0.3.0" + through2-filter "^3.0.0" + through2-map "^3.0.0" + +nice-try@^1.0.4: + version "1.0.5" + resolved "https://registry.yarnpkg.com/nice-try/-/nice-try-1.0.5.tgz#a3378a7696ce7d223e88fc9b764bd7ef1089e366" + integrity sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ== + +njct@^8.0.0: + version 
"8.0.0" + resolved "https://registry.yarnpkg.com/njct/-/njct-8.0.0.tgz#7e2dd91d9eb9315cc6a2a0d7c29199f27d5f917d" + integrity sha512-TfPRCui1nHgpE6/LaApQVHekpnycXI5mFQG+7Wr9EZ1uN96UZTHKKIzyywGW9g4WnoQzCWb4cKUd/d8M4Rdoeg== + dependencies: + tslib "^1.9.0" + +no-case@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/no-case/-/no-case-3.0.3.tgz#c21b434c1ffe48b39087e86cfb4d2582e9df18f8" + integrity sha512-ehY/mVQCf9BL0gKfsJBvFJen+1V//U+0HQMPrWct40ixE4jnv0bfvxDbWtAHL9EcaPEOJHVVYKoQn1TlZUB8Tw== + dependencies: + lower-case "^2.0.1" + tslib "^1.10.0" + +node-emoji@^1.10.0: + version "1.10.0" + resolved "https://registry.yarnpkg.com/node-emoji/-/node-emoji-1.10.0.tgz#8886abd25d9c7bb61802a658523d1f8d2a89b2da" + integrity sha512-Yt3384If5H6BYGVHiHwTL+99OzJKHhgp82S8/dktEK73T26BazdgZ4JZh92xSVtGNJvz9UbXdNAc5hcrXV42vw== + dependencies: + lodash.toarray "^4.4.0" + +node-fetch-h2@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/node-fetch-h2/-/node-fetch-h2-2.3.0.tgz#c6188325f9bd3d834020bf0f2d6dc17ced2241ac" + integrity sha512-ofRW94Ab0T4AOh5Fk8t0h8OBWrmjb0SSB20xh1H8YnPV9EJ+f5AMoYSUQ2zgJ4Iq2HAK0I2l5/Nequ8YzFS3Hg== + dependencies: + http2-client "^1.2.5" + +node-fetch@^1.0.1: + version "1.7.3" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-1.7.3.tgz#980f6f72d85211a5347c6b2bc18c5b84c3eb47ef" + integrity sha512-NhZ4CsKx7cYm2vSrBAr2PvFOe6sWDf0UYLRqA6svUYg7+/TSfVAu49jYC4BvQ4Sms9SZgdqGBgroqfDhJdTyKQ== + dependencies: + encoding "^0.1.11" + is-stream "^1.0.1" + +node-fetch@^2.2.0, node-fetch@^2.3.0, node-fetch@^2.6.0: + version "2.6.0" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.0.tgz#e633456386d4aa55863f676a7ab0daa8fdecb0fd" + integrity sha512-8dG4H5ujfvFiqDmVu9fQ5bOHUC15JMjMY/Zumv26oOvvVJjM67KF8koCWIabKQ1GJIa9r2mMZscBq/TbdOcmNA== + +node-forge@0.9.0: + version "0.9.0" + resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-0.9.0.tgz#d624050edbb44874adca12bb9a52ec63cb782579" + integrity sha512-7ASaDa3pD+lJ3WvXFsxekJQelBKRpne+GOVbLbtHYdd7pFspyeuJHnWfLplGf3SwKGbfs/aYl5V/JCIaHVUKKQ== + +node-libs-browser@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/node-libs-browser/-/node-libs-browser-2.2.1.tgz#b64f513d18338625f90346d27b0d235e631f6425" + integrity sha512-h/zcD8H9kaDZ9ALUWwlBUDo6TKF8a7qBSCSEGfjTVIYeqsioSKaAX+BN7NgiMGp6iSIXZ3PxgCu8KS3b71YK5Q== + dependencies: + assert "^1.1.1" + browserify-zlib "^0.2.0" + buffer "^4.3.0" + console-browserify "^1.1.0" + constants-browserify "^1.0.0" + crypto-browserify "^3.11.0" + domain-browser "^1.1.1" + events "^3.0.0" + https-browserify "^1.0.0" + os-browserify "^0.3.0" + path-browserify "0.0.1" + process "^0.11.10" + punycode "^1.2.4" + querystring-es3 "^0.2.0" + readable-stream "^2.3.3" + stream-browserify "^2.0.1" + stream-http "^2.7.2" + string_decoder "^1.0.0" + timers-browserify "^2.0.4" + tty-browserify "0.0.0" + url "^0.11.0" + util "^0.11.0" + vm-browserify "^1.0.1" + +node-readfiles@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/node-readfiles/-/node-readfiles-0.2.0.tgz#dbbd4af12134e2e635c245ef93ffcf6f60673a5d" + integrity sha1-271K8SE04uY1wkXvk//Pb2BnOl0= + dependencies: + es6-promise "^3.2.1" + +node-releases@^1.1.52, node-releases@^1.1.53: + version "1.1.58" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.58.tgz#8ee20eef30fa60e52755fcc0942def5a734fe935" + integrity sha512-NxBudgVKiRh/2aPWMgPR7bPTX0VPmGx5QBwCtdHitnqFE5/O8DeBXuIMH1nwNnw/aMo6AjOrpsHzfY3UbUJ7yg== + +node-source-walk@^4.0.0, node-source-walk@^4.2.0: + 
version "4.2.0" + resolved "https://registry.yarnpkg.com/node-source-walk/-/node-source-walk-4.2.0.tgz#c2efe731ea8ba9c03c562aa0a9d984e54f27bc2c" + integrity sha512-hPs/QMe6zS94f5+jG3kk9E7TNm4P2SulrKiLWMzKszBfNZvL/V6wseHlTd7IvfW0NZWqPtK3+9yYNr+3USGteA== + dependencies: + "@babel/parser" "^7.0.0" + +noop2@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/noop2/-/noop2-2.0.0.tgz#4b636015e9882b54783c02b412f699d8c5cd0a5b" + integrity sha1-S2NgFemIK1R4PAK0EvaZ2MXNCls= + +nopt@~1.0.10: + version "1.0.10" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-1.0.10.tgz#6ddd21bd2a31417b92727dd585f8a6f37608ebee" + integrity sha1-bd0hvSoxQXuScn3Vhfim83YI6+4= + dependencies: + abbrev "1" + +normalize-package-data@^2.5.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.5.0.tgz#e66db1838b200c1dfc233225d12cb36520e234a8" + integrity sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA== + dependencies: + hosted-git-info "^2.1.4" + resolve "^1.10.0" + semver "2 || 3 || 4 || 5" + validate-npm-package-license "^3.0.1" + +normalize-path@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9" + integrity sha1-GrKLVW4Zg2Oowab35vogE3/mrtk= + dependencies: + remove-trailing-separator "^1.0.1" + +normalize-path@^3.0.0, normalize-path@~3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" + integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== + +normalize-range@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/normalize-range/-/normalize-range-0.1.2.tgz#2d10c06bdfd312ea9777695a4d28439456b75942" + integrity sha1-LRDAa9/TEuqXd2laTShDlFa3WUI= + +normalize-url@1.9.1: + version "1.9.1" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-1.9.1.tgz#2cc0d66b31ea23036458436e3620d85954c66c3c" + integrity sha1-LMDWazHqIwNkWENuNiDYWVTGbDw= + dependencies: + object-assign "^4.0.1" + prepend-http "^1.0.0" + query-string "^4.1.0" + sort-keys "^1.0.0" + +normalize-url@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-2.0.1.tgz#835a9da1551fa26f70e92329069a23aa6574d7e6" + integrity sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw== + dependencies: + prepend-http "^2.0.0" + query-string "^5.0.1" + sort-keys "^2.0.0" + +normalize-url@^3.0.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-3.3.0.tgz#b2e1c4dc4f7c6d57743df733a4f5978d18650559" + integrity sha512-U+JJi7duF1o+u2pynbp2zXDW2/PADgC30f0GsHZtRh+HOcXHnw137TrNlyxxRvWW5fjKd3bcLHPxofWuCjaeZg== + +normalize-url@^4.1.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-4.5.0.tgz#453354087e6ca96957bd8f5baf753f5982142129" + integrity sha512-2s47yzUxdexf1OhyRi4Em83iQk0aPvwTddtFz4hnSSw9dCEsLEGf6SwIO8ss/19S9iBb5sJaOuTvTGDeZI00BQ== + +npm-bundled@^1.0.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-1.1.1.tgz#1edd570865a94cdb1bc8220775e29466c9fb234b" + integrity sha512-gqkfgGePhTpAEgUsGEgcq1rqPXA+tv/aVBlgEzfXwA1yiUJF7xtEt3CtVwOjNYQOVknDk0F20w58Fnm3EtG0fA== + dependencies: + npm-normalize-package-bin "^1.0.1" + +npm-conf@^1.1.0: + version "1.1.3" + resolved 
"https://registry.yarnpkg.com/npm-conf/-/npm-conf-1.1.3.tgz#256cc47bd0e218c259c4e9550bf413bc2192aff9" + integrity sha512-Yic4bZHJOt9RCFbRP3GgpqhScOY4HH3V2P8yBj6CeYq118Qr+BLXqT2JvpJ00mryLESpgOxf5XlFv4ZjXxLScw== + dependencies: + config-chain "^1.1.11" + pify "^3.0.0" + +npm-normalize-package-bin@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/npm-normalize-package-bin/-/npm-normalize-package-bin-1.0.1.tgz#6e79a41f23fd235c0623218228da7d9c23b8f6e2" + integrity sha512-EPfafl6JL5/rU+ot6P3gRSCpPDW5VmIzX959Ob1+ySFUuuYHWHekXpwdUZcKP5C+DS4GEtdJluwBjnsNDl+fSA== + +npm-packlist@^1.4.4: + version "1.4.8" + resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.4.8.tgz#56ee6cc135b9f98ad3d51c1c95da22bbb9b2ef3e" + integrity sha512-5+AZgwru5IevF5ZdnFglB5wNlHG1AOOuw28WhUq8/8emhBmLv6jX5by4WJCh7lW0uSYZYS6DXqIsyZVIXRZU9A== + dependencies: + ignore-walk "^3.0.1" + npm-bundled "^1.0.1" + npm-normalize-package-bin "^1.0.1" + +npm-run-path@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-2.0.2.tgz#35a9232dfa35d7067b4cb2ddf2357b1871536c5f" + integrity sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8= + dependencies: + path-key "^2.0.0" + +npm-run-path@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-3.1.0.tgz#7f91be317f6a466efed3c9f2980ad8a4ee8b0fa5" + integrity sha512-Dbl4A/VfiVGLgQv29URL9xshU8XDY1GeLy+fsaZ1AA8JDSfjvr5P5+pzRbWqRSBxk6/DW7MIh8lTM/PaGnP2kg== + dependencies: + path-key "^3.0.0" + +npm-run-path@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-4.0.1.tgz#b7ecd1e5ed53da8e37a55e1c2269e0b97ed748ea" + integrity sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw== + dependencies: + path-key "^3.0.0" + +nprogress@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/nprogress/-/nprogress-0.2.0.tgz#cb8f34c53213d895723fcbab907e9422adbcafb1" + integrity sha1-y480xTIT2JVyP8urkH6UIq28r7E= + +nth-check@^1.0.2, nth-check@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/nth-check/-/nth-check-1.0.2.tgz#b2bd295c37e3dd58a3bf0700376663ba4d9cf05c" + integrity sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg== + dependencies: + boolbase "~1.0.0" + +null-loader@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/null-loader/-/null-loader-3.0.0.tgz#3e2b6c663c5bda8c73a54357d8fa0708dc61b245" + integrity sha512-hf5sNLl8xdRho4UPBOOeoIwT3WhjYcMUQm0zj44EhD6UscMAz72o2udpoDFBgykucdEDGIcd6SXbc/G6zssbzw== + dependencies: + loader-utils "^1.2.3" + schema-utils "^1.0.0" + +num2fraction@^1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/num2fraction/-/num2fraction-1.2.2.tgz#6f682b6a027a4e9ddfa4564cd2589d1d4e669ede" + integrity sha1-b2gragJ6Tp3fpFZM0lidHU5mnt4= + +number-is-nan@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d" + integrity sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0= + +oas-kit-common@^1.0.7, oas-kit-common@^1.0.8: + version "1.0.8" + resolved "https://registry.yarnpkg.com/oas-kit-common/-/oas-kit-common-1.0.8.tgz#6d8cacf6e9097967a4c7ea8bcbcbd77018e1f535" + integrity sha512-pJTS2+T0oGIwgjGpw7sIRU8RQMcUoKCDWFLdBqKB2BNmGpbBMH2sdqAaOXUg8OzonZHU0L7vfJu1mJFEiYDWOQ== + dependencies: + fast-safe-stringify "^2.0.7" + +oas-linter@^3.1.0: + version "3.1.3" + resolved 
"https://registry.yarnpkg.com/oas-linter/-/oas-linter-3.1.3.tgz#1526b3da32a1bbf124d720f27fd4eb9971cebfff" + integrity sha512-jFWBHjSoqODGo7cKA/VWqqWSLbHNtnyCEpa2nMMS64SzCUbZDk63Oe7LqQZ2qJA0K2VRreYLt6cVkYy6MqNRDg== + dependencies: + should "^13.2.1" + yaml "^1.8.3" + +oas-resolver@^2.3.0: + version "2.4.1" + resolved "https://registry.yarnpkg.com/oas-resolver/-/oas-resolver-2.4.1.tgz#46948226f73e514ac6733f166cc559e800e4389b" + integrity sha512-rRmUv9mDTKPtsB2OGaoNMK4BC1Q/pL+tWRPKRjXJEBoLmfegJhecOZPBtIR0gKEVQb9iAA0MqulkgY43EiCFDg== + dependencies: + node-fetch-h2 "^2.3.0" + oas-kit-common "^1.0.8" + reftools "^1.1.3" + yaml "^1.8.3" + yargs "^15.3.1" + +oas-schema-walker@^1.1.3: + version "1.1.4" + resolved "https://registry.yarnpkg.com/oas-schema-walker/-/oas-schema-walker-1.1.4.tgz#4b9d090c3622039741334d3e138510ff38197618" + integrity sha512-foVDDS0RJYMfhQEDh/WdBuCzydTcsCnGo9EeD8SpWq1uW10JXiz+8SfYVDA7LO87kjmlnTRZle/2gr5qxabaEA== + +oas-validator@^3.4.0: + version "3.4.0" + resolved "https://registry.yarnpkg.com/oas-validator/-/oas-validator-3.4.0.tgz#7633b02e495af4a4e0224b249288b0928748476d" + integrity sha512-l/SxykuACi2U51osSsBXTxdsFc8Fw41xI7AsZkzgVgWJAzoEFaaNptt35WgY9C3757RUclsm6ye5GvSyYoozLQ== + dependencies: + ajv "^5.5.2" + better-ajv-errors "^0.6.7" + call-me-maybe "^1.0.1" + oas-kit-common "^1.0.7" + oas-linter "^3.1.0" + oas-resolver "^2.3.0" + oas-schema-walker "^1.1.3" + reftools "^1.1.0" + should "^13.2.1" + yaml "^1.8.3" + +object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + integrity sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM= + +object-copy@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/object-copy/-/object-copy-0.1.0.tgz#7e7d858b781bd7c991a41ba975ed3812754e998c" + integrity sha1-fn2Fi3gb18mRpBupde04EnVOmYw= + dependencies: + copy-descriptor "^0.1.0" + define-property "^0.2.5" + kind-of "^3.0.3" + +object-inspect@^1.7.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.7.0.tgz#f4f6bd181ad77f006b5ece60bd0b6f398ff74a67" + integrity sha512-a7pEHdh1xKIAgTySUGgLMx/xwDZskN1Ud6egYYN3EdRW4ZMPNEDUTF+hwy2LUC+Bl+SyLXANnwz/jyh/qutKUw== + +object-is@^1.0.1: + version "1.1.2" + resolved "https://registry.yarnpkg.com/object-is/-/object-is-1.1.2.tgz#c5d2e87ff9e119f78b7a088441519e2eec1573b6" + integrity sha512-5lHCz+0uufF6wZ7CRFWJN3hp8Jqblpgve06U5CMQ3f//6iDjPr2PEo9MWCjEssDsa+UZEL4PkFpr+BMop6aKzQ== + dependencies: + define-properties "^1.1.3" + es-abstract "^1.17.5" + +object-keys@^1.0.11, object-keys@^1.0.12, object-keys@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e" + integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA== + +object-treeify@^1.1.4: + version "1.1.26" + resolved "https://registry.yarnpkg.com/object-treeify/-/object-treeify-1.1.26.tgz#80b882036cb5d0fddc16e8a2affa98ba96f89a74" + integrity sha512-0WTfU7SGM8umY4YPpOg+oHXL66E6dPVCr+sMR6KitPmvg8CkVrHUUZYEFtx0+5Wb0HjFEsBwBYXyGRNeX7c/oQ== + +object-visit@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/object-visit/-/object-visit-1.0.1.tgz#f79c4493af0c5377b59fe39d395e41042dd045bb" + integrity sha1-95xEk68MU3e1n+OdOV5BBC3QRbs= + dependencies: + isobject "^3.0.0" + +object.assign@^4.1.0: + version "4.1.0" + resolved 
"https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.0.tgz#968bf1100d7956bb3ca086f006f846b3bc4008da" + integrity sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w== + dependencies: + define-properties "^1.1.2" + function-bind "^1.1.1" + has-symbols "^1.0.0" + object-keys "^1.0.11" + +object.getownpropertydescriptors@^2.0.3, object.getownpropertydescriptors@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.0.tgz#369bf1f9592d8ab89d712dced5cb81c7c5352649" + integrity sha512-Z53Oah9A3TdLoblT7VKJaTDdXdT+lQO+cNpKVnya5JDe9uLvzu1YyY1yFDFrcxrlRgWrEFH0jJtD/IbuwjcEVg== + dependencies: + define-properties "^1.1.3" + es-abstract "^1.17.0-next.1" + +object.pick@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/object.pick/-/object.pick-1.3.0.tgz#87a10ac4c1694bd2e1cbf53591a66141fb5dd747" + integrity sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c= + dependencies: + isobject "^3.0.1" + +object.values@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/object.values/-/object.values-1.1.1.tgz#68a99ecde356b7e9295a3c5e0ce31dc8c953de5e" + integrity sha512-WTa54g2K8iu0kmS/us18jEmdv1a4Wi//BZ/DTVYEcH0XhLM5NYdpDHja3gt57VrZLcNAO2WGA+KpWsDBaHt6eA== + dependencies: + define-properties "^1.1.3" + es-abstract "^1.17.0-next.1" + function-bind "^1.1.1" + has "^1.0.3" + +obuf@^1.0.0, obuf@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/obuf/-/obuf-1.1.2.tgz#09bea3343d41859ebd446292d11c9d4db619084e" + integrity sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg== + +octokit-pagination-methods@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/octokit-pagination-methods/-/octokit-pagination-methods-1.1.0.tgz#cf472edc9d551055f9ef73f6e42b4dbb4c80bea4" + integrity sha512-fZ4qZdQ2nxJvtcasX7Ghl+WlWS/d9IgnBIwFZXVNNZUmzpno91SX5bc5vuxiuKoCtK78XxGGNuSCrDC7xYB3OQ== + +on-finished@~2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947" + integrity sha1-IPEzZIGwg811M3mSoWlxqi2QaUc= + dependencies: + ee-first "1.1.1" + +on-headers@^1.0.0, on-headers@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.2.tgz#772b0ae6aaa525c399e489adfad90c403eb3c28f" + integrity sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA== + +once@^1.3.0, once@^1.3.1, once@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E= + dependencies: + wrappy "1" + +one-time@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/one-time/-/one-time-1.0.0.tgz#e06bc174aed214ed58edede573b433bbf827cb45" + integrity sha512-5DXOiRKwuSEcQ/l0kGCF6Q3jcADFv5tSmRaJck/OqkVFcOzutB134KRSfF0xDrL39MNnqxbHBbUUcjZIhTgb2g== + dependencies: + fn.name "1.x.x" + +onetime@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/onetime/-/onetime-2.0.1.tgz#067428230fd67443b2794b22bba528b6867962d4" + integrity sha1-BnQoIw/WdEOyeUsiu6UotoZ5YtQ= + dependencies: + mimic-fn "^1.0.0" + +onetime@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/onetime/-/onetime-5.1.0.tgz#fff0f3c91617fe62bb50189636e99ac8a6df7be5" + integrity sha512-5NcSkPHhwTVFIQN+TUqXoS5+dlElHXdpAWu9I0HP20YOtIi+aZ0Ct82jdlILDxjLEAWwvm+qj1m6aEtsDVmm6Q== + dependencies: + 
mimic-fn "^2.1.0" + +ono@^4.0.11: + version "4.0.11" + resolved "https://registry.yarnpkg.com/ono/-/ono-4.0.11.tgz#c7f4209b3e396e8a44ef43b9cedc7f5d791d221d" + integrity sha512-jQ31cORBFE6td25deYeD80wxKBMj+zBmHTrVxnc6CKhx8gho6ipmWM5zj/oeoqioZ99yqBls9Z/9Nss7J26G2g== + dependencies: + format-util "^1.0.3" + +open@^6.4.0: + version "6.4.0" + resolved "https://registry.yarnpkg.com/open/-/open-6.4.0.tgz#5c13e96d0dc894686164f18965ecfe889ecfc8a9" + integrity sha512-IFenVPgF70fSm1keSd2iDBIDIBZkroLeuffXq+wKTzTJlBpesFWojV9lb8mzOfaAzM1sr7HQHuO0vtV0zYekGg== + dependencies: + is-wsl "^1.1.0" + +open@^7.0.2: + version "7.0.4" + resolved "https://registry.yarnpkg.com/open/-/open-7.0.4.tgz#c28a9d315e5c98340bf979fdcb2e58664aa10d83" + integrity sha512-brSA+/yq+b08Hsr4c8fsEW2CRzk1BmfN3SAK/5VCHQ9bdoZJ4qa/+AfR0xHjlbbZUyPkUHs1b8x1RqdyZdkVqQ== + dependencies: + is-docker "^2.0.0" + is-wsl "^2.1.1" + +openapi-sampler@^1.0.0-beta.16: + version "1.0.0-beta.16" + resolved "https://registry.yarnpkg.com/openapi-sampler/-/openapi-sampler-1.0.0-beta.16.tgz#7813524d5b88d222efb772ceb5a809075d6d9174" + integrity sha512-05+GvwMagTY7GxoDQoWJfmAUFlxfebciiEzqKmu4iq6+MqBEn62AMUkn0CTxyKhnUGIaR2KXjTeslxIeJwVIOw== + dependencies: + json-pointer "^0.6.0" + +opener@^1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/opener/-/opener-1.5.1.tgz#6d2f0e77f1a0af0032aca716c2c1fbb8e7e8abed" + integrity sha512-goYSy5c2UXE4Ra1xixabeVh1guIX/ZV/YokJksb6q2lubWu6UbvPQ20p542/sFIll1nl8JnCyK9oBaOcCWXwvA== + +opn@^5.2.0, opn@^5.5.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/opn/-/opn-5.5.0.tgz#fc7164fab56d235904c51c3b27da6758ca3b9bfc" + integrity sha512-PqHpggC9bLV0VeWcdKhkpxY+3JTzetLSqTCWL/z/tFIbI6G8JCjondXklT1JinczLz2Xib62sSp0T/gKT4KksA== + dependencies: + is-wsl "^1.1.0" + +optimize-css-assets-webpack-plugin@^5.0.3: + version "5.0.3" + resolved "https://registry.yarnpkg.com/optimize-css-assets-webpack-plugin/-/optimize-css-assets-webpack-plugin-5.0.3.tgz#e2f1d4d94ad8c0af8967ebd7cf138dcb1ef14572" + integrity sha512-q9fbvCRS6EYtUKKSwI87qm2IxlyJK5b4dygW1rKUBT6mMDhdG5e5bZT63v6tnJR9F9FB/H5a0HTmtw+laUBxKA== + dependencies: + cssnano "^4.1.10" + last-call-webpack-plugin "^3.0.0" + +optionator@^0.8.1: + version "0.8.3" + resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.3.tgz#84fa1d036fe9d3c7e21d99884b601167ec8fb495" + integrity sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA== + dependencies: + deep-is "~0.1.3" + fast-levenshtein "~2.0.6" + levn "~0.3.0" + prelude-ls "~1.1.2" + type-check "~0.3.2" + word-wrap "~1.2.3" + +ora@^3.4.0: + version "3.4.0" + resolved "https://registry.yarnpkg.com/ora/-/ora-3.4.0.tgz#bf0752491059a3ef3ed4c85097531de9fdbcd318" + integrity sha512-eNwHudNbO1folBP3JsZ19v9azXWtQZjICdr3Q0TDPIaeBQ3mXLrh54wM+er0+hSp+dWKf+Z8KM58CYzEyIYxYg== + dependencies: + chalk "^2.4.2" + cli-cursor "^2.1.0" + cli-spinners "^2.0.0" + log-symbols "^2.2.0" + strip-ansi "^5.2.0" + wcwidth "^1.0.1" + +original@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/original/-/original-1.0.2.tgz#e442a61cffe1c5fd20a65f3261c26663b303f25f" + integrity sha512-hyBVl6iqqUOJ8FqRe+l/gS8H+kKYjrEndd5Pm1MfBtsEKA038HkkdbAl/72EAXGyonD/PFsvmVG+EvcIpliMBg== + dependencies: + url-parse "^1.4.3" + +os-browserify@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/os-browserify/-/os-browserify-0.3.0.tgz#854373c7f5c2315914fc9bfc6bd8238fdda1ec27" + integrity sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc= + +os-homedir@^1.0.1: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3" + integrity sha1-/7xJiDNuDoM94MFox+8VISGqf7M= + +os-locale@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-3.1.0.tgz#a802a6ee17f24c10483ab9935719cef4ed16bf1a" + integrity sha512-Z8l3R4wYWM40/52Z+S265okfFj8Kt2cC2MKY+xNi3kFs+XGI7WXu/I309QQQYbRW4ijiZ+yxs9pqEhJh0DqW3Q== + dependencies: + execa "^1.0.0" + lcid "^2.0.0" + mem "^4.0.0" + +os-name@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/os-name/-/os-name-3.1.0.tgz#dec19d966296e1cd62d701a5a66ee1ddeae70801" + integrity sha512-h8L+8aNjNcMpo/mAIBPn5PXCM16iyPGjHNWo6U1YO8sJTMHtEtyczI6QJnLoplswm6goopQkqc7OAnjhWcugVg== + dependencies: + macos-release "^2.2.0" + windows-release "^3.1.0" + +os-tmpdir@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" + integrity sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ= + +p-all@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/p-all/-/p-all-2.1.0.tgz#91419be56b7dee8fe4c5db875d55e0da084244a0" + integrity sha512-HbZxz5FONzz/z2gJfk6bFca0BCiSRF8jU3yCsWOen/vR6lZjfPOu/e7L3uFzTW1i0H8TlC3vqQstEJPQL4/uLA== + dependencies: + p-map "^2.0.0" + +p-cancelable@^0.4.0: + version "0.4.1" + resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-0.4.1.tgz#35f363d67d52081c8d9585e37bcceb7e0bbcb2a0" + integrity sha512-HNa1A8LvB1kie7cERyy21VNeHb2CWJJYqyyC2o3klWFfMGlFmWv2Z7sFgZH8ZiaYL95ydToKTFVXgMV/Os0bBQ== + +p-cancelable@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-1.1.0.tgz#d078d15a3af409220c886f1d9a0ca2e441ab26cc" + integrity sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw== + +p-defer@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-defer/-/p-defer-1.0.0.tgz#9f6eb182f6c9aa8cd743004a7d4f96b196b0fb0c" + integrity sha1-n26xgvbJqozXQwBKfU+WsZaw+ww= + +p-event@^2.1.0: + version "2.3.1" + resolved "https://registry.yarnpkg.com/p-event/-/p-event-2.3.1.tgz#596279ef169ab2c3e0cae88c1cfbb08079993ef6" + integrity sha512-NQCqOFhbpVTMX4qMe8PF8lbGtzZ+LCiN7pcNrb/413Na7+TRoe1xkKUzuWa/YEJdGQ0FvKtj35EEbDoVPO2kbA== + dependencies: + p-timeout "^2.0.1" + +p-event@^4.1.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/p-event/-/p-event-4.2.0.tgz#af4b049c8acd91ae81083ebd1e6f5cae2044c1b5" + integrity sha512-KXatOjCRXXkSePPb1Nbi0p0m+gQAwdlbhi4wQKJPI1HsMQS9g+Sqp2o+QHziPr7eYJyOZet836KoHEVM1mwOrQ== + dependencies: + p-timeout "^3.1.0" + +p-filter@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/p-filter/-/p-filter-2.1.0.tgz#1b1472562ae7a0f742f0f3d3d3718ea66ff9c09c" + integrity sha512-ZBxxZ5sL2HghephhpGAQdoskxplTwr7ICaehZwLIlfL6acuVgZPm8yBNuRAFBGEqtD/hmUeq9eqLg2ys9Xr/yw== + dependencies: + p-map "^2.0.0" + +p-finally@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" + integrity sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4= + +p-finally@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-2.0.1.tgz#bd6fcaa9c559a096b680806f4d657b3f0f240561" + integrity sha512-vpm09aKwq6H9phqRQzecoDpD8TmVyGw70qmWlyq5onxY7tqyTTFVvxMykxQSQKILBSFlbXpypIw2T1Ml7+DDtw== + +p-is-promise@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/p-is-promise/-/p-is-promise-1.1.0.tgz#9c9456989e9f6588017b0434d56097675c3da05e" + integrity 
sha1-nJRWmJ6fZYgBewQ01WCXZ1w9oF4= + +p-is-promise@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/p-is-promise/-/p-is-promise-2.1.0.tgz#918cebaea248a62cf7ffab8e3bca8c5f882fc42e" + integrity sha512-Y3W0wlRPK8ZMRbNq97l4M5otioeA5lm1z7bkNkxCka8HSPjR0xRWmpCmc9utiaLP9Jb1eD8BgeIxTW4AIF45Pg== + +p-limit@^1.1.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-1.3.0.tgz#b86bd5f0c25690911c7590fcbfc2010d54b3ccb8" + integrity sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q== + dependencies: + p-try "^1.0.0" + +p-limit@^2.0.0, p-limit@^2.2.0, p-limit@^2.2.1, p-limit@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1" + integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w== + dependencies: + p-try "^2.0.0" + +p-locate@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-2.0.0.tgz#20a0103b222a70c8fd39cc2e580680f3dde5ec43" + integrity sha1-IKAQOyIqcMj9OcwuWAaA893l7EM= + dependencies: + p-limit "^1.1.0" + +p-locate@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-3.0.0.tgz#322d69a05c0264b25997d9f40cd8a891ab0064a4" + integrity sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ== + dependencies: + p-limit "^2.0.0" + +p-locate@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-4.1.0.tgz#a3428bb7088b3a60292f66919278b7c297ad4f07" + integrity sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A== + dependencies: + p-limit "^2.2.0" + +p-map@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/p-map/-/p-map-2.1.0.tgz#310928feef9c9ecc65b68b17693018a665cea175" + integrity sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw== + +p-map@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/p-map/-/p-map-3.0.0.tgz#d704d9af8a2ba684e2600d9a215983d4141a979d" + integrity sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ== + dependencies: + aggregate-error "^3.0.0" + +p-reduce@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/p-reduce/-/p-reduce-2.1.0.tgz#09408da49507c6c274faa31f28df334bc712b64a" + integrity sha512-2USApvnsutq8uoxZBGbbWM0JIYLiEMJ9RlaN7fAzVNb9OZN0SHjjTTfIcb667XynS5Y1VhwDJVDa72TnPzAYWw== + +p-retry@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/p-retry/-/p-retry-3.0.1.tgz#316b4c8893e2c8dc1cfa891f406c4b422bebf328" + integrity sha512-XE6G4+YTTkT2a0UWb2kjZe8xNwf8bIbnqpc/IS/idOBVhyves0mK5OJgeocjx7q5pvX/6m23xuzVPYT1uGM73w== + dependencies: + retry "^0.12.0" + +p-timeout@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-2.0.1.tgz#d8dd1979595d2dc0139e1fe46b8b646cb3cdf038" + integrity sha512-88em58dDVB/KzPEx1X0N3LwFfYZPyDc4B6eF38M1rk9VTZMbxXXgjugz8mmwpS9Ox4BDZ+t6t3QP5+/gazweIA== + dependencies: + p-finally "^1.0.0" + +p-timeout@^3.0.0, p-timeout@^3.1.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-3.2.0.tgz#c7e17abc971d2a7962ef83626b35d635acf23dfe" + integrity sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg== + dependencies: + p-finally "^1.0.0" + +p-try@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/p-try/-/p-try-1.0.0.tgz#cbc79cdbaf8fd4228e13f621f2b1a237c1b207b3" + integrity sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M= + +p-try@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" + integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== + +p-wait-for@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/p-wait-for/-/p-wait-for-2.0.1.tgz#bc4dc3410dcba8ca4a92354f5b44f5c4ee6b6d7f" + integrity sha512-edEuJC7eRokPf3AWycsS3lp8JimWLnVgCeGoWw67qFerUmsAHKyhRBj8rDvaBjPV2bTyzgTwmF+U8vNMMBEcyA== + dependencies: + p-timeout "^2.0.1" + +p-wait-for@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/p-wait-for/-/p-wait-for-3.1.0.tgz#9da568a2adda3ea8175a3c43f46a5317e28c0e47" + integrity sha512-0Uy19uhxbssHelu9ynDMcON6BmMk6pH8551CvxROhiz3Vx+yC4RqxjyIDk2V4ll0g9177RKT++PK4zcV58uJ7A== + dependencies: + p-timeout "^3.0.0" + +package-json@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/package-json/-/package-json-4.0.1.tgz#8869a0401253661c4c4ca3da6c2121ed555f5eed" + integrity sha1-iGmgQBJTZhxMTKPabCEh7VVfXu0= + dependencies: + got "^6.7.1" + registry-auth-token "^3.0.1" + registry-url "^3.0.3" + semver "^5.1.0" + +package-json@^6.3.0: + version "6.5.0" + resolved "https://registry.yarnpkg.com/package-json/-/package-json-6.5.0.tgz#6feedaca35e75725876d0b0e64974697fed145b0" + integrity sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ== + dependencies: + got "^9.6.0" + registry-auth-token "^4.0.0" + registry-url "^5.0.0" + semver "^6.2.0" + +pako@~1.0.5: + version "1.0.11" + resolved "https://registry.yarnpkg.com/pako/-/pako-1.0.11.tgz#6c9599d340d54dfd3946380252a35705a6b992bf" + integrity sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw== + +parallel-transform@^1.1.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/parallel-transform/-/parallel-transform-1.2.0.tgz#9049ca37d6cb2182c3b1d2c720be94d14a5814fc" + integrity sha512-P2vSmIu38uIlvdcU7fDkyrxj33gTUy/ABO5ZUbGowxNCopBq/OoD42bP4UmMrJoPyk4Uqf0mu3mtWBhHCZD8yg== + dependencies: + cyclist "^1.0.1" + inherits "^2.0.3" + readable-stream "^2.1.5" + +param-case@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/param-case/-/param-case-3.0.3.tgz#4be41f8399eff621c56eebb829a5e451d9801238" + integrity sha512-VWBVyimc1+QrzappRs7waeN2YmoZFCGXWASRYX1/rGHtXqEcrGEIDm+jqIwFa2fRXNgQEwrxaYuIrX0WcAguTA== + dependencies: + dot-case "^3.0.3" + tslib "^1.10.0" + +parent-module@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2" + integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g== + dependencies: + callsites "^3.0.0" + +parse-asn1@^5.0.0, parse-asn1@^5.1.5: + version "5.1.5" + resolved "https://registry.yarnpkg.com/parse-asn1/-/parse-asn1-5.1.5.tgz#003271343da58dc94cace494faef3d2147ecea0e" + integrity sha512-jkMYn1dcJqF6d5CpU689bq7w/b5ALS9ROVSpQDPrZsqqesUJii9qutvoT5ltGedNXMO2e16YUWIghG9KxaViTQ== + dependencies: + asn1.js "^4.0.0" + browserify-aes "^1.0.0" + create-hash "^1.1.0" + evp_bytestokey "^1.0.0" + pbkdf2 "^3.0.3" + safe-buffer "^5.1.1" + +parse-entities@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/parse-entities/-/parse-entities-2.0.0.tgz#53c6eb5b9314a1f4ec99fa0fdf7ce01ecda0cbe8" + integrity 
sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ== + dependencies: + character-entities "^1.0.0" + character-entities-legacy "^1.0.0" + character-reference-invalid "^1.0.0" + is-alphanumerical "^1.0.0" + is-decimal "^1.0.0" + is-hexadecimal "^1.0.0" + +parse-github-url@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/parse-github-url/-/parse-github-url-1.0.2.tgz#242d3b65cbcdda14bb50439e3242acf6971db395" + integrity sha512-kgBf6avCbO3Cn6+RnzRGLkUsv4ZVqv/VfAYkRsyBcgkshNvVBkRn1FEZcW0Jb+npXQWm2vHPnnOqFteZxRRGNw== + +parse-gitignore@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/parse-gitignore/-/parse-gitignore-1.0.1.tgz#8b9dc57f17b810d495c5dfa62eb07caffe7758c7" + integrity sha512-UGyowyjtx26n65kdAMWhm6/3uy5uSrpcuH7tt+QEVudiBoVS+eqHxD5kbi9oWVRwj7sCzXqwuM+rUGw7earl6A== + +parse-json@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-4.0.0.tgz#be35f5425be1f7f6c747184f98a788cb99477ee0" + integrity sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA= + dependencies: + error-ex "^1.3.1" + json-parse-better-errors "^1.0.1" + +parse-json@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-5.0.0.tgz#73e5114c986d143efa3712d4ea24db9a4266f60f" + integrity sha512-OOY5b7PAEFV0E2Fir1KOkxchnZNCdowAJgQ5NuxjpBKTRP3pQhwkrkxqQjeoKJ+fO7bCpmIZaogI4eZGDMEGOw== + dependencies: + "@babel/code-frame" "^7.0.0" + error-ex "^1.3.1" + json-parse-better-errors "^1.0.1" + lines-and-columns "^1.1.6" + +parse-ms@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/parse-ms/-/parse-ms-2.1.0.tgz#348565a753d4391fa524029956b172cb7753097d" + integrity sha512-kHt7kzLoS9VBZfUsiKjv43mr91ea+U05EyKkEtqp7vNbHxmaVuEqN7XxeEVnGrMtYOAxGrDElSi96K7EgO1zCA== + +parse-numeric-range@^0.0.2: + version "0.0.2" + resolved "https://registry.yarnpkg.com/parse-numeric-range/-/parse-numeric-range-0.0.2.tgz#b4f09d413c7adbcd987f6e9233c7b4b210c938e4" + integrity sha1-tPCdQTx6282Yf26SM8e0shDJOOQ= + +parse5@^5.0.0: + version "5.1.1" + resolved "https://registry.yarnpkg.com/parse5/-/parse5-5.1.1.tgz#f68e4e5ba1852ac2cadc00f4555fff6c2abb6178" + integrity sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug== + +parse5@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/parse5/-/parse5-6.0.1.tgz#e1a1c085c569b3dc08321184f19a39cc27f7c30b" + integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw== + +parseurl@~1.3.2, parseurl@~1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.3.tgz#9da19e7bee8d12dff0513ed5b76957793bc2e8d4" + integrity sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ== + +pascal-case@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/pascal-case/-/pascal-case-3.1.1.tgz#5ac1975133ed619281e88920973d2cd1f279de5f" + integrity sha512-XIeHKqIrsquVTQL2crjq3NfJUxmdLasn3TYOU0VBM+UX2a6ztAWBlJQBePLGY7VHW8+2dRadeIPK5+KImwTxQA== + dependencies: + no-case "^3.0.3" + tslib "^1.10.0" + +pascalcase@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/pascalcase/-/pascalcase-0.1.1.tgz#b363e55e8006ca6fe21784d2db22bd15d7917f14" + integrity sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ= + +password-prompt@^1.0.7, password-prompt@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/password-prompt/-/password-prompt-1.1.2.tgz#85b2f93896c5bd9e9f2d6ff0627fa5af3dc00923" + integrity 
sha512-bpuBhROdrhuN3E7G/koAju0WjVw9/uQOG5Co5mokNj0MiOSBVZS1JTwM4zl55hu0WFmIEFvO9cU9sJQiBIYeIA== + dependencies: + ansi-escapes "^3.1.0" + cross-spawn "^6.0.5" + +path-browserify@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/path-browserify/-/path-browserify-0.0.1.tgz#e6c4ddd7ed3aa27c68a20cc4e50e1a4ee83bbc4a" + integrity sha512-BapA40NHICOS+USX9SN4tyhq+A2RrN/Ws5F0Z5aMHDp98Fl86lX8Oti8B7uN93L4Ifv4fHOEA+pQw87gmMO/lQ== + +path-dirname@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/path-dirname/-/path-dirname-1.0.2.tgz#cc33d24d525e099a5388c0336c6e32b9160609e0" + integrity sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA= + +path-exists@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515" + integrity sha1-zg6+ql94yxiSXqfYENe1mwEP1RU= + +path-exists@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" + integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== + +path-is-absolute@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18= + +path-is-inside@1.0.2, path-is-inside@^1.0.1, path-is-inside@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53" + integrity sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM= + +path-key@^2.0.0, path-key@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-2.0.1.tgz#411cadb574c5a140d3a4b1910d40d80cc9f40b40" + integrity sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A= + +path-key@^3.0.0, path-key@^3.1.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" + integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== + +path-parse@^1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.6.tgz#d62dbb5679405d72c4737ec58600e9ddcf06d24c" + integrity sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw== + +path-to-regexp@0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" + integrity sha1-32BBeABfUi8V60SQ5yR6G/qmf4w= + +path-to-regexp@2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-2.2.1.tgz#90b617025a16381a879bc82a38d4e8bdeb2bcf45" + integrity sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ== + +path-to-regexp@^1.7.0: + version "1.8.0" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-1.8.0.tgz#887b3ba9d84393e87a0a0b9f4cb756198b53548a" + integrity sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA== + dependencies: + isarray "0.0.1" + +path-type@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-3.0.0.tgz#cef31dc8e0a1a3bb0d105c0cd97cf3bf47f4e36f" + integrity sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg== + dependencies: + pify "^3.0.0" + +path-type@^4.0.0: + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" + integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== + +pbkdf2@^3.0.3: + version "3.1.1" + resolved "https://registry.yarnpkg.com/pbkdf2/-/pbkdf2-3.1.1.tgz#cb8724b0fada984596856d1a6ebafd3584654b94" + integrity sha512-4Ejy1OPxi9f2tt1rRV7Go7zmfDQ+ZectEQz3VGUQhgq62HtIRPDyG/JtnwIxs6x3uNMwo2V7q1fMvKjb+Tnpqg== + dependencies: + create-hash "^1.1.2" + create-hmac "^1.1.4" + ripemd160 "^2.0.1" + safe-buffer "^5.0.1" + sha.js "^2.4.8" + +pend@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/pend/-/pend-1.2.0.tgz#7a57eb550a6783f9115331fcf4663d5c8e007a50" + integrity sha1-elfrVQpng/kRUzH89GY9XI4AelA= + +perfect-scrollbar@^1.4.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/perfect-scrollbar/-/perfect-scrollbar-1.5.0.tgz#821d224ed8ff61990c23f26db63048cdc75b6b83" + integrity sha512-NrNHJn5mUGupSiheBTy6x+6SXCFbLlm8fVZh9moIzw/LgqElN5q4ncR4pbCBCYuCJ8Kcl9mYM0NgDxvW+b4LxA== + +picomatch@^2.0.4, picomatch@^2.0.5, picomatch@^2.2.1: + version "2.2.2" + resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.2.2.tgz#21f333e9b6b8eaff02468f5146ea406d345f4dad" + integrity sha512-q0M/9eZHzmr0AulXyPwNfZjtwZ/RBZlbN3K3CErVrk50T2ASYI7Bye0EvekFY3IP1Nt2DHu0re+V2ZHIpMkuWg== + +pify@^2.0.0, pify@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" + integrity sha1-7RQaasBDqEnqWISY59yosVMw6Qw= + +pify@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-3.0.0.tgz#e5a4acd2c101fdf3d9a4d07f0dbc4db49dd28176" + integrity sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY= + +pify@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/pify/-/pify-4.0.1.tgz#4b2cd25c50d598735c50292224fd8c6df41e3231" + integrity sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g== + +pinkie-promise@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa" + integrity sha1-ITXW36ejWMBprJsXh3YogihFD/o= + dependencies: + pinkie "^2.0.0" + +pinkie@^2.0.0: + version "2.0.4" + resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870" + integrity sha1-clVrgM+g1IqXToDnckjoDtT3+HA= + +pkg-dir@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-3.0.0.tgz#2749020f239ed990881b1f71210d51eb6523bea3" + integrity sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw== + dependencies: + find-up "^3.0.0" + +pkg-dir@^4.1.0, pkg-dir@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-4.2.0.tgz#f099133df7ede422e81d1d8448270eeb3e4261f3" + integrity sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ== + dependencies: + find-up "^4.0.0" + +pkg-up@3.1.0, pkg-up@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/pkg-up/-/pkg-up-3.1.0.tgz#100ec235cc150e4fd42519412596a28512a0def5" + integrity sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA== + dependencies: + find-up "^3.0.0" + +pkg-up@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/pkg-up/-/pkg-up-2.0.0.tgz#c819ac728059a461cab1c3889a2be3c49a004d7f" + integrity sha1-yBmscoBZpGHKscOImivjxJoATX8= + dependencies: + find-up "^2.1.0" 
+ +pnp-webpack-plugin@^1.6.4: + version "1.6.4" + resolved "https://registry.yarnpkg.com/pnp-webpack-plugin/-/pnp-webpack-plugin-1.6.4.tgz#c9711ac4dc48a685dabafc86f8b6dd9f8df84149" + integrity sha512-7Wjy+9E3WwLOEL30D+m8TSTF7qJJUJLONBnwQp0518siuMxUQUbgZwssaFX+QKlZkjHZcw/IpZCt/H0srrntSg== + dependencies: + ts-pnp "^1.1.6" + +polished@^3.4.4: + version "3.6.5" + resolved "https://registry.yarnpkg.com/polished/-/polished-3.6.5.tgz#dbefdde64c675935ec55119fe2a2ab627ca82e9c" + integrity sha512-VwhC9MlhW7O5dg/z7k32dabcAFW1VI2+7fSe8cE/kXcfL7mVdoa5UxciYGW2sJU78ldDLT6+ROEKIZKFNTnUXQ== + dependencies: + "@babel/runtime" "^7.9.2" + +portfinder@^1.0.26: + version "1.0.26" + resolved "https://registry.yarnpkg.com/portfinder/-/portfinder-1.0.26.tgz#475658d56ca30bed72ac7f1378ed350bd1b64e70" + integrity sha512-Xi7mKxJHHMI3rIUrnm/jjUgwhbYMkp/XKEcZX3aG4BrumLpq3nmoQMX+ClYnDZnZ/New7IatC1no5RX0zo1vXQ== + dependencies: + async "^2.6.2" + debug "^3.1.1" + mkdirp "^0.5.1" + +posix-character-classes@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/posix-character-classes/-/posix-character-classes-0.1.1.tgz#01eac0fe3b5af71a2a6c02feabb8c1fef7e00eab" + integrity sha1-AerA/jta9xoqbAL+q7jB/vfgDqs= + +postcss-attribute-case-insensitive@^4.0.1: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-attribute-case-insensitive/-/postcss-attribute-case-insensitive-4.0.2.tgz#d93e46b504589e94ac7277b0463226c68041a880" + integrity sha512-clkFxk/9pcdb4Vkn0hAHq3YnxBQ2p0CGD1dy24jN+reBck+EWxMbxSUqN4Yj7t0w8csl87K6p0gxBe1utkJsYA== + dependencies: + postcss "^7.0.2" + postcss-selector-parser "^6.0.2" + +postcss-calc@^7.0.1: + version "7.0.2" + resolved "https://registry.yarnpkg.com/postcss-calc/-/postcss-calc-7.0.2.tgz#504efcd008ca0273120568b0792b16cdcde8aac1" + integrity sha512-rofZFHUg6ZIrvRwPeFktv06GdbDYLcGqh9EwiMutZg+a0oePCCw1zHOEiji6LCpyRcjTREtPASuUqeAvYlEVvQ== + dependencies: + postcss "^7.0.27" + postcss-selector-parser "^6.0.2" + postcss-value-parser "^4.0.2" + +postcss-color-functional-notation@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/postcss-color-functional-notation/-/postcss-color-functional-notation-2.0.1.tgz#5efd37a88fbabeb00a2966d1e53d98ced93f74e0" + integrity sha512-ZBARCypjEDofW4P6IdPVTLhDNXPRn8T2s1zHbZidW6rPaaZvcnCS2soYFIQJrMZSxiePJ2XIYTlcb2ztr/eT2g== + dependencies: + postcss "^7.0.2" + postcss-values-parser "^2.0.0" + +postcss-color-gray@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/postcss-color-gray/-/postcss-color-gray-5.0.0.tgz#532a31eb909f8da898ceffe296fdc1f864be8547" + integrity sha512-q6BuRnAGKM/ZRpfDascZlIZPjvwsRye7UDNalqVz3s7GDxMtqPY6+Q871liNxsonUw8oC61OG+PSaysYpl1bnw== + dependencies: + "@csstools/convert-colors" "^1.4.0" + postcss "^7.0.5" + postcss-values-parser "^2.0.0" + +postcss-color-hex-alpha@^5.0.3: + version "5.0.3" + resolved "https://registry.yarnpkg.com/postcss-color-hex-alpha/-/postcss-color-hex-alpha-5.0.3.tgz#a8d9ca4c39d497c9661e374b9c51899ef0f87388" + integrity sha512-PF4GDel8q3kkreVXKLAGNpHKilXsZ6xuu+mOQMHWHLPNyjiUBOr75sp5ZKJfmv1MCus5/DWUGcK9hm6qHEnXYw== + dependencies: + postcss "^7.0.14" + postcss-values-parser "^2.0.1" + +postcss-color-mod-function@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/postcss-color-mod-function/-/postcss-color-mod-function-3.0.3.tgz#816ba145ac11cc3cb6baa905a75a49f903e4d31d" + integrity sha512-YP4VG+xufxaVtzV6ZmhEtc+/aTXH3d0JLpnYfxqTvwZPbJhWqp8bSY3nfNzNRFLgB4XSaBA82OE4VjOOKpCdVQ== + dependencies: + "@csstools/convert-colors" "^1.4.0" + postcss "^7.0.2" + 
postcss-values-parser "^2.0.0" + +postcss-color-rebeccapurple@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-color-rebeccapurple/-/postcss-color-rebeccapurple-4.0.1.tgz#c7a89be872bb74e45b1e3022bfe5748823e6de77" + integrity sha512-aAe3OhkS6qJXBbqzvZth2Au4V3KieR5sRQ4ptb2b2O8wgvB3SJBsdG+jsn2BZbbwekDG8nTfcCNKcSfe/lEy8g== + dependencies: + postcss "^7.0.2" + postcss-values-parser "^2.0.0" + +postcss-colormin@^4.0.3: + version "4.0.3" + resolved "https://registry.yarnpkg.com/postcss-colormin/-/postcss-colormin-4.0.3.tgz#ae060bce93ed794ac71264f08132d550956bd381" + integrity sha512-WyQFAdDZpExQh32j0U0feWisZ0dmOtPl44qYmJKkq9xFWY3p+4qnRzCHeNrkeRhwPHz9bQ3mo0/yVkaply0MNw== + dependencies: + browserslist "^4.0.0" + color "^3.0.0" + has "^1.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-convert-values@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-convert-values/-/postcss-convert-values-4.0.1.tgz#ca3813ed4da0f812f9d43703584e449ebe189a7f" + integrity sha512-Kisdo1y77KUC0Jmn0OXU/COOJbzM8cImvw1ZFsBgBgMgb1iL23Zs/LXRe3r+EZqM3vGYKdQ2YJVQ5VkJI+zEJQ== + dependencies: + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-custom-media@^7.0.8: + version "7.0.8" + resolved "https://registry.yarnpkg.com/postcss-custom-media/-/postcss-custom-media-7.0.8.tgz#fffd13ffeffad73621be5f387076a28b00294e0c" + integrity sha512-c9s5iX0Ge15o00HKbuRuTqNndsJUbaXdiNsksnVH8H4gdc+zbLzr/UasOwNG6CTDpLFekVY4672eWdiiWu2GUg== + dependencies: + postcss "^7.0.14" + +postcss-custom-properties@^8.0.11: + version "8.0.11" + resolved "https://registry.yarnpkg.com/postcss-custom-properties/-/postcss-custom-properties-8.0.11.tgz#2d61772d6e92f22f5e0d52602df8fae46fa30d97" + integrity sha512-nm+o0eLdYqdnJ5abAJeXp4CEU1c1k+eB2yMCvhgzsds/e0umabFrN6HoTy/8Q4K5ilxERdl/JD1LO5ANoYBeMA== + dependencies: + postcss "^7.0.17" + postcss-values-parser "^2.0.1" + +postcss-custom-selectors@^5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/postcss-custom-selectors/-/postcss-custom-selectors-5.1.2.tgz#64858c6eb2ecff2fb41d0b28c9dd7b3db4de7fba" + integrity sha512-DSGDhqinCqXqlS4R7KGxL1OSycd1lydugJ1ky4iRXPHdBRiozyMHrdu0H3o7qNOCiZwySZTUI5MV0T8QhCLu+w== + dependencies: + postcss "^7.0.2" + postcss-selector-parser "^5.0.0-rc.3" + +postcss-dir-pseudo-class@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/postcss-dir-pseudo-class/-/postcss-dir-pseudo-class-5.0.0.tgz#6e3a4177d0edb3abcc85fdb6fbb1c26dabaeaba2" + integrity sha512-3pm4oq8HYWMZePJY+5ANriPs3P07q+LW6FAdTlkFH2XqDdP4HeeJYMOzn0HYLhRSjBO3fhiqSwwU9xEULSrPgw== + dependencies: + postcss "^7.0.2" + postcss-selector-parser "^5.0.0-rc.3" + +postcss-discard-comments@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-discard-comments/-/postcss-discard-comments-4.0.2.tgz#1fbabd2c246bff6aaad7997b2b0918f4d7af4033" + integrity sha512-RJutN259iuRf3IW7GZyLM5Sw4GLTOH8FmsXBnv8Ab/Tc2k4SR4qbV4DNbyyY4+Sjo362SyDmW2DQ7lBSChrpkg== + dependencies: + postcss "^7.0.0" + +postcss-discard-duplicates@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-discard-duplicates/-/postcss-discard-duplicates-4.0.2.tgz#3fe133cd3c82282e550fc9b239176a9207b784eb" + integrity sha512-ZNQfR1gPNAiXZhgENFfEglF93pciw0WxMkJeVmw8eF+JZBbMD7jp6C67GqJAXVZP2BWbOztKfbsdmMp/k8c6oQ== + dependencies: + postcss "^7.0.0" + +postcss-discard-empty@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-discard-empty/-/postcss-discard-empty-4.0.1.tgz#c8c951e9f73ed9428019458444a02ad90bb9f765" + 
integrity sha512-B9miTzbznhDjTfjvipfHoqbWKwd0Mj+/fL5s1QOz06wufguil+Xheo4XpOnc4NqKYBCNqqEzgPv2aPBIJLox0w== + dependencies: + postcss "^7.0.0" + +postcss-discard-overridden@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-discard-overridden/-/postcss-discard-overridden-4.0.1.tgz#652aef8a96726f029f5e3e00146ee7a4e755ff57" + integrity sha512-IYY2bEDD7g1XM1IDEsUT4//iEYCxAmP5oDSFMVU/JVvT7gh+l4fmjciLqGgwjdWpQIdb0Che2VX00QObS5+cTg== + dependencies: + postcss "^7.0.0" + +postcss-double-position-gradients@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/postcss-double-position-gradients/-/postcss-double-position-gradients-1.0.0.tgz#fc927d52fddc896cb3a2812ebc5df147e110522e" + integrity sha512-G+nV8EnQq25fOI8CH/B6krEohGWnF5+3A6H/+JEpOncu5dCnkS1QQ6+ct3Jkaepw1NGVqqOZH6lqrm244mCftA== + dependencies: + postcss "^7.0.5" + postcss-values-parser "^2.0.0" + +postcss-env-function@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/postcss-env-function/-/postcss-env-function-2.0.2.tgz#0f3e3d3c57f094a92c2baf4b6241f0b0da5365d7" + integrity sha512-rwac4BuZlITeUbiBq60h/xbLzXY43qOsIErngWa4l7Mt+RaSkT7QBjXVGTcBHupykkblHMDrBFh30zchYPaOUw== + dependencies: + postcss "^7.0.2" + postcss-values-parser "^2.0.0" + +postcss-focus-visible@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/postcss-focus-visible/-/postcss-focus-visible-4.0.0.tgz#477d107113ade6024b14128317ade2bd1e17046e" + integrity sha512-Z5CkWBw0+idJHSV6+Bgf2peDOFf/x4o+vX/pwcNYrWpXFrSfTkQ3JQ1ojrq9yS+upnAlNRHeg8uEwFTgorjI8g== + dependencies: + postcss "^7.0.2" + +postcss-focus-within@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/postcss-focus-within/-/postcss-focus-within-3.0.0.tgz#763b8788596cee9b874c999201cdde80659ef680" + integrity sha512-W0APui8jQeBKbCGZudW37EeMCjDeVxKgiYfIIEo8Bdh5SpB9sxds/Iq8SEuzS0Q4YFOlG7EPFulbbxujpkrV2w== + dependencies: + postcss "^7.0.2" + +postcss-font-variant@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/postcss-font-variant/-/postcss-font-variant-4.0.0.tgz#71dd3c6c10a0d846c5eda07803439617bbbabacc" + integrity sha512-M8BFYKOvCrI2aITzDad7kWuXXTm0YhGdP9Q8HanmN4EF1Hmcgs1KK5rSHylt/lUJe8yLxiSwWAHdScoEiIxztg== + dependencies: + postcss "^7.0.2" + +postcss-gap-properties@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/postcss-gap-properties/-/postcss-gap-properties-2.0.0.tgz#431c192ab3ed96a3c3d09f2ff615960f902c1715" + integrity sha512-QZSqDaMgXCHuHTEzMsS2KfVDOq7ZFiknSpkrPJY6jmxbugUPTuSzs/vuE5I3zv0WAS+3vhrlqhijiprnuQfzmg== + dependencies: + postcss "^7.0.2" + +postcss-image-set-function@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/postcss-image-set-function/-/postcss-image-set-function-3.0.1.tgz#28920a2f29945bed4c3198d7df6496d410d3f288" + integrity sha512-oPTcFFip5LZy8Y/whto91L9xdRHCWEMs3e1MdJxhgt4jy2WYXfhkng59fH5qLXSCPN8k4n94p1Czrfe5IOkKUw== + dependencies: + postcss "^7.0.2" + postcss-values-parser "^2.0.0" + +postcss-initial@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/postcss-initial/-/postcss-initial-3.0.2.tgz#f018563694b3c16ae8eaabe3c585ac6319637b2d" + integrity sha512-ugA2wKonC0xeNHgirR4D3VWHs2JcU08WAi1KFLVcnb7IN89phID6Qtg2RIctWbnvp1TM2BOmDtX8GGLCKdR8YA== + dependencies: + lodash.template "^4.5.0" + postcss "^7.0.2" + +postcss-lab-function@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/postcss-lab-function/-/postcss-lab-function-2.0.1.tgz#bb51a6856cd12289ab4ae20db1e3821ef13d7d2e" + integrity 
sha512-whLy1IeZKY+3fYdqQFuDBf8Auw+qFuVnChWjmxm/UhHWqNHZx+B99EwxTvGYmUBqe3Fjxs4L1BoZTJmPu6usVg== + dependencies: + "@csstools/convert-colors" "^1.4.0" + postcss "^7.0.2" + postcss-values-parser "^2.0.0" + +postcss-load-config@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/postcss-load-config/-/postcss-load-config-2.1.0.tgz#c84d692b7bb7b41ddced94ee62e8ab31b417b003" + integrity sha512-4pV3JJVPLd5+RueiVVB+gFOAa7GWc25XQcMp86Zexzke69mKf6Nx9LRcQywdz7yZI9n1udOxmLuAwTBypypF8Q== + dependencies: + cosmiconfig "^5.0.0" + import-cwd "^2.0.0" + +postcss-loader@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/postcss-loader/-/postcss-loader-3.0.0.tgz#6b97943e47c72d845fa9e03f273773d4e8dd6c2d" + integrity sha512-cLWoDEY5OwHcAjDnkyRQzAXfs2jrKjXpO/HQFcc5b5u/r7aa471wdmChmwfnv7x2u840iat/wi0lQ5nbRgSkUA== + dependencies: + loader-utils "^1.1.0" + postcss "^7.0.0" + postcss-load-config "^2.0.0" + schema-utils "^1.0.0" + +postcss-logical@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/postcss-logical/-/postcss-logical-3.0.0.tgz#2495d0f8b82e9f262725f75f9401b34e7b45d5b5" + integrity sha512-1SUKdJc2vuMOmeItqGuNaC+N8MzBWFWEkAnRnLpFYj1tGGa7NqyVBujfRtgNa2gXR+6RkGUiB2O5Vmh7E2RmiA== + dependencies: + postcss "^7.0.2" + +postcss-media-minmax@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/postcss-media-minmax/-/postcss-media-minmax-4.0.0.tgz#b75bb6cbc217c8ac49433e12f22048814a4f5ed5" + integrity sha512-fo9moya6qyxsjbFAYl97qKO9gyre3qvbMnkOZeZwlsW6XYFsvs2DMGDlchVLfAd8LHPZDxivu/+qW2SMQeTHBw== + dependencies: + postcss "^7.0.2" + +postcss-merge-longhand@^4.0.11: + version "4.0.11" + resolved "https://registry.yarnpkg.com/postcss-merge-longhand/-/postcss-merge-longhand-4.0.11.tgz#62f49a13e4a0ee04e7b98f42bb16062ca2549e24" + integrity sha512-alx/zmoeXvJjp7L4mxEMjh8lxVlDFX1gqWHzaaQewwMZiVhLo42TEClKaeHbRf6J7j82ZOdTJ808RtN0ZOZwvw== + dependencies: + css-color-names "0.0.4" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + stylehacks "^4.0.0" + +postcss-merge-rules@^4.0.3: + version "4.0.3" + resolved "https://registry.yarnpkg.com/postcss-merge-rules/-/postcss-merge-rules-4.0.3.tgz#362bea4ff5a1f98e4075a713c6cb25aefef9a650" + integrity sha512-U7e3r1SbvYzO0Jr3UT/zKBVgYYyhAz0aitvGIYOYK5CPmkNih+WDSsS5tvPrJ8YMQYlEMvsZIiqmn7HdFUaeEQ== + dependencies: + browserslist "^4.0.0" + caniuse-api "^3.0.0" + cssnano-util-same-parent "^4.0.0" + postcss "^7.0.0" + postcss-selector-parser "^3.0.0" + vendors "^1.0.0" + +postcss-minify-font-values@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-minify-font-values/-/postcss-minify-font-values-4.0.2.tgz#cd4c344cce474343fac5d82206ab2cbcb8afd5a6" + integrity sha512-j85oO6OnRU9zPf04+PZv1LYIYOprWm6IA6zkXkrJXyRveDEuQggG6tvoy8ir8ZwjLxLuGfNkCZEQG7zan+Hbtg== + dependencies: + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-minify-gradients@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-minify-gradients/-/postcss-minify-gradients-4.0.2.tgz#93b29c2ff5099c535eecda56c4aa6e665a663471" + integrity sha512-qKPfwlONdcf/AndP1U8SJ/uzIJtowHlMaSioKzebAXSG4iJthlWC9iSWznQcX4f66gIWX44RSA841HTHj3wK+Q== + dependencies: + cssnano-util-get-arguments "^4.0.0" + is-color-stop "^1.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-minify-params@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-minify-params/-/postcss-minify-params-4.0.2.tgz#6b9cef030c11e35261f95f618c90036d680db874" + integrity 
sha512-G7eWyzEx0xL4/wiBBJxJOz48zAKV2WG3iZOqVhPet/9geefm/Px5uo1fzlHu+DOjT+m0Mmiz3jkQzVHe6wxAWg== + dependencies: + alphanum-sort "^1.0.0" + browserslist "^4.0.0" + cssnano-util-get-arguments "^4.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + uniqs "^2.0.0" + +postcss-minify-selectors@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-minify-selectors/-/postcss-minify-selectors-4.0.2.tgz#e2e5eb40bfee500d0cd9243500f5f8ea4262fbd8" + integrity sha512-D5S1iViljXBj9kflQo4YutWnJmwm8VvIsU1GeXJGiG9j8CIg9zs4voPMdQDUmIxetUOh60VilsNzCiAFTOqu3g== + dependencies: + alphanum-sort "^1.0.0" + has "^1.0.0" + postcss "^7.0.0" + postcss-selector-parser "^3.0.0" + +postcss-modules-extract-imports@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/postcss-modules-extract-imports/-/postcss-modules-extract-imports-2.0.0.tgz#818719a1ae1da325f9832446b01136eeb493cd7e" + integrity sha512-LaYLDNS4SG8Q5WAWqIJgdHPJrDDr/Lv775rMBFUbgjTz6j34lUznACHcdRWroPvXANP2Vj7yNK57vp9eFqzLWQ== + dependencies: + postcss "^7.0.5" + +postcss-modules-local-by-default@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-3.0.2.tgz#e8a6561be914aaf3c052876377524ca90dbb7915" + integrity sha512-jM/V8eqM4oJ/22j0gx4jrp63GSvDH6v86OqyTHHUvk4/k1vceipZsaymiZ5PvocqZOl5SFHiFJqjs3la0wnfIQ== + dependencies: + icss-utils "^4.1.1" + postcss "^7.0.16" + postcss-selector-parser "^6.0.2" + postcss-value-parser "^4.0.0" + +postcss-modules-scope@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/postcss-modules-scope/-/postcss-modules-scope-2.2.0.tgz#385cae013cc7743f5a7d7602d1073a89eaae62ee" + integrity sha512-YyEgsTMRpNd+HmyC7H/mh3y+MeFWevy7V1evVhJWewmMbjDHIbZbOXICC2y+m1xI1UVfIT1HMW/O04Hxyu9oXQ== + dependencies: + postcss "^7.0.6" + postcss-selector-parser "^6.0.0" + +postcss-modules-values@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/postcss-modules-values/-/postcss-modules-values-3.0.0.tgz#5b5000d6ebae29b4255301b4a3a54574423e7f10" + integrity sha512-1//E5jCBrZ9DmRX+zCtmQtRSV6PV42Ix7Bzj9GbwJceduuf7IqP8MgeTXuRDHOWj2m0VzZD5+roFWDuU8RQjcg== + dependencies: + icss-utils "^4.0.0" + postcss "^7.0.6" + +postcss-nesting@^7.0.0: + version "7.0.1" + resolved "https://registry.yarnpkg.com/postcss-nesting/-/postcss-nesting-7.0.1.tgz#b50ad7b7f0173e5b5e3880c3501344703e04c052" + integrity sha512-FrorPb0H3nuVq0Sff7W2rnc3SmIcruVC6YwpcS+k687VxyxO33iE1amna7wHuRVzM8vfiYofXSBHNAZ3QhLvYg== + dependencies: + postcss "^7.0.2" + +postcss-normalize-charset@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-normalize-charset/-/postcss-normalize-charset-4.0.1.tgz#8b35add3aee83a136b0471e0d59be58a50285dd4" + integrity sha512-gMXCrrlWh6G27U0hF3vNvR3w8I1s2wOBILvA87iNXaPvSNo5uZAMYsZG7XjCUf1eVxuPfyL4TJ7++SGZLc9A3g== + dependencies: + postcss "^7.0.0" + +postcss-normalize-display-values@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-display-values/-/postcss-normalize-display-values-4.0.2.tgz#0dbe04a4ce9063d4667ed2be476bb830c825935a" + integrity sha512-3F2jcsaMW7+VtRMAqf/3m4cPFhPD3EFRgNs18u+k3lTJJlVe7d0YPO+bnwqo2xg8YiRpDXJI2u8A0wqJxMsQuQ== + dependencies: + cssnano-util-get-match "^4.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-normalize-positions@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-positions/-/postcss-normalize-positions-4.0.2.tgz#05f757f84f260437378368a91f8932d4b102917f" + integrity 
sha512-Dlf3/9AxpxE+NF1fJxYDeggi5WwV35MXGFnnoccP/9qDtFrTArZ0D0R+iKcg5WsUd8nUYMIl8yXDCtcrT8JrdA== + dependencies: + cssnano-util-get-arguments "^4.0.0" + has "^1.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-normalize-repeat-style@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-4.0.2.tgz#c4ebbc289f3991a028d44751cbdd11918b17910c" + integrity sha512-qvigdYYMpSuoFs3Is/f5nHdRLJN/ITA7huIoCyqqENJe9PvPmLhNLMu7QTjPdtnVf6OcYYO5SHonx4+fbJE1+Q== + dependencies: + cssnano-util-get-arguments "^4.0.0" + cssnano-util-get-match "^4.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-normalize-string@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-string/-/postcss-normalize-string-4.0.2.tgz#cd44c40ab07a0c7a36dc5e99aace1eca4ec2690c" + integrity sha512-RrERod97Dnwqq49WNz8qo66ps0swYZDSb6rM57kN2J+aoyEAJfZ6bMx0sx/F9TIEX0xthPGCmeyiam/jXif0eA== + dependencies: + has "^1.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-normalize-timing-functions@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-4.0.2.tgz#8e009ca2a3949cdaf8ad23e6b6ab99cb5e7d28d9" + integrity sha512-acwJY95edP762e++00Ehq9L4sZCEcOPyaHwoaFOhIwWCDfik6YvqsYNxckee65JHLKzuNSSmAdxwD2Cud1Z54A== + dependencies: + cssnano-util-get-match "^4.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-normalize-unicode@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-normalize-unicode/-/postcss-normalize-unicode-4.0.1.tgz#841bd48fdcf3019ad4baa7493a3d363b52ae1cfb" + integrity sha512-od18Uq2wCYn+vZ/qCOeutvHjB5jm57ToxRaMeNuf0nWVHaP9Hua56QyMF6fs/4FSUnVIw0CBPsU0K4LnBPwYwg== + dependencies: + browserslist "^4.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-normalize-url@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-normalize-url/-/postcss-normalize-url-4.0.1.tgz#10e437f86bc7c7e58f7b9652ed878daaa95faae1" + integrity sha512-p5oVaF4+IHwu7VpMan/SSpmpYxcJMtkGppYf0VbdH5B6hN8YNmVyJLuY9FmLQTzY3fag5ESUUHDqM+heid0UVA== + dependencies: + is-absolute-url "^2.0.0" + normalize-url "^3.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-normalize-whitespace@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-whitespace/-/postcss-normalize-whitespace-4.0.2.tgz#bf1d4070fe4fcea87d1348e825d8cc0c5faa7d82" + integrity sha512-tO8QIgrsI3p95r8fyqKV+ufKlSHh9hMJqACqbv2XknufqEDhDvbguXGBBqxw9nsQoXWf0qOqppziKJKHMD4GtA== + dependencies: + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-ordered-values@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/postcss-ordered-values/-/postcss-ordered-values-4.1.2.tgz#0cf75c820ec7d5c4d280189559e0b571ebac0eee" + integrity sha512-2fCObh5UanxvSxeXrtLtlwVThBvHn6MQcu4ksNT2tsaV2Fg76R2CV98W7wNSlX+5/pFwEyaDwKLLoEV7uRybAw== + dependencies: + cssnano-util-get-arguments "^4.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-overflow-shorthand@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/postcss-overflow-shorthand/-/postcss-overflow-shorthand-2.0.0.tgz#31ecf350e9c6f6ddc250a78f0c3e111f32dd4c30" + integrity sha512-aK0fHc9CBNx8jbzMYhshZcEv8LtYnBIRYQD5i7w/K/wS9c2+0NSR6B3OVMu5y0hBHYLcMGjfU+dmWYNKH0I85g== + dependencies: + postcss "^7.0.2" + +postcss-page-break@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/postcss-page-break/-/postcss-page-break-2.0.0.tgz#add52d0e0a528cabe6afee8b46e2abb277df46bf" + integrity sha512-tkpTSrLpfLfD9HvgOlJuigLuk39wVTbbd8RKcy8/ugV2bNBUW3xU+AIqyxhDrQr1VUj1RmyJrBn1YWrqUm9zAQ== + dependencies: + postcss "^7.0.2" + +postcss-place@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-place/-/postcss-place-4.0.1.tgz#e9f39d33d2dc584e46ee1db45adb77ca9d1dcc62" + integrity sha512-Zb6byCSLkgRKLODj/5mQugyuj9bvAAw9LqJJjgwz5cYryGeXfFZfSXoP1UfveccFmeq0b/2xxwcTEVScnqGxBg== + dependencies: + postcss "^7.0.2" + postcss-values-parser "^2.0.0" + +postcss-preset-env@^6.7.0: + version "6.7.0" + resolved "https://registry.yarnpkg.com/postcss-preset-env/-/postcss-preset-env-6.7.0.tgz#c34ddacf8f902383b35ad1e030f178f4cdf118a5" + integrity sha512-eU4/K5xzSFwUFJ8hTdTQzo2RBLbDVt83QZrAvI07TULOkmyQlnYlpwep+2yIK+K+0KlZO4BvFcleOCCcUtwchg== + dependencies: + autoprefixer "^9.6.1" + browserslist "^4.6.4" + caniuse-lite "^1.0.30000981" + css-blank-pseudo "^0.1.4" + css-has-pseudo "^0.10.0" + css-prefers-color-scheme "^3.1.1" + cssdb "^4.4.0" + postcss "^7.0.17" + postcss-attribute-case-insensitive "^4.0.1" + postcss-color-functional-notation "^2.0.1" + postcss-color-gray "^5.0.0" + postcss-color-hex-alpha "^5.0.3" + postcss-color-mod-function "^3.0.3" + postcss-color-rebeccapurple "^4.0.1" + postcss-custom-media "^7.0.8" + postcss-custom-properties "^8.0.11" + postcss-custom-selectors "^5.1.2" + postcss-dir-pseudo-class "^5.0.0" + postcss-double-position-gradients "^1.0.0" + postcss-env-function "^2.0.2" + postcss-focus-visible "^4.0.0" + postcss-focus-within "^3.0.0" + postcss-font-variant "^4.0.0" + postcss-gap-properties "^2.0.0" + postcss-image-set-function "^3.0.1" + postcss-initial "^3.0.0" + postcss-lab-function "^2.0.1" + postcss-logical "^3.0.0" + postcss-media-minmax "^4.0.0" + postcss-nesting "^7.0.0" + postcss-overflow-shorthand "^2.0.0" + postcss-page-break "^2.0.0" + postcss-place "^4.0.1" + postcss-pseudo-class-any-link "^6.0.0" + postcss-replace-overflow-wrap "^3.0.0" + postcss-selector-matches "^4.0.0" + postcss-selector-not "^4.0.0" + +postcss-pseudo-class-any-link@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/postcss-pseudo-class-any-link/-/postcss-pseudo-class-any-link-6.0.0.tgz#2ed3eed393b3702879dec4a87032b210daeb04d1" + integrity sha512-lgXW9sYJdLqtmw23otOzrtbDXofUdfYzNm4PIpNE322/swES3VU9XlXHeJS46zT2onFO7V1QFdD4Q9LiZj8mew== + dependencies: + postcss "^7.0.2" + postcss-selector-parser "^5.0.0-rc.3" + +postcss-reduce-initial@^4.0.3: + version "4.0.3" + resolved "https://registry.yarnpkg.com/postcss-reduce-initial/-/postcss-reduce-initial-4.0.3.tgz#7fd42ebea5e9c814609639e2c2e84ae270ba48df" + integrity sha512-gKWmR5aUulSjbzOfD9AlJiHCGH6AEVLaM0AV+aSioxUDd16qXP1PCh8d1/BGVvpdWn8k/HiK7n6TjeoXN1F7DA== + dependencies: + browserslist "^4.0.0" + caniuse-api "^3.0.0" + has "^1.0.0" + postcss "^7.0.0" + +postcss-reduce-transforms@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-reduce-transforms/-/postcss-reduce-transforms-4.0.2.tgz#17efa405eacc6e07be3414a5ca2d1074681d4e29" + integrity sha512-EEVig1Q2QJ4ELpJXMZR8Vt5DQx8/mo+dGWSR7vWXqcob2gQLyQGsionYcGKATXvQzMPn6DSN1vTN7yFximdIAg== + dependencies: + cssnano-util-get-match "^4.0.0" + has "^1.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-replace-overflow-wrap@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/postcss-replace-overflow-wrap/-/postcss-replace-overflow-wrap-3.0.0.tgz#61b360ffdaedca84c7c918d2b0f0d0ea559ab01c" + integrity sha512-2T5hcEHArDT6X9+9dVSPQdo7QHzG4XKclFT8rU5TzJPDN7RIRTbO9c4drUISOVemLj03aezStHCR2AIcr8XLpw== + dependencies: + postcss "^7.0.2" + +postcss-selector-matches@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/postcss-selector-matches/-/postcss-selector-matches-4.0.0.tgz#71c8248f917ba2cc93037c9637ee09c64436fcff" + integrity sha512-LgsHwQR/EsRYSqlwdGzeaPKVT0Ml7LAT6E75T8W8xLJY62CE4S/l03BWIt3jT8Taq22kXP08s2SfTSzaraoPww== + dependencies: + balanced-match "^1.0.0" + postcss "^7.0.2" + +postcss-selector-not@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/postcss-selector-not/-/postcss-selector-not-4.0.0.tgz#c68ff7ba96527499e832724a2674d65603b645c0" + integrity sha512-W+bkBZRhqJaYN8XAnbbZPLWMvZD1wKTu0UxtFKdhtGjWYmxhkUneoeOhRJKdAE5V7ZTlnbHfCR+6bNwK9e1dTQ== + dependencies: + balanced-match "^1.0.0" + postcss "^7.0.2" + +postcss-selector-parser@^3.0.0: + version "3.1.2" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz#b310f5c4c0fdaf76f94902bbaa30db6aa84f5270" + integrity sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA== + dependencies: + dot-prop "^5.2.0" + indexes-of "^1.0.1" + uniq "^1.0.1" + +postcss-selector-parser@^5.0.0-rc.3, postcss-selector-parser@^5.0.0-rc.4: + version "5.0.0" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-5.0.0.tgz#249044356697b33b64f1a8f7c80922dddee7195c" + integrity sha512-w+zLE5Jhg6Liz8+rQOWEAwtwkyqpfnmsinXjXg6cY7YIONZZtgvE0v2O0uhQBs0peNomOJwWRKt6JBfTdTd3OQ== + dependencies: + cssesc "^2.0.0" + indexes-of "^1.0.1" + uniq "^1.0.1" + +postcss-selector-parser@^6.0.0, postcss-selector-parser@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-6.0.2.tgz#934cf799d016c83411859e09dcecade01286ec5c" + integrity sha512-36P2QR59jDTOAiIkqEprfJDsoNrvwFei3eCqKd1Y0tUsBimsq39BLp7RD+JWny3WgB1zGhJX8XVePwm9k4wdBg== + dependencies: + cssesc "^3.0.0" + indexes-of "^1.0.1" + uniq "^1.0.1" + +postcss-svgo@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-svgo/-/postcss-svgo-4.0.2.tgz#17b997bc711b333bab143aaed3b8d3d6e3d38258" + integrity sha512-C6wyjo3VwFm0QgBy+Fu7gCYOkCmgmClghO+pjcxvrcBKtiKt0uCF+hvbMO1fyv5BMImRK90SMb+dwUnfbGd+jw== + dependencies: + is-svg "^3.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + svgo "^1.0.0" + +postcss-unique-selectors@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-unique-selectors/-/postcss-unique-selectors-4.0.1.tgz#9446911f3289bfd64c6d680f073c03b1f9ee4bac" + integrity sha512-+JanVaryLo9QwZjKrmJgkI4Fn8SBgRO6WXQBJi7KiAVPlmxikB5Jzc4EvXMT2H0/m0RjrVVm9rGNhZddm/8Spg== + dependencies: + alphanum-sort "^1.0.0" + postcss "^7.0.0" + uniqs "^2.0.0" + +postcss-value-parser@^3.0.0, postcss-value-parser@^3.3.0: + version "3.3.1" + resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz#9ff822547e2893213cf1c30efa51ac5fd1ba8281" + integrity sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ== + +postcss-value-parser@^4.0.0, postcss-value-parser@^4.0.2, postcss-value-parser@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-4.1.0.tgz#443f6a20ced6481a2bda4fa8532a6e55d789a2cb" + integrity 
sha512-97DXOFbQJhk71ne5/Mt6cOu6yxsSfM0QGQyl0L25Gca4yGWEGJaig7l7gbCX623VqTBNGLRLaVUCnNkcedlRSQ== + +postcss-values-parser@^1.5.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/postcss-values-parser/-/postcss-values-parser-1.5.0.tgz#5d9fa63e2bcb0179ce48f3235303765eb89f3047" + integrity sha512-3M3p+2gMp0AH3da530TlX8kiO1nxdTnc3C6vr8dMxRLIlh8UYkz0/wcwptSXjhtx2Fr0TySI7a+BHDQ8NL7LaQ== + dependencies: + flatten "^1.0.2" + indexes-of "^1.0.1" + uniq "^1.0.1" + +postcss-values-parser@^2.0.0, postcss-values-parser@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/postcss-values-parser/-/postcss-values-parser-2.0.1.tgz#da8b472d901da1e205b47bdc98637b9e9e550e5f" + integrity sha512-2tLuBsA6P4rYTNKCXYG/71C7j1pU6pK503suYOmn4xYrQIzW+opD+7FAFNuGSdZC/3Qfy334QbeMu7MEb8gOxg== + dependencies: + flatten "^1.0.2" + indexes-of "^1.0.1" + uniq "^1.0.1" + +postcss@^7.0.0, postcss@^7.0.1, postcss@^7.0.14, postcss@^7.0.16, postcss@^7.0.17, postcss@^7.0.2, postcss@^7.0.27, postcss@^7.0.30, postcss@^7.0.32, postcss@^7.0.5, postcss@^7.0.6: + version "7.0.32" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-7.0.32.tgz#4310d6ee347053da3433db2be492883d62cec59d" + integrity sha512-03eXong5NLnNCD05xscnGKGDZ98CyzoqPSMjOe6SuoQY7Z2hIj0Ld1g/O/UQRuOle2aRtiIRDg9tDcTGAkLfKw== + dependencies: + chalk "^2.4.2" + source-map "^0.6.1" + supports-color "^6.1.0" + +precinct@^6.1.2, precinct@^6.3.1: + version "6.3.1" + resolved "https://registry.yarnpkg.com/precinct/-/precinct-6.3.1.tgz#8ad735a8afdfc48b56ed39c9ad3bf999b6b928dc" + integrity sha512-JAwyLCgTylWminoD7V0VJwMElWmwrVSR6r9HaPWCoswkB4iFzX7aNtO7VBfAVPy+NhmjKb8IF8UmlWJXzUkOIQ== + dependencies: + commander "^2.20.3" + debug "^4.1.1" + detective-amd "^3.0.0" + detective-cjs "^3.1.1" + detective-es6 "^2.1.0" + detective-less "^1.0.2" + detective-postcss "^3.0.1" + detective-sass "^3.0.1" + detective-scss "^2.0.1" + detective-stylus "^1.0.0" + detective-typescript "^5.8.0" + module-definition "^3.3.0" + node-source-walk "^4.2.0" + +precond@0.2: + version "0.2.3" + resolved "https://registry.yarnpkg.com/precond/-/precond-0.2.3.tgz#aa9591bcaa24923f1e0f4849d240f47efc1075ac" + integrity sha1-qpWRvKokkj8eD0hJ0kD0fvwQdaw= + +prelude-ls@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54" + integrity sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ= + +prepend-http@^1.0.0, prepend-http@^1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-1.0.4.tgz#d4f4562b0ce3696e41ac52d0e002e57a635dc6dc" + integrity sha1-1PRWKwzjaW5BrFLQ4ALlemNdxtw= + +prepend-http@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-2.0.0.tgz#e92434bfa5ea8c19f41cdfd401d741a3c819d897" + integrity sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc= + +pretty-error@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/pretty-error/-/pretty-error-2.1.1.tgz#5f4f87c8f91e5ae3f3ba87ab4cf5e03b1a17f1a3" + integrity sha1-X0+HyPkeWuPzuoerTPXgOxoX8aM= + dependencies: + renderkid "^2.0.1" + utila "~0.4" + +pretty-format@^24.9.0: + version "24.9.0" + resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-24.9.0.tgz#12fac31b37019a4eea3c11aa9a959eb7628aa7c9" + integrity sha512-00ZMZUiHaJrNfk33guavqgvfJS30sLYf0f8+Srklv0AMPodGGHcoHgksZ3OThYnIvOd+8yMCn0YiEOogjlgsnA== + dependencies: + "@jest/types" "^24.9.0" + ansi-regex "^4.0.0" + ansi-styles "^3.2.0" + react-is "^16.8.4" + +pretty-ms@^5.1.0: + version "5.1.0" + resolved 
"https://registry.yarnpkg.com/pretty-ms/-/pretty-ms-5.1.0.tgz#b906bdd1ec9e9799995c372e2b1c34f073f95384" + integrity sha512-4gaK1skD2gwscCfkswYQRmddUb2GJZtzDGRjHWadVHtK/DIKFufa12MvES6/xu1tVbUYeia5bmLcwJtZJQUqnw== + dependencies: + parse-ms "^2.1.0" + +pretty-time@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/pretty-time/-/pretty-time-1.1.0.tgz#ffb7429afabb8535c346a34e41873adf3d74dd0e" + integrity sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA== + +prettyjson@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/prettyjson/-/prettyjson-1.2.1.tgz#fcffab41d19cab4dfae5e575e64246619b12d289" + integrity sha1-/P+rQdGcq0365eV15kJGYZsS0ok= + dependencies: + colors "^1.1.2" + minimist "^1.2.0" + +prism-react-renderer@^1.0.1, prism-react-renderer@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/prism-react-renderer/-/prism-react-renderer-1.1.1.tgz#1c1be61b1eb9446a146ca7a50b7bcf36f2a70a44" + integrity sha512-MgMhSdHuHymNRqD6KM3eGS0PNqgK9q4QF5P0yoQQvpB6jNjeSAi3jcSAz0Sua/t9fa4xDOMar9HJbLa08gl9ug== + +prismjs@^1.19.0, prismjs@^1.20.0: + version "1.20.0" + resolved "https://registry.yarnpkg.com/prismjs/-/prismjs-1.20.0.tgz#9b685fc480a3514ee7198eac6a3bf5024319ff03" + integrity sha512-AEDjSrVNkynnw6A+B1DsFkd6AVdTnp+/WoUixFRULlCLZVRZlVQMVWio/16jv7G1FscUxQxOQhWwApgbnxr6kQ== + optionalDependencies: + clipboard "^2.0.0" + +private@^0.1.8: + version "0.1.8" + resolved "https://registry.yarnpkg.com/private/-/private-0.1.8.tgz#2381edb3689f7a53d653190060fcf822d2f368ff" + integrity sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg== + +process-nextick-args@~2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.1.tgz#7820d9b16120cc55ca9ae7792680ae7dba6d7fe2" + integrity sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag== + +process@^0.11.10: + version "0.11.10" + resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" + integrity sha1-czIwDoQBYb2j5podHZGn1LwW8YI= + +promise-inflight@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/promise-inflight/-/promise-inflight-1.0.1.tgz#98472870bf228132fcbdd868129bad12c3c029e3" + integrity sha1-mEcocL8igTL8vdhoEputEsPAKeM= + +promise@^7.1.1: + version "7.3.1" + resolved "https://registry.yarnpkg.com/promise/-/promise-7.3.1.tgz#064b72602b18f90f29192b8b1bc418ffd1ebd3bf" + integrity sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg== + dependencies: + asap "~2.0.3" + +prop-types@^15.5.0, prop-types@^15.5.4, prop-types@^15.5.8, prop-types@^15.6.2, prop-types@^15.7.2: + version "15.7.2" + resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.7.2.tgz#52c41e75b8c87e72b9d9360e0206b99dcbffa6c5" + integrity sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ== + dependencies: + loose-envify "^1.4.0" + object-assign "^4.1.1" + react-is "^16.8.1" + +property-information@^5.0.0, property-information@^5.3.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/property-information/-/property-information-5.5.0.tgz#4dc075d493061a82e2b7d096f406e076ed859943" + integrity sha512-RgEbCx2HLa1chNgvChcx+rrCWD0ctBmGSE0M7lVm1yyv4UbvbrWoXp/BkVLZefzjrRBGW8/Js6uh/BnlHXFyjA== + dependencies: + xtend "^4.0.0" + +proto-list@~1.2.1: + version "1.2.4" + resolved 
"https://registry.yarnpkg.com/proto-list/-/proto-list-1.2.4.tgz#212d5bfe1318306a420f6402b8e26ff39647a849" + integrity sha1-IS1b/hMYMGpCD2QCuOJv85ZHqEk= + +proxy-addr@~2.0.5: + version "2.0.6" + resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-2.0.6.tgz#fdc2336505447d3f2f2c638ed272caf614bbb2bf" + integrity sha512-dh/frvCBVmSsDYzw6n926jv974gddhkFPfiN8hPOi30Wax25QZyZEGveluCgliBnqmuM+UJmBErbAUFIoDbjOw== + dependencies: + forwarded "~0.1.2" + ipaddr.js "1.9.1" + +prr@~1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/prr/-/prr-1.0.1.tgz#d3fc114ba06995a45ec6893f484ceb1d78f5f476" + integrity sha1-0/wRS6BplaRexok/SEzrHXj19HY= + +pseudomap@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3" + integrity sha1-8FKijacOYYkX7wqKw0wa5aaChrM= + +public-encrypt@^4.0.0: + version "4.0.3" + resolved "https://registry.yarnpkg.com/public-encrypt/-/public-encrypt-4.0.3.tgz#4fcc9d77a07e48ba7527e7cbe0de33d0701331e0" + integrity sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q== + dependencies: + bn.js "^4.1.0" + browserify-rsa "^4.0.0" + create-hash "^1.1.0" + parse-asn1 "^5.0.0" + randombytes "^2.0.1" + safe-buffer "^5.1.2" + +pump@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/pump/-/pump-1.0.3.tgz#5dfe8311c33bbf6fc18261f9f34702c47c08a954" + integrity sha512-8k0JupWme55+9tCVE+FS5ULT3K6AbgqrGa58lTT49RpyfwwcGedHqaC5LlQNdEAumn/wFsu6aPwkuPMioy8kqw== + dependencies: + end-of-stream "^1.1.0" + once "^1.3.1" + +pump@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/pump/-/pump-2.0.1.tgz#12399add6e4cf7526d973cbc8b5ce2e2908b3909" + integrity sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA== + dependencies: + end-of-stream "^1.1.0" + once "^1.3.1" + +pump@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pump/-/pump-3.0.0.tgz#b4a2116815bde2f4e1ea602354e8c75565107a64" + integrity sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww== + dependencies: + end-of-stream "^1.1.0" + once "^1.3.1" + +pumpify@^1.3.3: + version "1.5.1" + resolved "https://registry.yarnpkg.com/pumpify/-/pumpify-1.5.1.tgz#36513be246ab27570b1a374a5ce278bfd74370ce" + integrity sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ== + dependencies: + duplexify "^3.6.0" + inherits "^2.0.3" + pump "^2.0.0" + +punycode@1.3.2: + version "1.3.2" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.3.2.tgz#9653a036fb7c1ee42342f2325cceefea3926c48d" + integrity sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0= + +punycode@^1.2.4, punycode@^1.3.2: + version "1.4.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" + integrity sha1-wNWmOycYgArY4esPpSachN1BhF4= + +punycode@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" + integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A== + +pupa@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/pupa/-/pupa-2.0.1.tgz#dbdc9ff48ffbea4a26a069b6f9f7abb051008726" + integrity sha512-hEJH0s8PXLY/cdXh66tNEQGndDrIKNqNC5xmrysZy3i5C3oEoLna7YAOad+7u125+zH1HNXUmGEkrhb3c2VriA== + dependencies: + escape-goat "^2.0.0" + +q@^1.1.2: + version "1.5.1" + resolved 
"https://registry.yarnpkg.com/q/-/q-1.5.1.tgz#7e32f75b41381291d04611f1bf14109ac00651d7" + integrity sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc= + +qs@6.7.0: + version "6.7.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.7.0.tgz#41dc1a015e3d581f1621776be31afb2876a9b1bc" + integrity sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ== + +qs@^6.9.3: + version "6.9.4" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.9.4.tgz#9090b290d1f91728d3c22e54843ca44aea5ab687" + integrity sha512-A1kFqHekCTM7cz0udomYUoYNWjBebHm/5wzU/XqrBRBNWectVH0QIiN+NEcZ0Dte5hvzHwbr8+XQmguPhJ6WdQ== + +query-string@^4.1.0: + version "4.3.4" + resolved "https://registry.yarnpkg.com/query-string/-/query-string-4.3.4.tgz#bbb693b9ca915c232515b228b1a02b609043dbeb" + integrity sha1-u7aTucqRXCMlFbIosaArYJBD2+s= + dependencies: + object-assign "^4.1.0" + strict-uri-encode "^1.0.0" + +query-string@^5.0.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/query-string/-/query-string-5.1.1.tgz#a78c012b71c17e05f2e3fa2319dd330682efb3cb" + integrity sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw== + dependencies: + decode-uri-component "^0.2.0" + object-assign "^4.1.0" + strict-uri-encode "^1.0.0" + +querystring-es3@^0.2.0: + version "0.2.1" + resolved "https://registry.yarnpkg.com/querystring-es3/-/querystring-es3-0.2.1.tgz#9ec61f79049875707d69414596fd907a4d711e73" + integrity sha1-nsYfeQSYdXB9aUFFlv2Qek1xHnM= + +querystring@0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/querystring/-/querystring-0.2.0.tgz#b209849203bb25df820da756e747005878521620" + integrity sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA= + +querystringify@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/querystringify/-/querystringify-2.1.1.tgz#60e5a5fd64a7f8bfa4d2ab2ed6fdf4c85bad154e" + integrity sha512-w7fLxIRCRT7U8Qu53jQnJyPkYZIaR4n5151KMfcJlO/A9397Wxb1amJvROTK6TOnp7PfoAmg/qXiNHI+08jRfA== + +random-bytes@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/random-bytes/-/random-bytes-1.0.0.tgz#4f68a1dc0ae58bd3fb95848c30324db75d64360b" + integrity sha1-T2ih3Arli9P7lYSMMDJNt11kNgs= + +random-item@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/random-item/-/random-item-1.0.0.tgz#16ee31626cb050c8a1686a5f0f42a6b99a2aaf11" + integrity sha1-Fu4xYmywUMihaGpfD0KmuZoqrxE= + +randombytes@^2.0.0, randombytes@^2.0.1, randombytes@^2.0.5, randombytes@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/randombytes/-/randombytes-2.1.0.tgz#df6f84372f0270dc65cdf6291349ab7a473d4f2a" + integrity sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ== + dependencies: + safe-buffer "^5.1.0" + +randomfill@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/randomfill/-/randomfill-1.0.4.tgz#c92196fc86ab42be983f1bf31778224931d61458" + integrity sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw== + dependencies: + randombytes "^2.0.5" + safe-buffer "^5.1.0" + +range-parser@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.0.tgz#f49be6b487894ddc40dcc94a322f611092e00d5e" + integrity sha1-9JvmtIeJTdxA3MlKMi9hEJLgDV4= + +range-parser@^1.2.1, range-parser@~1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.1.tgz#3cf37023d199e1c24d1a55b84800c2f3e6468031" + integrity 
sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg== + +raw-body@2.4.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.4.0.tgz#a1ce6fb9c9bc356ca52e89256ab59059e13d0332" + integrity sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q== + dependencies: + bytes "3.1.0" + http-errors "1.7.2" + iconv-lite "0.4.24" + unpipe "1.0.0" + +raw-body@^2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.4.1.tgz#30ac82f98bb5ae8c152e67149dac8d55153b168c" + integrity sha512-9WmIKF6mkvA0SLmA2Knm9+qj89e+j1zqgyn8aXGd7+nAduPoqgI9lO57SAZNn/Byzo5P7JhXTyg9PzaJbH73bA== + dependencies: + bytes "3.1.0" + http-errors "1.7.3" + iconv-lite "0.4.24" + unpipe "1.0.0" + +rc@^1.0.1, rc@^1.1.6, rc@^1.2.8: + version "1.2.8" + resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.8.tgz#cd924bf5200a075b83c188cd6b9e211b7fc0d3ed" + integrity sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw== + dependencies: + deep-extend "^0.6.0" + ini "~1.3.0" + minimist "^1.2.0" + strip-json-comments "~2.0.1" + +react-dev-utils@^10.2.1: + version "10.2.1" + resolved "https://registry.yarnpkg.com/react-dev-utils/-/react-dev-utils-10.2.1.tgz#f6de325ae25fa4d546d09df4bb1befdc6dd19c19" + integrity sha512-XxTbgJnYZmxuPtY3y/UV0D8/65NKkmaia4rXzViknVnZeVlklSh8u6TnaEYPfAi/Gh1TP4mEOXHI6jQOPbeakQ== + dependencies: + "@babel/code-frame" "7.8.3" + address "1.1.2" + browserslist "4.10.0" + chalk "2.4.2" + cross-spawn "7.0.1" + detect-port-alt "1.1.6" + escape-string-regexp "2.0.0" + filesize "6.0.1" + find-up "4.1.0" + fork-ts-checker-webpack-plugin "3.1.1" + global-modules "2.0.0" + globby "8.0.2" + gzip-size "5.1.1" + immer "1.10.0" + inquirer "7.0.4" + is-root "2.1.0" + loader-utils "1.2.3" + open "^7.0.2" + pkg-up "3.1.0" + react-error-overlay "^6.0.7" + recursive-readdir "2.2.2" + shell-quote "1.7.2" + strip-ansi "6.0.0" + text-table "0.2.0" + +react-dom@^16.8.4: + version "16.13.1" + resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-16.13.1.tgz#c1bd37331a0486c078ee54c4740720993b2e0e7f" + integrity sha512-81PIMmVLnCNLO/fFOQxdQkvEq/+Hfpv24XNJfpyZhTRfO0QcmQIF/PgCa1zCOj2w1hrn12MFLyaJ/G0+Mxtfag== + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" + prop-types "^15.6.2" + scheduler "^0.19.1" + +react-dropdown-aria@^2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/react-dropdown-aria/-/react-dropdown-aria-2.0.6.tgz#40cec5edd97a591d2f29e8c05aa8c53230e2aa6e" + integrity sha512-/9NlFopChlSKmuGL2P6S3oDwl9ddXcbNLnd1a7POov4f5/oGtSc3qBFmS4wH5xmLJe/38MhPOKF3e2q3laRi1g== + dependencies: + emotion "^9.2.6" + +react-error-overlay@^6.0.7: + version "6.0.7" + resolved "https://registry.yarnpkg.com/react-error-overlay/-/react-error-overlay-6.0.7.tgz#1dcfb459ab671d53f660a991513cb2f0a0553108" + integrity sha512-TAv1KJFh3RhqxNvhzxj6LeT5NWklP6rDr2a0jaTfsZ5wSZWHOGeqQyejUp3xxLfPt2UpyJEcVQB/zyPcmonNFA== + +react-fast-compare@^3.1.1: + version "3.2.0" + resolved "https://registry.yarnpkg.com/react-fast-compare/-/react-fast-compare-3.2.0.tgz#641a9da81b6a6320f270e89724fb45a0b39e43bb" + integrity sha512-rtGImPZ0YyLrscKI9xTpV8psd6I8VAtjKCzQDlzyDvqJA8XOW78TXYQwNRNd8g8JZnDu8q9Fu/1v4HPAVwVdHA== + +react-helmet@^6.0.0-beta: + version "6.1.0" + resolved "https://registry.yarnpkg.com/react-helmet/-/react-helmet-6.1.0.tgz#a750d5165cb13cf213e44747502652e794468726" + integrity 
sha512-4uMzEY9nlDlgxr61NL3XbKRy1hEkXmKNXhjbAIOVw5vcFrsdYbH2FEwcNyWvWinl103nXgzYNlns9ca+8kFiWw== + dependencies: + object-assign "^4.1.1" + prop-types "^15.7.2" + react-fast-compare "^3.1.1" + react-side-effect "^2.1.0" + +react-is@^16.6.0, react-is@^16.7.0, react-is@^16.8.1, react-is@^16.8.4: + version "16.13.1" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" + integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== + +react-live@^2.2.2: + version "2.2.2" + resolved "https://registry.yarnpkg.com/react-live/-/react-live-2.2.2.tgz#834edf1c11204e49fa7468166316b2e70da1a6b0" + integrity sha512-kJYAzKnPsR4oXleAX9lLsJA330BhTmSWHhr3ienZA2E/0eFDRodGl3I7sge8pp1vjc2K5Aaz73KpFUnV7Lq/DQ== + dependencies: + buble "0.19.6" + core-js "^2.4.1" + create-react-context "0.2.2" + dom-iterator "^1.0.0" + prism-react-renderer "^1.0.1" + prop-types "^15.5.8" + react-simple-code-editor "^0.10.0" + unescape "^1.0.1" + +react-loadable-ssr-addon@^0.2.3: + version "0.2.3" + resolved "https://registry.yarnpkg.com/react-loadable-ssr-addon/-/react-loadable-ssr-addon-0.2.3.tgz#55057abf95628d47727c68e966a6b3a53cde34e0" + integrity sha512-vPCqsmiafAMDcS9MLgXw3m4yMI40v1UeI8FTYJJkjf85LugKNnHf6D9yoDTzYwp8wEGF5viekwOD03ZPxSwnQQ== + +react-loadable@^5.5.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/react-loadable/-/react-loadable-5.5.0.tgz#582251679d3da86c32aae2c8e689c59f1196d8c4" + integrity sha512-C8Aui0ZpMd4KokxRdVAm2bQtI03k2RMRNzOB+IipV3yxFTSVICv7WoUr5L9ALB5BmKO1iHgZtWM8EvYG83otdg== + dependencies: + prop-types "^15.5.0" + +react-promise@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/react-promise/-/react-promise-3.0.2.tgz#1180f9e9d2e1a5362d005688c002366b4e9f2b67" + integrity sha512-Ez2aFel11b08H2HAWNnKf0GDV5ATGBmxK9UXHXxoKwCEoQey9manXDTwB2n3mhgOvMRzGH/YTHACdqQUjXf6Rw== + +react-router-config@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/react-router-config/-/react-router-config-5.1.1.tgz#0f4263d1a80c6b2dc7b9c1902c9526478194a988" + integrity sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg== + dependencies: + "@babel/runtime" "^7.1.2" + +react-router-dom@^5.1.2: + version "5.2.0" + resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-5.2.0.tgz#9e65a4d0c45e13289e66c7b17c7e175d0ea15662" + integrity sha512-gxAmfylo2QUjcwxI63RhQ5G85Qqt4voZpUXSEqCwykV0baaOTQDR1f0PmY8AELqIyVc0NEZUj0Gov5lNGcXgsA== + dependencies: + "@babel/runtime" "^7.1.2" + history "^4.9.0" + loose-envify "^1.3.1" + prop-types "^15.6.2" + react-router "5.2.0" + tiny-invariant "^1.0.2" + tiny-warning "^1.0.0" + +react-router@5.2.0, react-router@^5.1.2: + version "5.2.0" + resolved "https://registry.yarnpkg.com/react-router/-/react-router-5.2.0.tgz#424e75641ca8747fbf76e5ecca69781aa37ea293" + integrity sha512-smz1DUuFHRKdcJC0jobGo8cVbhO3x50tCL4icacOlcwDOEQPq4TMqwx3sY1TP+DvtTgz4nm3thuo7A+BK2U0Dw== + dependencies: + "@babel/runtime" "^7.1.2" + history "^4.9.0" + hoist-non-react-statics "^3.1.0" + loose-envify "^1.3.1" + mini-create-react-context "^0.4.0" + path-to-regexp "^1.7.0" + prop-types "^15.6.2" + react-is "^16.6.0" + tiny-invariant "^1.0.2" + tiny-warning "^1.0.0" + +react-side-effect@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/react-side-effect/-/react-side-effect-2.1.0.tgz#1ce4a8b4445168c487ed24dab886421f74d380d3" + integrity 
sha512-IgmcegOSi5SNX+2Snh1vqmF0Vg/CbkycU9XZbOHJlZ6kMzTmi3yc254oB1WCkgA7OQtIAoLmcSFuHTc/tlcqXg== + +react-simple-code-editor@^0.10.0: + version "0.10.0" + resolved "https://registry.yarnpkg.com/react-simple-code-editor/-/react-simple-code-editor-0.10.0.tgz#73e7ac550a928069715482aeb33ccba36efe2373" + integrity sha512-bL5W5mAxSW6+cLwqqVWY47Silqgy2DKDTR4hDBrLrUqC5BXc29YVx17l2IZk5v36VcDEq1Bszu2oHm1qBwKqBA== + +react-tabs@^3.1.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/react-tabs/-/react-tabs-3.1.1.tgz#b363a239f76046bb2158875a1e5921b11064052f" + integrity sha512-HpySC29NN1BkzBAnOC+ajfzPbTaVZcSWzMSjk56uAhPC/rBGtli8lTysR4CfPAyEE/hfweIzagOIoJ7nu80yng== + dependencies: + clsx "^1.1.0" + prop-types "^15.5.0" + +react-toggle@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/react-toggle/-/react-toggle-4.1.1.tgz#2317f67bf918ea3508a96b09dd383efd9da572af" + integrity sha512-+wXlMcSpg8SmnIXauMaZiKpR+r2wp2gMUteroejp2UTSqGTVvZLN+m9EhMzFARBKEw7KpQOwzCyfzeHeAndQGw== + dependencies: + classnames "^2.2.5" + +react@^16.8.4: + version "16.13.1" + resolved "https://registry.yarnpkg.com/react/-/react-16.13.1.tgz#2e818822f1a9743122c063d6410d85c1e3afe48e" + integrity sha512-YMZQQq32xHLX0bz5Mnibv1/LHb3Sqzngu7xstSM+vrkE5Kzr9xE0yMByK5kMoTK30YVJE61WfbxIFFvfeDKT1w== + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" + prop-types "^15.6.2" + +read-pkg-up@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-6.0.0.tgz#da75ce72762f2fa1f20c5a40d4dd80c77db969e3" + integrity sha512-odtTvLl+EXo1eTsMnoUHRmg/XmXdTkwXVxy4VFE9Kp6cCq7b3l7QMdBndND3eAFzrbSAXC/WCUOQQ9rLjifKZw== + dependencies: + find-up "^4.0.0" + read-pkg "^5.1.1" + type-fest "^0.5.0" + +read-pkg-up@^7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-7.0.1.tgz#f3a6135758459733ae2b95638056e1854e7ef507" + integrity sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg== + dependencies: + find-up "^4.1.0" + read-pkg "^5.2.0" + type-fest "^0.8.1" + +read-pkg@^5.1.1, read-pkg@^5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-5.2.0.tgz#7bf295438ca5a33e56cd30e053b34ee7250c93cc" + integrity sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg== + dependencies: + "@types/normalize-package-data" "^2.4.0" + normalize-package-data "^2.5.0" + parse-json "^5.0.0" + type-fest "^0.6.0" + +"readable-stream@1 || 2", readable-stream@^2.0.0, readable-stream@^2.0.1, readable-stream@^2.0.2, readable-stream@^2.0.5, readable-stream@^2.1.5, readable-stream@^2.2.2, readable-stream@^2.3.0, readable-stream@^2.3.3, readable-stream@^2.3.5, readable-stream@^2.3.6, readable-stream@^2.3.7, readable-stream@~2.3.6: + version "2.3.7" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.7.tgz#1eca1cf711aef814c04f62252a36a62f6cb23b57" + integrity sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw== + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.3" + isarray "~1.0.0" + process-nextick-args "~2.0.0" + safe-buffer "~5.1.1" + string_decoder "~1.1.1" + util-deprecate "~1.0.1" + +readable-stream@^3.0.6, readable-stream@^3.1.1, readable-stream@^3.4.0, readable-stream@^3.6.0: + version "3.6.0" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.0.tgz#337bbda3adc0706bd3e024426a286d4b4b2c9198" + integrity 
sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA== + dependencies: + inherits "^2.0.3" + string_decoder "^1.1.1" + util-deprecate "^1.0.1" + +readdirp@^2.0.0, readdirp@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-2.2.1.tgz#0e87622a3325aa33e892285caf8b4e846529a525" + integrity sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ== + dependencies: + graceful-fs "^4.1.11" + micromatch "^3.1.10" + readable-stream "^2.0.2" + +readdirp@^3.4.0, readdirp@~3.4.0: + version "3.4.0" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.4.0.tgz#9fdccdf9e9155805449221ac645e8303ab5b9ada" + integrity sha512-0xe001vZBnJEK+uKcj8qOhyAKPzIT+gStxWr3LCB0DwcXR5NZJ3IaC+yGnHCYzB/S7ov3m3EEbZI2zeNvX+hGQ== + dependencies: + picomatch "^2.2.1" + +rechoir@^0.6.2: + version "0.6.2" + resolved "https://registry.yarnpkg.com/rechoir/-/rechoir-0.6.2.tgz#85204b54dba82d5742e28c96756ef43af50e3384" + integrity sha1-hSBLVNuoLVdC4oyWdW70OvUOM4Q= + dependencies: + resolve "^1.1.6" + +recursive-readdir@2.2.2: + version "2.2.2" + resolved "https://registry.yarnpkg.com/recursive-readdir/-/recursive-readdir-2.2.2.tgz#9946fb3274e1628de6e36b2f6714953b4845094f" + integrity sha512-nRCcW9Sj7NuZwa2XvH9co8NPeXUBhZP7CRKJtU+cS6PW9FpCIFoI5ib0NT1ZrbNuPoRy0ylyCaUL8Gih4LSyFg== + dependencies: + minimatch "3.0.4" + +redeyed@~2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/redeyed/-/redeyed-2.1.1.tgz#8984b5815d99cb220469c99eeeffe38913e6cc0b" + integrity sha1-iYS1gV2ZyyIEacme7v/jiRPmzAs= + dependencies: + esprima "~4.0.0" + +redoc@^2.0.0-rc.31: + version "2.0.0-rc.33" + resolved "https://registry.yarnpkg.com/redoc/-/redoc-2.0.0-rc.33.tgz#df43f533bb0cc283cc209d69d2a91404a24bd8d1" + integrity sha512-1KLdnOU1aBIddgNBcEIU29h3VqXoTT493gT5hjyHg6sE91x9qEVWPYM2A+eETQFz5ygTwkBCp6xZDxVs+HIA9w== + dependencies: + "@types/node" "^13.11.1" + classnames "^2.2.6" + decko "^1.2.0" + dompurify "^2.0.8" + eventemitter3 "^4.0.0" + json-pointer "^0.6.0" + json-schema-ref-parser "^6.1.0" + lunr "2.3.8" + mark.js "^8.11.1" + marked "^0.7.0" + memoize-one "~5.1.1" + mobx-react "6.1.5" + openapi-sampler "^1.0.0-beta.16" + perfect-scrollbar "^1.4.0" + polished "^3.4.4" + prismjs "^1.19.0" + prop-types "^15.7.2" + react-dropdown-aria "^2.0.6" + react-tabs "^3.1.0" + slugify "^1.4.0" + stickyfill "^1.1.1" + swagger2openapi "^5.3.4" + tslib "^1.11.1" + url-template "^2.0.8" + +reftools@^1.1.0, reftools@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/reftools/-/reftools-1.1.3.tgz#f430d11677d81ae97b8dbb3836713bb52b1cd0a7" + integrity sha512-JTlhKmSzqE/gt5Z5RX25yZDq67MlRRtTz1gLy/NY+wPDx1e1vEJsv1PoNrpKZBwitcEMXs2k7pzmbmraP1ZMAQ== + +regenerate-unicode-properties@^8.2.0: + version "8.2.0" + resolved "https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-8.2.0.tgz#e5de7111d655e7ba60c057dbe9ff37c87e65cdec" + integrity sha512-F9DjY1vKLo/tPePDycuH3dn9H1OTPIkVD9Kz4LODu+F2C75mgjAJ7x/gwy6ZcSNRAAkhNlJSOHRe8k3p+K9WhA== + dependencies: + regenerate "^1.4.0" + +regenerate@^1.4.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.1.tgz#cad92ad8e6b591773485fbe05a485caf4f457e6f" + integrity sha512-j2+C8+NtXQgEKWk49MMP5P/u2GhnahTtVkRIHr5R5lVRlbKvmQ+oS+A5aLKWp2ma5VkT8sh6v+v4hbH0YHR66A== + +regenerator-runtime@^0.13.4: + version "0.13.5" + resolved 
"https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.5.tgz#d878a1d094b4306d10b9096484b33ebd55e26697" + integrity sha512-ZS5w8CpKFinUzOwW3c83oPeVXoNsrLsaCoLtJvAClH135j/R77RuymhiSErhm2lKcwSCIpmvIWSbDkIfAqKQlA== + +regenerator-transform@^0.14.2: + version "0.14.4" + resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.14.4.tgz#5266857896518d1616a78a0479337a30ea974cc7" + integrity sha512-EaJaKPBI9GvKpvUz2mz4fhx7WPgvwRLY9v3hlNHWmAuJHI13T4nwKnNvm5RWJzEdnI5g5UwtOww+S8IdoUC2bw== + dependencies: + "@babel/runtime" "^7.8.4" + private "^0.1.8" + +regex-not@^1.0.0, regex-not@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/regex-not/-/regex-not-1.0.2.tgz#1f4ece27e00b0b65e0247a6810e6a85d83a5752c" + integrity sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A== + dependencies: + extend-shallow "^3.0.2" + safe-regex "^1.1.0" + +regexp.prototype.flags@^1.2.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.3.0.tgz#7aba89b3c13a64509dabcf3ca8d9fbb9bdf5cb75" + integrity sha512-2+Q0C5g951OlYlJz6yu5/M33IcsESLlLfsyIaLJaG4FA2r4yP8MvVMJUUP/fVBkSpbbbZlS5gynbEWLipiiXiQ== + dependencies: + define-properties "^1.1.3" + es-abstract "^1.17.0-next.1" + +regexpu-core@^4.2.0, regexpu-core@^4.5.4, regexpu-core@^4.7.0: + version "4.7.0" + resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-4.7.0.tgz#fcbf458c50431b0bb7b45d6967b8192d91f3d938" + integrity sha512-TQ4KXRnIn6tz6tjnrXEkD/sshygKH/j5KzK86X8MkeHyZ8qst/LZ89j3X4/8HEIfHANTFIP/AbXakeRhWIl5YQ== + dependencies: + regenerate "^1.4.0" + regenerate-unicode-properties "^8.2.0" + regjsgen "^0.5.1" + regjsparser "^0.6.4" + unicode-match-property-ecmascript "^1.0.4" + unicode-match-property-value-ecmascript "^1.2.0" + +registry-auth-token@^3.0.1: + version "3.4.0" + resolved "https://registry.yarnpkg.com/registry-auth-token/-/registry-auth-token-3.4.0.tgz#d7446815433f5d5ed6431cd5dca21048f66b397e" + integrity sha512-4LM6Fw8eBQdwMYcES4yTnn2TqIasbXuwDx3um+QRs7S55aMKCBKBxvPXl2RiUjHwuJLTyYfxSpmfSAjQpcuP+A== + dependencies: + rc "^1.1.6" + safe-buffer "^5.0.1" + +registry-auth-token@^4.0.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/registry-auth-token/-/registry-auth-token-4.2.0.tgz#1d37dffda72bbecd0f581e4715540213a65eb7da" + integrity sha512-P+lWzPrsgfN+UEpDS3U8AQKg/UjZX6mQSJueZj3EK+vNESoqBSpBUD3gmu4sF9lOsjXWjF11dQKUqemf3veq1w== + dependencies: + rc "^1.2.8" + +registry-url@^3.0.3: + version "3.1.0" + resolved "https://registry.yarnpkg.com/registry-url/-/registry-url-3.1.0.tgz#3d4ef870f73dde1d77f0cf9a381432444e174942" + integrity sha1-PU74cPc93h138M+aOBQyRE4XSUI= + dependencies: + rc "^1.0.1" + +registry-url@^5.0.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/registry-url/-/registry-url-5.1.0.tgz#e98334b50d5434b81136b44ec638d9c2009c5009" + integrity sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw== + dependencies: + rc "^1.2.8" + +regjsgen@^0.5.1: + version "0.5.2" + resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.5.2.tgz#92ff295fb1deecbf6ecdab2543d207e91aa33733" + integrity sha512-OFFT3MfrH90xIW8OOSyUrk6QHD5E9JOTeGodiJeBS3J6IwlgzJMNE/1bZklWz5oTg+9dCMyEetclvCVXOPoN3A== + +regjsparser@^0.6.4: + version "0.6.4" + resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.6.4.tgz#a769f8684308401a66e9b529d2436ff4d0666272" + integrity 
sha512-64O87/dPDgfk8/RQqC4gkZoGyyWFIEUTTh80CU6CWuK5vkCGyekIx+oKcEIYtP/RAxSQltCZHCNu/mdd7fqlJw== + dependencies: + jsesc "~0.5.0" + +rehype-parse@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/rehype-parse/-/rehype-parse-6.0.2.tgz#aeb3fdd68085f9f796f1d3137ae2b85a98406964" + integrity sha512-0S3CpvpTAgGmnz8kiCyFLGuW5yA4OQhyNTm/nwPopZ7+PI11WnGl1TTWTGv/2hPEe/g2jRLlhVVSsoDH8waRug== + dependencies: + hast-util-from-parse5 "^5.0.0" + parse5 "^5.0.0" + xtend "^4.0.0" + +relateurl@^0.2.7: + version "0.2.7" + resolved "https://registry.yarnpkg.com/relateurl/-/relateurl-0.2.7.tgz#54dbf377e51440aca90a4cd274600d3ff2d888a9" + integrity sha1-VNvzd+UUQKypCkzSdGANP/LYiKk= + +remark-admonitions@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/remark-admonitions/-/remark-admonitions-1.2.1.tgz#87caa1a442aa7b4c0cafa04798ed58a342307870" + integrity sha512-Ji6p68VDvD+H1oS95Fdx9Ar5WA2wcDA4kwrrhVU7fGctC6+d3uiMICu7w7/2Xld+lnU7/gi+432+rRbup5S8ow== + dependencies: + rehype-parse "^6.0.2" + unified "^8.4.2" + unist-util-visit "^2.0.1" + +remark-collapse@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/remark-collapse/-/remark-collapse-0.1.2.tgz#7ebf0b0e0932f39a8599a754906e0a097906070b" + integrity sha1-fr8LDgky85qFmadUkG4KCXkGBws= + dependencies: + mdast-util-heading-range "^2.0.1" + mdast-util-to-string "^1.0.2" + +remark-emoji@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/remark-emoji/-/remark-emoji-2.1.0.tgz#69165d1181b98a54ad5d9ef811003d53d7ebc7db" + integrity sha512-lDddGsxXURV01WS9WAiS9rO/cedO1pvr9tahtLhr6qCGFhHG4yZSJW3Ha4Nw9Uk1hLNmUBtPC0+m45Ms+xEitg== + dependencies: + emoticon "^3.2.0" + node-emoji "^1.10.0" + unist-util-visit "^2.0.2" + +remark-footnotes@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/remark-footnotes/-/remark-footnotes-1.0.0.tgz#9c7a97f9a89397858a50033373020b1ea2aad011" + integrity sha512-X9Ncj4cj3/CIvLI2Z9IobHtVi8FVdUrdJkCNaL9kdX8ohfsi18DXHsCVd/A7ssARBdccdDb5ODnt62WuEWaM/g== + +remark-mdx@1.6.16: + version "1.6.16" + resolved "https://registry.yarnpkg.com/remark-mdx/-/remark-mdx-1.6.16.tgz#13ee40ad0614a1cc179aca3604d7f1b79e498a2f" + integrity sha512-xqZhBQ4TonFiSFpVt6SnTLRnxstu7M6pcaOibKZhqzk4zMRVacVenD7iECjfESK+72LkPm/NW+0r5ahJAg7zlQ== + dependencies: + "@babel/core" "7.10.5" + "@babel/helper-plugin-utils" "7.10.4" + "@babel/plugin-proposal-object-rest-spread" "7.10.4" + "@babel/plugin-syntax-jsx" "7.10.4" + "@mdx-js/util" "1.6.16" + is-alphabetical "1.0.4" + remark-parse "8.0.3" + unified "9.1.0" + +remark-parse@8.0.3: + version "8.0.3" + resolved "https://registry.yarnpkg.com/remark-parse/-/remark-parse-8.0.3.tgz#9c62aa3b35b79a486454c690472906075f40c7e1" + integrity sha512-E1K9+QLGgggHxCQtLt++uXltxEprmWzNfg+MxpfHsZlrddKzZ/hZyWHDbK3/Ap8HJQqYJRXP+jHczdL6q6i85Q== + dependencies: + ccount "^1.0.0" + collapse-white-space "^1.0.2" + is-alphabetical "^1.0.0" + is-decimal "^1.0.0" + is-whitespace-character "^1.0.0" + is-word-character "^1.0.0" + markdown-escapes "^1.0.0" + parse-entities "^2.0.0" + repeat-string "^1.5.4" + state-toggle "^1.0.0" + trim "0.0.1" + trim-trailing-lines "^1.0.0" + unherit "^1.0.4" + unist-util-remove-position "^2.0.0" + vfile-location "^3.0.0" + xtend "^4.0.1" + +remark-parse@^8.0.0: + version "8.0.2" + resolved "https://registry.yarnpkg.com/remark-parse/-/remark-parse-8.0.2.tgz#5999bc0b9c2e3edc038800a64ff103d0890b318b" + integrity sha512-eMI6kMRjsAGpMXXBAywJwiwAse+KNpmt+BK55Oofy4KvBZEqUDj6mWbGLJZrujoPIPPxDXzn3T9baRlpsm2jnQ== + dependencies: + ccount "^1.0.0" + 
collapse-white-space "^1.0.2" + is-alphabetical "^1.0.0" + is-decimal "^1.0.0" + is-whitespace-character "^1.0.0" + is-word-character "^1.0.0" + markdown-escapes "^1.0.0" + parse-entities "^2.0.0" + repeat-string "^1.5.4" + state-toggle "^1.0.0" + trim "0.0.1" + trim-trailing-lines "^1.0.0" + unherit "^1.0.4" + unist-util-remove-position "^2.0.0" + vfile-location "^3.0.0" + xtend "^4.0.1" + +remark-sources@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/remark-sources/-/remark-sources-1.0.3.tgz#932b7a42c4d32d04325181fb16a109dffa37c3d7" + integrity sha512-ecw4s6aD0VffjJFxwCrgcOnX1DcXVYCPuwMv8PCAw6y7vJhkPcEDq+GLLYl0yABlmJeDD1r6bcXLHDgr6vcAPA== + dependencies: + njct "^8.0.0" + unist-util-visit-children "^1.1.4" + +remark-squeeze-paragraphs@4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/remark-squeeze-paragraphs/-/remark-squeeze-paragraphs-4.0.0.tgz#76eb0e085295131c84748c8e43810159c5653ead" + integrity sha512-8qRqmL9F4nuLPIgl92XUuxI3pFxize+F1H0e/W3llTk0UsjJaj01+RrirkMw7P21RKe4X6goQhYRSvNWX+70Rw== + dependencies: + mdast-squeeze-paragraphs "^4.0.0" + +remark-stringify@^8.0.0: + version "8.1.0" + resolved "https://registry.yarnpkg.com/remark-stringify/-/remark-stringify-8.1.0.tgz#1e555f4402e445c364fb23d12fc5f5e0337ec8b7" + integrity sha512-FSPZv1ds76oAZjurhhuV5qXSUSoz6QRPuwYK38S41sLHwg4oB7ejnmZshj7qwjgYLf93kdz6BOX9j5aidNE7rA== + dependencies: + ccount "^1.0.0" + is-alphanumeric "^1.0.0" + is-decimal "^1.0.0" + is-whitespace-character "^1.0.0" + longest-streak "^2.0.1" + markdown-escapes "^1.0.0" + markdown-table "^2.0.0" + mdast-util-compact "^2.0.0" + parse-entities "^2.0.0" + repeat-string "^1.5.4" + state-toggle "^1.0.0" + stringify-entities "^3.0.0" + unherit "^1.0.4" + xtend "^4.0.1" + +remark@^12.0.0: + version "12.0.0" + resolved "https://registry.yarnpkg.com/remark/-/remark-12.0.0.tgz#d1c145c07341c9232f93b2f8539d56da15a2548c" + integrity sha512-oX4lMIS0csgk8AEbzY0h2jdR0ngiCHOpwwpxjmRa5TqAkeknY+tkhjRJGZqnCmvyuWh55/0SW5WY3R3nn3PH9A== + dependencies: + remark-parse "^8.0.0" + remark-stringify "^8.0.0" + unified "^9.0.0" + +remove-trailing-separator@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz#c24bce2a283adad5bc3f58e0d48249b92379d8ef" + integrity sha1-wkvOKig62tW8P1jg1IJJuSN52O8= + +renderkid@^2.0.1: + version "2.0.3" + resolved "https://registry.yarnpkg.com/renderkid/-/renderkid-2.0.3.tgz#380179c2ff5ae1365c522bf2fcfcff01c5b74149" + integrity sha512-z8CLQp7EZBPCwCnncgf9C4XAi3WR0dv+uWu/PjIyhhAb5d6IJ/QZqlHFprHeKT+59//V6BNUsLbvN8+2LarxGA== + dependencies: + css-select "^1.1.0" + dom-converter "^0.2" + htmlparser2 "^3.3.0" + strip-ansi "^3.0.0" + utila "^0.4.0" + +repeat-element@^1.1.2: + version "1.1.3" + resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.3.tgz#782e0d825c0c5a3bb39731f84efee6b742e6b1ce" + integrity sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g== + +repeat-string@^1.0.0, repeat-string@^1.5.4, repeat-string@^1.6.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637" + integrity sha1-jcrkcOHIirwtYA//Sndihtp15jc= + +replace-ext@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/replace-ext/-/replace-ext-1.0.0.tgz#de63128373fcbf7c3ccfa4de5a480c45a67958eb" + integrity sha1-3mMSg3P8v3w8z6TeWkgMRaZ5WOs= + +require-directory@^2.1.1: + version "2.1.1" + resolved 
"https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" + integrity sha1-jGStX9MNqxyXbiNE/+f3kqam30I= + +"require-like@>= 0.1.1": + version "0.1.2" + resolved "https://registry.yarnpkg.com/require-like/-/require-like-0.1.2.tgz#ad6f30c13becd797010c468afa775c0c0a6b47fa" + integrity sha1-rW8wwTvs15cBDEaK+ndcDAprR/o= + +require-main-filename@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-1.0.1.tgz#97f717b69d48784f5f526a6c5aa8ffdda055a4d1" + integrity sha1-l/cXtp1IeE9fUmpsWqj/3aBVpNE= + +require-main-filename@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-2.0.0.tgz#d0b329ecc7cc0f61649f62215be69af54aa8989b" + integrity sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg== + +require-package-name@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/require-package-name/-/require-package-name-2.0.1.tgz#c11e97276b65b8e2923f75dabf5fb2ef0c3841b9" + integrity sha1-wR6XJ2tluOKSP3Xav1+y7ww4Qbk= + +requires-port@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff" + integrity sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8= + +resolve-cwd@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/resolve-cwd/-/resolve-cwd-2.0.0.tgz#00a9f7387556e27038eae232caa372a6a59b665a" + integrity sha1-AKn3OHVW4nA46uIyyqNypqWbZlo= + dependencies: + resolve-from "^3.0.0" + +resolve-from@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-3.0.0.tgz#b22c7af7d9d6881bc8b6e653335eebcb0a188748" + integrity sha1-six699nWiBvItuZTM17rywoYh0g= + +resolve-from@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" + integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== + +resolve-pathname@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/resolve-pathname/-/resolve-pathname-3.0.0.tgz#99d02224d3cf263689becbb393bc560313025dcd" + integrity sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng== + +resolve-url@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a" + integrity sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo= + +resolve@^1.1.6, resolve@^1.10.0, resolve@^1.12.0, resolve@^1.3.2, resolve@^1.8.1: + version "1.17.0" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.17.0.tgz#b25941b54968231cc2d1bb76a79cb7f2c0bf8444" + integrity sha512-ic+7JYiV8Vi2yzQGFWOkiZD5Z9z7O2Zhm9XMaTxdJExKasieFCr+yXZ/WmXsckHiKl12ar0y6XiXDx3m4RHn1w== + dependencies: + path-parse "^1.0.6" + +resolve@^2.0.0-next.1: + version "2.0.0-next.1" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-2.0.0-next.1.tgz#4d96ccb89bf82d54ab037241ae053db4e92bb5f1" + integrity sha512-ZGTmuLZAW++TDjgslfUMRZcv7kXHv8z0zwxvuRWOPjnqc56HVsn1lVaqsWOZeQ8MwiilPVJLrcPVKG909QsAfA== + dependencies: + path-parse "^1.0.6" + +responselike@1.0.2, responselike@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/responselike/-/responselike-1.0.2.tgz#918720ef3b631c5642be068f15ade5a46f4ba1e7" + integrity sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec= + dependencies: + lowercase-keys "^1.0.0" + +restore-cursor@^2.0.0: + 
version "2.0.0" + resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-2.0.0.tgz#9f7ee287f82fd326d4fd162923d62129eee0dfaf" + integrity sha1-n37ih/gv0ybU/RYpI9YhKe7g368= + dependencies: + onetime "^2.0.0" + signal-exit "^3.0.2" + +restore-cursor@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-3.1.0.tgz#39f67c54b3a7a58cea5236d95cf0034239631f7e" + integrity sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA== + dependencies: + onetime "^5.1.0" + signal-exit "^3.0.2" + +ret@~0.1.10: + version "0.1.15" + resolved "https://registry.yarnpkg.com/ret/-/ret-0.1.15.tgz#b8a4825d5bdb1fc3f6f53c2bc33f81388681c7bc" + integrity sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg== + +retry@^0.12.0: + version "0.12.0" + resolved "https://registry.yarnpkg.com/retry/-/retry-0.12.0.tgz#1b42a6266a21f07421d1b0b54b7dc167b01c013b" + integrity sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs= + +reusify@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76" + integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw== + +rgb-regex@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/rgb-regex/-/rgb-regex-1.0.1.tgz#c0e0d6882df0e23be254a475e8edd41915feaeb1" + integrity sha1-wODWiC3w4jviVKR16O3UGRX+rrE= + +rgba-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/rgba-regex/-/rgba-regex-1.0.0.tgz#43374e2e2ca0968b0ef1523460b7d730ff22eeb3" + integrity sha1-QzdOLiyglosO8VI0YLfXMP8i7rM= + +rimraf@^2.5.4, rimraf@^2.6.3, rimraf@^2.7.1: + version "2.7.1" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.7.1.tgz#35797f13a7fdadc566142c29d4f07ccad483e3ec" + integrity sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w== + dependencies: + glob "^7.1.3" + +rimraf@^3.0.0, rimraf@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-3.0.2.tgz#f1a5402ba6220ad52cc1282bac1ae3aa49fd061a" + integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA== + dependencies: + glob "^7.1.3" + +ripemd160@^2.0.0, ripemd160@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/ripemd160/-/ripemd160-2.0.2.tgz#a1c1a6f624751577ba5d07914cbc92850585890c" + integrity sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA== + dependencies: + hash-base "^3.0.0" + inherits "^2.0.1" + +run-async@^2.2.0, run-async@^2.3.0, run-async@^2.4.0: + version "2.4.1" + resolved "https://registry.yarnpkg.com/run-async/-/run-async-2.4.1.tgz#8440eccf99ea3e70bd409d49aab88e10c189a455" + integrity sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ== + +run-parallel@^1.1.4, run-parallel@^1.1.9: + version "1.1.9" + resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.1.9.tgz#c9dd3a7cf9f4b2c4b6244e173a6ed866e61dd679" + integrity sha512-DEqnSRTDw/Tc3FXf49zedI638Z9onwUotBMiUFKmrO2sdFKIbXamXGQ3Axd4qgphxKB4kw/qP1w5kTxnfU1B9Q== + +run-queue@^1.0.0, run-queue@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/run-queue/-/run-queue-1.0.3.tgz#e848396f057d223f24386924618e25694161ec47" + integrity sha1-6Eg5bwV9Ij8kOGkkYY4laUFh7Ec= + dependencies: + aproba "^1.1.1" + +rx@^4.1.0: + version "4.1.0" + resolved 
"https://registry.yarnpkg.com/rx/-/rx-4.1.0.tgz#a5f13ff79ef3b740fe30aa803fb09f98805d4782" + integrity sha1-pfE/957zt0D+MKqAP7CfmIBdR4I= + +rxjs@^6.4.0: + version "6.6.2" + resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-6.6.2.tgz#8096a7ac03f2cc4fe5860ef6e572810d9e01c0d2" + integrity sha512-BHdBMVoWC2sL26w//BCu3YzKT4s2jip/WhwsGEDmeKYBhKDZeYezVUnHatYB7L85v5xs0BAQmg6BEYJEKxBabg== + dependencies: + tslib "^1.9.0" + +rxjs@^6.5.3: + version "6.5.5" + resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-6.5.5.tgz#c5c884e3094c8cfee31bf27eb87e54ccfc87f9ec" + integrity sha512-WfQI+1gohdf0Dai/Bbmk5L5ItH5tYqm3ki2c5GdWhKjalzjg93N3avFjVStyZZz+A2Em+ZxKH5bNghw9UeylGQ== + dependencies: + tslib "^1.9.0" + +rxjs@^6.6.0: + version "6.6.0" + resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-6.6.0.tgz#af2901eedf02e3a83ffa7f886240ff9018bbec84" + integrity sha512-3HMA8z/Oz61DUHe+SdOiQyzIf4tOx5oQHmMir7IZEu6TMqCLHT4LRcmNaUS0NwOz8VLvmmBduMsoaUvMaIiqzg== + dependencies: + tslib "^1.9.0" + +safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: + version "5.1.2" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" + integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== + +safe-buffer@5.2.1, safe-buffer@>=5.1.0, safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@^5.2.0, safe-buffer@~5.2.0: + version "5.2.1" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" + integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== + +safe-join@^0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/safe-join/-/safe-join-0.1.3.tgz#02ca7a7f2fed4f9cde3f72eb6ade4423bd73d506" + integrity sha512-Ylh1EWn4pmL57HRV/oi4Ye7ws5AxKkdGpyDdWsvZob5VLH8xnQpG8tqmHD5v4SdKlN7hyrBjYt7Jm3faeC+uJg== + +safe-json-stringify@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/safe-json-stringify/-/safe-json-stringify-1.2.0.tgz#356e44bc98f1f93ce45df14bcd7c01cda86e0afd" + integrity sha512-gH8eh2nZudPQO6TytOvbxnuhYBOvDBBLW52tz5q6X58lJcd/tkmqFR+5Z9adS8aJtURSXWThWy/xJtJwixErvg== + +safe-regex@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/safe-regex/-/safe-regex-1.1.0.tgz#40a3669f3b077d1e943d44629e157dd48023bf2e" + integrity sha1-QKNmnzsHfR6UPURinhV91IAjvy4= + dependencies: + ret "~0.1.10" + +"safer-buffer@>= 2.1.2 < 3", "safer-buffer@>= 2.1.2 < 3.0.0": + version "2.1.2" + resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== + +sax@1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.1.tgz#7b8e656190b228e81a66aea748480d828cd2d37a" + integrity sha1-e45lYZCyKOgaZq6nSEgNgozS03o= + +sax@>=0.6.0, sax@~1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9" + integrity sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw== + +scheduler@^0.19.1: + version "0.19.1" + resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.19.1.tgz#4f3e2ed2c1a7d65681f4c854fa8c5a1ccb40f196" + integrity sha512-n/zwRWRYSUj0/3g/otKDRPMh6qv2SYMWNq85IEa8iZyAv8od9zDYpGSnpBEjNgcMNq6Scbu5KfIPxNF72R/2EA== + dependencies: + loose-envify "^1.1.0" + object-assign 
"^4.1.1" + +schema-utils@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-1.0.0.tgz#0b79a93204d7b600d4b2850d1f66c2a34951c770" + integrity sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g== + dependencies: + ajv "^6.1.0" + ajv-errors "^1.0.0" + ajv-keywords "^3.1.0" + +schema-utils@^2.0.0, schema-utils@^2.6.5, schema-utils@^2.6.6, schema-utils@^2.7.0: + version "2.7.0" + resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-2.7.0.tgz#17151f76d8eae67fbbf77960c33c676ad9f4efc7" + integrity sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A== + dependencies: + "@types/json-schema" "^7.0.4" + ajv "^6.12.2" + ajv-keywords "^3.4.1" + +section-matter@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/section-matter/-/section-matter-1.0.0.tgz#e9041953506780ec01d59f292a19c7b850b84167" + integrity sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA== + dependencies: + extend-shallow "^2.0.1" + kind-of "^6.0.0" + +seek-bzip@^1.0.5: + version "1.0.6" + resolved "https://registry.yarnpkg.com/seek-bzip/-/seek-bzip-1.0.6.tgz#35c4171f55a680916b52a07859ecf3b5857f21c4" + integrity sha512-e1QtP3YL5tWww8uKaOCQ18UxIT2laNBXHjV/S2WYCiK4udiv8lkG89KRIoCjUagnAmCBurjF4zEVX2ByBbnCjQ== + dependencies: + commander "^2.8.1" + +select-hose@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/select-hose/-/select-hose-2.0.0.tgz#625d8658f865af43ec962bfc376a37359a4994ca" + integrity sha1-Yl2GWPhlr0Psliv8N2o3NZpJlMo= + +select@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/select/-/select-1.1.2.tgz#0e7350acdec80b1108528786ec1d4418d11b396d" + integrity sha1-DnNQrN7ICxEIUoeG7B1EGNEbOW0= + +selfsigned@^1.10.7: + version "1.10.7" + resolved "https://registry.yarnpkg.com/selfsigned/-/selfsigned-1.10.7.tgz#da5819fd049d5574f28e88a9bcc6dbc6e6f3906b" + integrity sha512-8M3wBCzeWIJnQfl43IKwOmC4H/RAp50S8DF60znzjW5GVqTcSe2vWclt7hmYVPkKPlHWOu5EaWOMZ2Y6W8ZXTA== + dependencies: + node-forge "0.9.0" + +semver-diff@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/semver-diff/-/semver-diff-2.1.0.tgz#4bbb8437c8d37e4b0cf1a68fd726ec6d645d6d36" + integrity sha1-S7uEN8jTfksM8aaP1ybsbWRdbTY= + dependencies: + semver "^5.0.3" + +semver-diff@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/semver-diff/-/semver-diff-3.1.1.tgz#05f77ce59f325e00e2706afd67bb506ddb1ca32b" + integrity sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg== + dependencies: + semver "^6.3.0" + +"semver@2 || 3 || 4 || 5", semver@^5.0.3, semver@^5.1.0, semver@^5.4.1, semver@^5.5.0, semver@^5.5.1, semver@^5.6.0: + version "5.7.1" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.1.tgz#a954f931aeba508d307bbf069eff0c01c96116f7" + integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ== + +semver@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.0.0.tgz#5f3ca35761e47e05b206c6daff2cf814f0316b8e" + integrity sha512-+GB6zVA9LWh6zovYQLALHwv5rb2PHGlJi3lfiqIHxR0uuwCgefcOJc59v9fv1w8GbStwxuuqqAjI9NMAOOgq1A== + +semver@^6.0.0, semver@^6.2.0, semver@^6.3.0: + version "6.3.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" + integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== + 
+semver@^7.1.3, semver@^7.3.2: + version "7.3.2" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.2.tgz#604962b052b81ed0786aae84389ffba70ffd3938" + integrity sha512-OrOb32TeeambH6UrhtShmF7CRDqhL6/5XpPNp2DuRH6+9QLw/orhp72j87v8Qa1ScDkvrrBNpZcDejAirJmfXQ== + +send@0.17.1: + version "0.17.1" + resolved "https://registry.yarnpkg.com/send/-/send-0.17.1.tgz#c1d8b059f7900f7466dd4938bdc44e11ddb376c8" + integrity sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg== + dependencies: + debug "2.6.9" + depd "~1.1.2" + destroy "~1.0.4" + encodeurl "~1.0.2" + escape-html "~1.0.3" + etag "~1.8.1" + fresh "0.5.2" + http-errors "~1.7.2" + mime "1.6.0" + ms "2.1.1" + on-finished "~2.3.0" + range-parser "~1.2.1" + statuses "~1.5.0" + +serialize-javascript@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-2.1.2.tgz#ecec53b0e0317bdc95ef76ab7074b7384785fa61" + integrity sha512-rs9OggEUF0V4jUSecXazOYsLfu7OGK2qIn3c7IPBiffz32XniEp/TX9Xmc9LQfK2nQ2QKHvZ2oygKUGU0lG4jQ== + +serialize-javascript@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-3.1.0.tgz#8bf3a9170712664ef2561b44b691eafe399214ea" + integrity sha512-JIJT1DGiWmIKhzRsG91aS6Ze4sFUrYbltlkg2onR5OrnNM02Kl/hnY/T4FN2omvyeBbQmMJv+K4cPOpGzOTFBg== + dependencies: + randombytes "^2.1.0" + +serve-handler@^6.1.3: + version "6.1.3" + resolved "https://registry.yarnpkg.com/serve-handler/-/serve-handler-6.1.3.tgz#1bf8c5ae138712af55c758477533b9117f6435e8" + integrity sha512-FosMqFBNrLyeiIDvP1zgO6YoTzFYHxLDEIavhlmQ+knB2Z7l1t+kGLHkZIDN7UVWqQAmKI3D20A6F6jo3nDd4w== + dependencies: + bytes "3.0.0" + content-disposition "0.5.2" + fast-url-parser "1.1.3" + mime-types "2.1.18" + minimatch "3.0.4" + path-is-inside "1.0.2" + path-to-regexp "2.2.1" + range-parser "1.2.0" + +serve-index@^1.9.1: + version "1.9.1" + resolved "https://registry.yarnpkg.com/serve-index/-/serve-index-1.9.1.tgz#d3768d69b1e7d82e5ce050fff5b453bea12a9239" + integrity sha1-03aNabHn2C5c4FD/9bRTvqEqkjk= + dependencies: + accepts "~1.3.4" + batch "0.6.1" + debug "2.6.9" + escape-html "~1.0.3" + http-errors "~1.6.2" + mime-types "~2.1.17" + parseurl "~1.3.2" + +serve-static@1.14.1: + version "1.14.1" + resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.14.1.tgz#666e636dc4f010f7ef29970a88a674320898b2f9" + integrity sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg== + dependencies: + encodeurl "~1.0.2" + escape-html "~1.0.3" + parseurl "~1.3.3" + send "0.17.1" + +set-blocking@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" + integrity sha1-BF+XgtARrppoA93TgrJDkrPYkPc= + +set-value@^2.0.0, set-value@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/set-value/-/set-value-2.0.1.tgz#a18d40530e6f07de4228c7defe4227af8cad005b" + integrity sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw== + dependencies: + extend-shallow "^2.0.1" + is-extendable "^0.1.1" + is-plain-object "^2.0.3" + split-string "^3.0.1" + +setimmediate@^1.0.4, setimmediate@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.5.tgz#290cbb232e306942d7d7ea9b83732ab7856f8285" + integrity sha1-KQy7Iy4waULX1+qbg3Mqt4VvgoU= + +setprototypeof@1.1.0: + version "1.1.0" + resolved 
"https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.1.0.tgz#d0bd85536887b6fe7c0d818cb962d9d91c54e656" + integrity sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ== + +setprototypeof@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.1.1.tgz#7e95acb24aa92f5885e0abef5ba131330d4ae683" + integrity sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw== + +setprototypeof@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.2.0.tgz#66c9a24a73f9fc28cbe66b09fed3d33dcaf1b424" + integrity sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw== + +sha.js@^2.4.0, sha.js@^2.4.8: + version "2.4.11" + resolved "https://registry.yarnpkg.com/sha.js/-/sha.js-2.4.11.tgz#37a5cf0b81ecbc6943de109ba2960d1b26584ae7" + integrity sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ== + dependencies: + inherits "^2.0.1" + safe-buffer "^5.0.1" + +shallow-clone@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/shallow-clone/-/shallow-clone-0.1.2.tgz#5909e874ba77106d73ac414cfec1ffca87d97060" + integrity sha1-WQnodLp3EG1zrEFM/sH/yofZcGA= + dependencies: + is-extendable "^0.1.1" + kind-of "^2.0.1" + lazy-cache "^0.2.3" + mixin-object "^2.0.1" + +shebang-command@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea" + integrity sha1-RKrGW2lbAzmJaMOfNj/uXer98eo= + dependencies: + shebang-regex "^1.0.0" + +shebang-command@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" + integrity sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA== + dependencies: + shebang-regex "^3.0.0" + +shebang-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3" + integrity sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM= + +shebang-regex@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" + integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== + +shell-quote@1.7.2: + version "1.7.2" + resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.7.2.tgz#67a7d02c76c9da24f99d20808fcaded0e0e04be2" + integrity sha512-mRz/m/JVscCrkMyPqHc/bczi3OQHkLTqXHEFu0zDhK/qfv3UcOA4SVmRCLmos4bhjr9ekVQubj/R7waKapmiQg== + +shelljs@^0.8.4: + version "0.8.4" + resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.8.4.tgz#de7684feeb767f8716b326078a8a00875890e3c2" + integrity sha512-7gk3UZ9kOfPLIAbslLzyWeGiEqx9e3rxwZM0KE6EL8GlGwjym9Mrlx5/p33bWTu9YG6vcS4MBxYZDHYr5lr8BQ== + dependencies: + glob "^7.0.0" + interpret "^1.0.0" + rechoir "^0.6.2" + +should-equal@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/should-equal/-/should-equal-2.0.0.tgz#6072cf83047360867e68e98b09d71143d04ee0c3" + integrity sha512-ZP36TMrK9euEuWQYBig9W55WPC7uo37qzAEmbjHz4gfyuXrEUgF8cUvQVO+w+d3OMfPvSRQJ22lSm8MQJ43LTA== + dependencies: + should-type "^1.4.0" + +should-format@^3.0.3: + version "3.0.3" + resolved 
"https://registry.yarnpkg.com/should-format/-/should-format-3.0.3.tgz#9bfc8f74fa39205c53d38c34d717303e277124f1" + integrity sha1-m/yPdPo5IFxT04w01xcwPidxJPE= + dependencies: + should-type "^1.3.0" + should-type-adaptors "^1.0.1" + +should-type-adaptors@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/should-type-adaptors/-/should-type-adaptors-1.1.0.tgz#401e7f33b5533033944d5cd8bf2b65027792e27a" + integrity sha512-JA4hdoLnN+kebEp2Vs8eBe9g7uy0zbRo+RMcU0EsNy+R+k049Ki+N5tT5Jagst2g7EAja+euFuoXFCa8vIklfA== + dependencies: + should-type "^1.3.0" + should-util "^1.0.0" + +should-type@^1.3.0, should-type@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/should-type/-/should-type-1.4.0.tgz#0756d8ce846dfd09843a6947719dfa0d4cff5cf3" + integrity sha1-B1bYzoRt/QmEOmlHcZ36DUz/XPM= + +should-util@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/should-util/-/should-util-1.0.1.tgz#fb0d71338f532a3a149213639e2d32cbea8bcb28" + integrity sha512-oXF8tfxx5cDk8r2kYqlkUJzZpDBqVY/II2WhvU0n9Y3XYvAYRmeaf1PvvIvTgPnv4KJ+ES5M0PyDq5Jp+Ygy2g== + +should@^13.2.1: + version "13.2.3" + resolved "https://registry.yarnpkg.com/should/-/should-13.2.3.tgz#96d8e5acf3e97b49d89b51feaa5ae8d07ef58f10" + integrity sha512-ggLesLtu2xp+ZxI+ysJTmNjh2U0TsC+rQ/pfED9bUZZ4DKefP27D+7YJVVTvKsmjLpIi9jAa7itwDGkDDmt1GQ== + dependencies: + should-equal "^2.0.0" + should-format "^3.0.3" + should-type "^1.4.0" + should-type-adaptors "^1.0.1" + should-util "^1.0.0" + +signal-exit@^3.0.0, signal-exit@^3.0.2: + version "3.0.3" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.3.tgz#a1410c2edd8f077b08b4e253c8eacfcaf057461c" + integrity sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA== + +simple-swizzle@^0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/simple-swizzle/-/simple-swizzle-0.2.2.tgz#a4da6b635ffcccca33f70d17cb92592de95e557a" + integrity sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo= + dependencies: + is-arrayish "^0.3.1" + +sitemap@^3.2.2: + version "3.2.2" + resolved "https://registry.yarnpkg.com/sitemap/-/sitemap-3.2.2.tgz#3f77c358fa97b555c879e457098e39910095c62b" + integrity sha512-TModL/WU4m2q/mQcrDgNANn0P4LwprM9MMvG4hu5zP4c6IIKs2YLTu6nXXnNr8ODW/WFtxKggiJ1EGn2W0GNmg== + dependencies: + lodash.chunk "^4.2.0" + lodash.padstart "^4.6.1" + whatwg-url "^7.0.0" + xmlbuilder "^13.0.0" + +slash@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-1.0.0.tgz#c41f2f6c39fc16d1cd17ad4b5d896114ae470d55" + integrity sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU= + +slash@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-2.0.0.tgz#de552851a1759df3a8f206535442f5ec4ddeab44" + integrity sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A== + +slash@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" + integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q== + +slugify@^1.4.0: + version "1.4.4" + resolved "https://registry.yarnpkg.com/slugify/-/slugify-1.4.4.tgz#2f032ffa52b1e1ca2a27737c1ce47baae3d0883a" + integrity sha512-N2+9NJ8JzfRMh6PQLrBeDEnVDQZSytE/W4BTC4fNNPmO90Uu58uNwSlIJSs+lmPgWsaAF79WLhVPe5tuy7spjw== + +snapdragon-node@^2.0.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/snapdragon-node/-/snapdragon-node-2.1.1.tgz#6c175f86ff14bdb0724563e8f3c1b021a286853b" + integrity 
sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw== + dependencies: + define-property "^1.0.0" + isobject "^3.0.0" + snapdragon-util "^3.0.1" + +snapdragon-util@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/snapdragon-util/-/snapdragon-util-3.0.1.tgz#f956479486f2acd79700693f6f7b805e45ab56e2" + integrity sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ== + dependencies: + kind-of "^3.2.0" + +snapdragon@^0.8.1: + version "0.8.2" + resolved "https://registry.yarnpkg.com/snapdragon/-/snapdragon-0.8.2.tgz#64922e7c565b0e14204ba1aa7d6964278d25182d" + integrity sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg== + dependencies: + base "^0.11.1" + debug "^2.2.0" + define-property "^0.2.5" + extend-shallow "^2.0.1" + map-cache "^0.2.2" + source-map "^0.5.6" + source-map-resolve "^0.5.0" + use "^3.1.0" + +sockjs-client@1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/sockjs-client/-/sockjs-client-1.4.0.tgz#c9f2568e19c8fd8173b4997ea3420e0bb306c7d5" + integrity sha512-5zaLyO8/nri5cua0VtOrFXBPK1jbL4+1cebT/mmKA1E1ZXOvJrII75bPu0l0k843G/+iAbhEqzyKr0w/eCCj7g== + dependencies: + debug "^3.2.5" + eventsource "^1.0.7" + faye-websocket "~0.11.1" + inherits "^2.0.3" + json3 "^3.3.2" + url-parse "^1.4.3" + +sockjs@0.3.20: + version "0.3.20" + resolved "https://registry.yarnpkg.com/sockjs/-/sockjs-0.3.20.tgz#b26a283ec562ef8b2687b44033a4eeceac75d855" + integrity sha512-SpmVOVpdq0DJc0qArhF3E5xsxvaiqGNb73XfgBpK1y3UD5gs8DSo8aCTsuT5pX8rssdc2NDIzANwP9eCAiSdTA== + dependencies: + faye-websocket "^0.10.0" + uuid "^3.4.0" + websocket-driver "0.6.5" + +sort-keys-length@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/sort-keys-length/-/sort-keys-length-1.0.1.tgz#9cb6f4f4e9e48155a6aa0671edd336ff1479a188" + integrity sha1-nLb09OnkgVWmqgZx7dM2/xR5oYg= + dependencies: + sort-keys "^1.0.0" + +sort-keys@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/sort-keys/-/sort-keys-1.1.2.tgz#441b6d4d346798f1b4e49e8920adfba0e543f9ad" + integrity sha1-RBttTTRnmPG05J6JIK37oOVD+a0= + dependencies: + is-plain-obj "^1.0.0" + +sort-keys@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/sort-keys/-/sort-keys-2.0.0.tgz#658535584861ec97d730d6cf41822e1f56684128" + integrity sha1-ZYU1WEhh7JfXMNbPQYIuH1ZoQSg= + dependencies: + is-plain-obj "^1.0.0" + +source-list-map@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/source-list-map/-/source-list-map-2.0.1.tgz#3993bd873bfc48479cca9ea3a547835c7c154b34" + integrity sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw== + +source-map-resolve@^0.5.0: + version "0.5.3" + resolved "https://registry.yarnpkg.com/source-map-resolve/-/source-map-resolve-0.5.3.tgz#190866bece7553e1f8f267a2ee82c606b5509a1a" + integrity sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw== + dependencies: + atob "^2.1.2" + decode-uri-component "^0.2.0" + resolve-url "^0.2.1" + source-map-url "^0.4.0" + urix "^0.1.0" + +source-map-support@~0.5.12: + version "0.5.19" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.19.tgz#a98b62f86dcaf4f67399648c085291ab9e8fed61" + integrity sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw== + dependencies: + buffer-from "^1.0.0" + source-map "^0.6.0" + +source-map-url@^0.4.0: + version "0.4.0" + 
resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.4.0.tgz#3e935d7ddd73631b97659956d55128e87b5084a3" + integrity sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM= + +source-map@^0.5.0, source-map@^0.5.6, source-map@^0.5.7: + version "0.5.7" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" + integrity sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w= + +source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.0, source-map@~0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" + integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== + +source-map@^0.7.2, source-map@^0.7.3: + version "0.7.3" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.7.3.tgz#5302f8169031735226544092e64981f751750383" + integrity sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ== + +sourcemap-codec@^1.4.4: + version "1.4.8" + resolved "https://registry.yarnpkg.com/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz#ea804bd94857402e6992d05a38ef1ae35a9ab4c4" + integrity sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA== + +space-separated-tokens@^1.0.0: + version "1.1.5" + resolved "https://registry.yarnpkg.com/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz#85f32c3d10d9682007e917414ddc5c26d1aa6899" + integrity sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA== + +spdx-correct@^3.0.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.1.1.tgz#dece81ac9c1e6713e5f7d1b6f17d468fa53d89a9" + integrity sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w== + dependencies: + spdx-expression-parse "^3.0.0" + spdx-license-ids "^3.0.0" + +spdx-exceptions@^2.1.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz#3f28ce1a77a00372683eade4a433183527a2163d" + integrity sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A== + +spdx-expression-parse@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz#cf70f50482eefdc98e3ce0a6833e4a53ceeba679" + integrity sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q== + dependencies: + spdx-exceptions "^2.1.0" + spdx-license-ids "^3.0.0" + +spdx-license-ids@^3.0.0: + version "3.0.5" + resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz#3694b5804567a458d3c8045842a6358632f62654" + integrity sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q== + +spdy-transport@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/spdy-transport/-/spdy-transport-3.0.0.tgz#00d4863a6400ad75df93361a1608605e5dcdcf31" + integrity sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw== + dependencies: + debug "^4.1.0" + detect-node "^2.0.4" + hpack.js "^2.1.6" + obuf "^1.1.2" + readable-stream "^3.0.6" + wbuf "^1.7.3" + +spdy@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/spdy/-/spdy-4.0.2.tgz#b74f466203a3eda452c02492b91fb9e84a27677b" + integrity sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA== + dependencies: + 
debug "^4.1.0" + handle-thing "^2.0.0" + http-deceiver "^1.2.7" + select-hose "^2.0.0" + spdy-transport "^3.0.0" + +split-string@^3.0.1, split-string@^3.0.2: + version "3.1.0" + resolved "https://registry.yarnpkg.com/split-string/-/split-string-3.1.0.tgz#7cb09dda3a86585705c64b39a6466038682e8fe2" + integrity sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw== + dependencies: + extend-shallow "^3.0.0" + +split2@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/split2/-/split2-1.1.1.tgz#162d9b18865f02ab2f2ad9585522db9b54c481f9" + integrity sha1-Fi2bGIZfAqsvKtlYVSLbm1TEgfk= + dependencies: + through2 "~2.0.0" + +sprintf-js@~1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" + integrity sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw= + +ssri@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/ssri/-/ssri-6.0.1.tgz#2a3c41b28dd45b62b63676ecb74001265ae9edd8" + integrity sha512-3Wge10hNcT1Kur4PDFwEieXSCMCJs/7WvSACcrMYrNp+b8kDL1/0wJch5Ni2WrtwEa2IO8OsVfeKIciKCDx/QA== + dependencies: + figgy-pudding "^3.5.1" + +ssri@^7.0.0: + version "7.1.0" + resolved "https://registry.yarnpkg.com/ssri/-/ssri-7.1.0.tgz#92c241bf6de82365b5c7fb4bd76e975522e1294d" + integrity sha512-77/WrDZUWocK0mvA5NTRQyveUf+wsrIc6vyrxpS8tVvYBcX215QbafrJR3KtkpskIzoFLqqNuuYQvxaMjXJ/0g== + dependencies: + figgy-pudding "^3.5.1" + minipass "^3.1.1" + +stable@^0.1.8: + version "0.1.8" + resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.8.tgz#836eb3c8382fe2936feaf544631017ce7d47a3cf" + integrity sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w== + +stack-generator@^2.0.3: + version "2.0.5" + resolved "https://registry.yarnpkg.com/stack-generator/-/stack-generator-2.0.5.tgz#fb00e5b4ee97de603e0773ea78ce944d81596c36" + integrity sha512-/t1ebrbHkrLrDuNMdeAcsvynWgoH/i4o8EGGfX7dEYDoTXOYVAkEpFdtshlvabzc6JlJ8Kf9YdFEoz7JkzGN9Q== + dependencies: + stackframe "^1.1.1" + +stack-trace@0.0.x: + version "0.0.10" + resolved "https://registry.yarnpkg.com/stack-trace/-/stack-trace-0.0.10.tgz#547c70b347e8d32b4e108ea1a2a159e5fdde19c0" + integrity sha1-VHxws0fo0ytOEI6hoqFZ5f3eGcA= + +stackframe@^1.1.1: + version "1.2.0" + resolved "https://registry.yarnpkg.com/stackframe/-/stackframe-1.2.0.tgz#52429492d63c62eb989804c11552e3d22e779303" + integrity sha512-GrdeshiRmS1YLMYgzF16olf2jJ/IzxXY9lhKOskuVziubpTYcYqyOwYeJKzQkwy7uN0fYSsbsC4RQaXf9LCrYA== + +state-toggle@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/state-toggle/-/state-toggle-1.0.3.tgz#e123b16a88e143139b09c6852221bc9815917dfe" + integrity sha512-d/5Z4/2iiCnHw6Xzghyhb+GcmF89bxwgXG60wjIiZaxnymbyOmI8Hk4VqHXiVVp6u2ysaskFfXg3ekCj4WNftQ== + +static-extend@^0.1.1: + version "0.1.2" + resolved "https://registry.yarnpkg.com/static-extend/-/static-extend-0.1.2.tgz#60809c39cbff55337226fd5e0b520f341f1fb5c6" + integrity sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY= + dependencies: + define-property "^0.2.5" + object-copy "^0.1.0" + +static-server@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/static-server/-/static-server-2.2.1.tgz#49e3cae2a001736b0ee9e95d21d3d843fc95efaa" + integrity sha512-j5eeW6higxYNmXMIT8iHjsdiViTpQDthg7o+SHsRtqdbxscdHqBHXwrXjHC8hL3F0Tsu34ApUpDkwzMBPBsrLw== + dependencies: + chalk "^0.5.1" + commander "^2.3.0" + file-size "0.0.5" + mime "^1.2.11" + opn "^5.2.0" + +"statuses@>= 1.4.0 < 2", "statuses@>= 1.5.0 < 2", statuses@~1.5.0: + version "1.5.0" + resolved 
"https://registry.yarnpkg.com/statuses/-/statuses-1.5.0.tgz#161c7dac177659fd9811f43771fa99381478628c" + integrity sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow= + +std-env@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/std-env/-/std-env-2.2.1.tgz#2ffa0fdc9e2263e0004c1211966e960948a40f6b" + integrity sha512-IjYQUinA3lg5re/YMlwlfhqNRTzMZMqE+pezevdcTaHceqx8ngEi1alX9nNCk9Sc81fy1fLDeQoaCzeiW1yBOQ== + dependencies: + ci-info "^1.6.0" + +stickyfill@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/stickyfill/-/stickyfill-1.1.1.tgz#39413fee9d025c74a7e59ceecb23784cc0f17f02" + integrity sha1-OUE/7p0CXHSn5ZzuyyN4TMDxfwI= + +stream-browserify@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/stream-browserify/-/stream-browserify-2.0.2.tgz#87521d38a44aa7ee91ce1cd2a47df0cb49dd660b" + integrity sha512-nX6hmklHs/gr2FuxYDltq8fJA1GDlxKQCz8O/IM4atRqBH8OORmBNgfvW5gG10GT/qQ9u0CzIvr2X5Pkt6ntqg== + dependencies: + inherits "~2.0.1" + readable-stream "^2.0.2" + +stream-each@^1.1.0: + version "1.2.3" + resolved "https://registry.yarnpkg.com/stream-each/-/stream-each-1.2.3.tgz#ebe27a0c389b04fbcc233642952e10731afa9bae" + integrity sha512-vlMC2f8I2u/bZGqkdfLQW/13Zihpej/7PmSiMQsbYddxuTsJp8vRe2x2FvVExZg7FaOds43ROAuFJwPR4MTZLw== + dependencies: + end-of-stream "^1.1.0" + stream-shift "^1.0.0" + +stream-http@^2.7.2: + version "2.8.3" + resolved "https://registry.yarnpkg.com/stream-http/-/stream-http-2.8.3.tgz#b2d242469288a5a27ec4fe8933acf623de6514fc" + integrity sha512-+TSkfINHDo4J+ZobQLWiMouQYB+UVYFttRA94FpEzzJ7ZdqcL4uUUQ7WkdkI4DSozGmgBUE/a47L+38PenXhUw== + dependencies: + builtin-status-codes "^3.0.0" + inherits "^2.0.1" + readable-stream "^2.3.6" + to-arraybuffer "^1.0.0" + xtend "^4.0.0" + +stream-shift@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/stream-shift/-/stream-shift-1.0.1.tgz#d7088281559ab2778424279b0877da3c392d5a3d" + integrity sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ== + +strict-uri-encode@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz#279b225df1d582b1f54e65addd4352e18faa0713" + integrity sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM= + +string-width@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3" + integrity sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M= + dependencies: + code-point-at "^1.0.0" + is-fullwidth-code-point "^1.0.0" + strip-ansi "^3.0.0" + +string-width@^2.0.0, string-width@^2.1.0, string-width@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" + integrity sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw== + dependencies: + is-fullwidth-code-point "^2.0.0" + strip-ansi "^4.0.0" + +string-width@^3.0.0, string-width@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-3.1.0.tgz#22767be21b62af1081574306f69ac51b62203961" + integrity sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w== + dependencies: + emoji-regex "^7.0.1" + is-fullwidth-code-point "^2.0.0" + strip-ansi "^5.1.0" + +string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.0.tgz#952182c46cc7b2c313d1596e623992bd163b72b5" + integrity 
sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.0" + +string.prototype.trimend@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/string.prototype.trimend/-/string.prototype.trimend-1.0.1.tgz#85812a6b847ac002270f5808146064c995fb6913" + integrity sha512-LRPxFUaTtpqYsTeNKaFOw3R4bxIzWOnbQ837QfBylo8jIxtcbK/A/sMV7Q+OAV/vWo+7s25pOE10KYSjaSO06g== + dependencies: + define-properties "^1.1.3" + es-abstract "^1.17.5" + +string.prototype.trimstart@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/string.prototype.trimstart/-/string.prototype.trimstart-1.0.1.tgz#14af6d9f34b053f7cfc89b72f8f2ee14b9039a54" + integrity sha512-XxZn+QpvrBI1FOcg6dIpxUPgWCPuNXvMD72aaRaUQv1eD4e/Qy8i/hFTe0BUmD60p/QA6bh1avmuPTfNjqVWRw== + dependencies: + define-properties "^1.1.3" + es-abstract "^1.17.5" + +string_decoder@^1.0.0, string_decoder@^1.1.1: + version "1.3.0" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e" + integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA== + dependencies: + safe-buffer "~5.2.0" + +string_decoder@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8" + integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg== + dependencies: + safe-buffer "~5.1.0" + +stringify-entities@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/stringify-entities/-/stringify-entities-3.0.1.tgz#32154b91286ab0869ab2c07696223bd23b6dbfc0" + integrity sha512-Lsk3ISA2++eJYqBMPKcr/8eby1I6L0gP0NlxF8Zja6c05yr/yCYyb2c9PwXjd08Ib3If1vn1rbs1H5ZtVuOfvQ== + dependencies: + character-entities-html4 "^1.0.0" + character-entities-legacy "^1.0.0" + is-alphanumerical "^1.0.0" + is-decimal "^1.0.2" + is-hexadecimal "^1.0.0" + +stringify-object@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/stringify-object/-/stringify-object-3.3.0.tgz#703065aefca19300d3ce88af4f5b3956d7556629" + integrity sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw== + dependencies: + get-own-enumerable-property-symbols "^3.0.0" + is-obj "^1.0.1" + is-regexp "^1.0.0" + +strip-ansi-control-characters@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/strip-ansi-control-characters/-/strip-ansi-control-characters-2.0.0.tgz#8875b5ba3a859a0a44f94e1cf7d3eda8980997b9" + integrity sha512-Q0/k5orrVGeaOlIOUn1gybGU0IcAbgHQT1faLo5hik4DqClKVSaka5xOhNNoRgtfztHVxCYxi7j71mrWom0bIw== + +strip-ansi@6.0.0, strip-ansi@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.0.tgz#0b1571dd7669ccd4f3e06e14ef1eed26225ae532" + integrity sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w== + dependencies: + ansi-regex "^5.0.0" + +strip-ansi@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.3.0.tgz#25f48ea22ca79187f3174a4db8759347bb126220" + integrity sha1-JfSOoiynkYfzF0pNuHWTR7sSYiA= + dependencies: + ansi-regex "^0.2.1" + +strip-ansi@^3.0.0, strip-ansi@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" + integrity sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8= + 
dependencies: + ansi-regex "^2.0.0" + +strip-ansi@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f" + integrity sha1-qEeQIusaw2iocTibY1JixQXuNo8= + dependencies: + ansi-regex "^3.0.0" + +strip-ansi@^5.0.0, strip-ansi@^5.1.0, strip-ansi@^5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-5.2.0.tgz#8c9a536feb6afc962bdfa5b104a5091c1ad9c0ae" + integrity sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA== + dependencies: + ansi-regex "^4.1.0" + +strip-bom-string@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/strip-bom-string/-/strip-bom-string-1.0.0.tgz#e5211e9224369fbb81d633a2f00044dc8cedad92" + integrity sha1-5SEekiQ2n7uB1jOi8ABE3IztrZI= + +strip-bom@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-3.0.0.tgz#2334c18e9c759f7bdd56fdef7e9ae3d588e68ed3" + integrity sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM= + +strip-dirs@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/strip-dirs/-/strip-dirs-2.1.0.tgz#4987736264fc344cf20f6c34aca9d13d1d4ed6c5" + integrity sha512-JOCxOeKLm2CAS73y/U4ZeZPTkE+gNVCzKt7Eox84Iej1LT/2pTWYpZKJuxwQpvX1LiZb1xokNR7RLfuBAa7T3g== + dependencies: + is-natural-number "^4.0.1" + +strip-eof@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/strip-eof/-/strip-eof-1.0.0.tgz#bb43ff5598a6eb05d89b59fcd129c983313606bf" + integrity sha1-u0P/VZim6wXYm1n80SnJgzE2Br8= + +strip-final-newline@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-2.0.0.tgz#89b852fb2fcbe936f6f4b3187afb0a12c1ab58ad" + integrity sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA== + +strip-json-comments@~2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" + integrity sha1-PFMZQukIwml8DsNEhYwobHygpgo= + +strip-outer@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/strip-outer/-/strip-outer-1.0.1.tgz#b2fd2abf6604b9d1e6013057195df836b8a9d631" + integrity sha512-k55yxKHwaXnpYGsOzg4Vl8+tDrWylxDEpknGjhTiZB8dFRU5rTo9CAzeycivxV3s+zlTKwrs6WxMxR95n26kwg== + dependencies: + escape-string-regexp "^1.0.2" + +style-to-object@0.3.0, style-to-object@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/style-to-object/-/style-to-object-0.3.0.tgz#b1b790d205991cc783801967214979ee19a76e46" + integrity sha512-CzFnRRXhzWIdItT3OmF8SQfWyahHhjq3HwcMNCNLn+N7klOOqPjMeG/4JSu77D7ypZdGvSzvkrbyeTMizz2VrA== + dependencies: + inline-style-parser "0.1.1" + +styled-components@^4.2.0: + version "4.4.1" + resolved "https://registry.yarnpkg.com/styled-components/-/styled-components-4.4.1.tgz#e0631e889f01db67df4de576fedaca463f05c2f2" + integrity sha512-RNqj14kYzw++6Sr38n7197xG33ipEOktGElty4I70IKzQF1jzaD1U4xQ+Ny/i03UUhHlC5NWEO+d8olRCDji6g== + dependencies: + "@babel/helper-module-imports" "^7.0.0" + "@babel/traverse" "^7.0.0" + "@emotion/is-prop-valid" "^0.8.1" + "@emotion/unitless" "^0.7.0" + babel-plugin-styled-components ">= 1" + css-to-react-native "^2.2.2" + memoize-one "^5.0.0" + merge-anything "^2.2.4" + prop-types "^15.5.4" + react-is "^16.6.0" + stylis "^3.5.0" + stylis-rule-sheet "^0.0.10" + supports-color "^5.5.0" + +stylehacks@^4.0.0: + version "4.0.3" + resolved 
"https://registry.yarnpkg.com/stylehacks/-/stylehacks-4.0.3.tgz#6718fcaf4d1e07d8a1318690881e8d96726a71d5" + integrity sha512-7GlLk9JwlElY4Y6a/rmbH2MhVlTyVmiJd1PfTCqFaIBEGMYNsrO/v3SeGTdhBThLg4Z+NbOk/qFMwCa+J+3p/g== + dependencies: + browserslist "^4.0.0" + postcss "^7.0.0" + postcss-selector-parser "^3.0.0" + +stylis-rule-sheet@^0.0.10: + version "0.0.10" + resolved "https://registry.yarnpkg.com/stylis-rule-sheet/-/stylis-rule-sheet-0.0.10.tgz#44e64a2b076643f4b52e5ff71efc04d8c3c4a430" + integrity sha512-nTbZoaqoBnmK+ptANthb10ZRZOGC+EmTLLUxeYIuHNkEKcmKgXX1XWKkUBT2Ac4es3NybooPe0SmvKdhKJZAuw== + +stylis@^3.5.0: + version "3.5.4" + resolved "https://registry.yarnpkg.com/stylis/-/stylis-3.5.4.tgz#f665f25f5e299cf3d64654ab949a57c768b73fbe" + integrity sha512-8/3pSmthWM7lsPBKv7NXkzn2Uc9W7NotcwGNpJaa3k7WMM1XDCA4MgT5k/8BIexd5ydZdboXtU90XH9Ec4Bv/Q== + +supports-color@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-0.2.0.tgz#d92de2694eb3f67323973d7ae3d8b55b4c22190a" + integrity sha1-2S3iaU6z9nMjlz1649i1W0wiGQo= + +supports-color@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" + integrity sha1-U10EXOa2Nj+kARcIRimZXp3zJMc= + +supports-color@^5.0.0, supports-color@^5.3.0, supports-color@^5.4.0, supports-color@^5.5.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" + integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== + dependencies: + has-flag "^3.0.0" + +supports-color@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-6.1.0.tgz#0764abc69c63d5ac842dd4867e8d025e880df8f3" + integrity sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ== + dependencies: + has-flag "^3.0.0" + +supports-color@^7.0.0, supports-color@^7.1.0: + version "7.1.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.1.0.tgz#68e32591df73e25ad1c4b49108a2ec507962bfd1" + integrity sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g== + dependencies: + has-flag "^4.0.0" + +supports-hyperlinks@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/supports-hyperlinks/-/supports-hyperlinks-1.0.1.tgz#71daedf36cc1060ac5100c351bb3da48c29c0ef7" + integrity sha512-HHi5kVSefKaJkGYXbDuKbUGRVxqnWGn3J2e39CYcNJEfWciGq2zYtOhXLTlvrOZW1QU7VX67w7fMmWafHX9Pfw== + dependencies: + has-flag "^2.0.0" + supports-color "^5.0.0" + +svg-parser@^2.0.2: + version "2.0.4" + resolved "https://registry.yarnpkg.com/svg-parser/-/svg-parser-2.0.4.tgz#fdc2e29e13951736140b76cb122c8ee6630eb6b5" + integrity sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ== + +svgo@^1.0.0, svgo@^1.2.2: + version "1.3.2" + resolved "https://registry.yarnpkg.com/svgo/-/svgo-1.3.2.tgz#b6dc511c063346c9e415b81e43401145b96d4167" + integrity sha512-yhy/sQYxR5BkC98CY7o31VGsg014AKLEPxdfhora76l36hD9Rdy5NZA/Ocn6yayNPgSamYdtX2rFJdcv07AYVw== + dependencies: + chalk "^2.4.1" + coa "^2.0.2" + css-select "^2.0.0" + css-select-base-adapter "^0.1.1" + css-tree "1.0.0-alpha.37" + csso "^4.0.2" + js-yaml "^3.13.1" + mkdirp "~0.5.1" + object.values "^1.1.0" + sax "~1.2.4" + stable "^0.1.8" + unquote "~1.1.1" + util.promisify "~1.0.0" + +swagger2openapi@^5.3.4: + version "5.4.0" + resolved 
"https://registry.yarnpkg.com/swagger2openapi/-/swagger2openapi-5.4.0.tgz#1e1c8909f7966b1f455bf1b66490093ac1c0029c" + integrity sha512-f5QqfXawiVijhjMtYqWZ55ESHPZFqrPC8L9idhIiuSX8O2qsa1i4MVGtCM3TQF+Smzr/6WfT/7zBuzG3aTgPAA== + dependencies: + better-ajv-errors "^0.6.1" + call-me-maybe "^1.0.1" + node-fetch-h2 "^2.3.0" + node-readfiles "^0.2.0" + oas-kit-common "^1.0.7" + oas-resolver "^2.3.0" + oas-schema-walker "^1.1.3" + oas-validator "^3.4.0" + reftools "^1.1.0" + yaml "^1.8.3" + yargs "^12.0.5" + +tapable@^1.0.0, tapable@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/tapable/-/tapable-1.1.3.tgz#a1fccc06b58db61fd7a45da2da44f5f3a3e67ba2" + integrity sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA== + +tar-stream@^1.5.2: + version "1.6.2" + resolved "https://registry.yarnpkg.com/tar-stream/-/tar-stream-1.6.2.tgz#8ea55dab37972253d9a9af90fdcd559ae435c555" + integrity sha512-rzS0heiNf8Xn7/mpdSVVSMAWAoy9bfb1WOTYC78Z0UQKeKa/CWS8FOq0lKGNa8DWKAn9gxjCvMLYc5PGXYlK2A== + dependencies: + bl "^1.0.0" + buffer-alloc "^1.2.0" + end-of-stream "^1.0.0" + fs-constants "^1.0.0" + readable-stream "^2.3.0" + to-buffer "^1.1.1" + xtend "^4.0.0" + +tar-stream@^2.1.2: + version "2.1.3" + resolved "https://registry.yarnpkg.com/tar-stream/-/tar-stream-2.1.3.tgz#1e2022559221b7866161660f118255e20fa79e41" + integrity sha512-Z9yri56Dih8IaK8gncVPx4Wqt86NDmQTSh49XLZgjWpGZL9GK9HKParS2scqHCC4w6X9Gh2jwaU45V47XTKwVA== + dependencies: + bl "^4.0.1" + end-of-stream "^1.4.1" + fs-constants "^1.0.0" + inherits "^2.0.3" + readable-stream "^3.1.1" + +temp-dir@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/temp-dir/-/temp-dir-1.0.0.tgz#0a7c0ea26d3a39afa7e0ebea9c1fc0bc4daa011d" + integrity sha1-CnwOom06Oa+n4OvqnB/AvE2qAR0= + +tempy@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/tempy/-/tempy-0.3.0.tgz#6f6c5b295695a16130996ad5ab01a8bd726e8bf8" + integrity sha512-WrH/pui8YCwmeiAoxV+lpRH9HpRtgBhSR2ViBPgpGb/wnYDzp21R4MN45fsCGvLROvY67o3byhJRYRONJyImVQ== + dependencies: + temp-dir "^1.0.0" + type-fest "^0.3.1" + unique-string "^1.0.0" + +term-size@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/term-size/-/term-size-1.2.0.tgz#458b83887f288fc56d6fffbfad262e26638efa69" + integrity sha1-RYuDiH8oj8Vtb/+/rSYuJmOO+mk= + dependencies: + execa "^0.7.0" + +term-size@^2.1.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/term-size/-/term-size-2.2.0.tgz#1f16adedfe9bdc18800e1776821734086fcc6753" + integrity sha512-a6sumDlzyHVJWb8+YofY4TW112G6p2FCPEAFk+59gIYHv3XHRhm9ltVQ9kli4hNWeQBwSpe8cRN25x0ROunMOw== + +terser-webpack-plugin@^1.4.3: + version "1.4.4" + resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-1.4.4.tgz#2c63544347324baafa9a56baaddf1634c8abfc2f" + integrity sha512-U4mACBHIegmfoEe5fdongHESNJWqsGU+W0S/9+BmYGVQDw1+c2Ow05TpMhxjPK1sRb7cuYq1BPl1e5YHJMTCqA== + dependencies: + cacache "^12.0.2" + find-cache-dir "^2.1.0" + is-wsl "^1.1.0" + schema-utils "^1.0.0" + serialize-javascript "^3.1.0" + source-map "^0.6.1" + terser "^4.1.2" + webpack-sources "^1.4.0" + worker-farm "^1.7.0" + +terser-webpack-plugin@^2.3.5: + version "2.3.7" + resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-2.3.7.tgz#4910ff5d1a872168cc7fa6cd3749e2b0d60a8a0b" + integrity sha512-xzYyaHUNhzgaAdBsXxk2Yvo/x1NJdslUaussK3fdpBbvttm1iIwU+c26dj9UxJcwk2c5UWt5F55MUTIA8BE7Dg== + dependencies: + cacache "^13.0.1" + find-cache-dir "^3.3.1" + jest-worker "^25.4.0" + p-limit "^2.3.0" + 
schema-utils "^2.6.6" + serialize-javascript "^3.1.0" + source-map "^0.6.1" + terser "^4.6.12" + webpack-sources "^1.4.3" + +terser@^4.1.2, terser@^4.6.12, terser@^4.6.3: + version "4.8.0" + resolved "https://registry.yarnpkg.com/terser/-/terser-4.8.0.tgz#63056343d7c70bb29f3af665865a46fe03a0df17" + integrity sha512-EAPipTNeWsb/3wLPeup1tVPaXfIaU68xMnVdPafIL1TV05OhASArYyIfFvnvJCNrR2NIOvDVNNTFRa+Re2MWyw== + dependencies: + commander "^2.20.0" + source-map "~0.6.1" + source-map-support "~0.5.12" + +text-hex@1.0.x: + version "1.0.0" + resolved "https://registry.yarnpkg.com/text-hex/-/text-hex-1.0.0.tgz#69dc9c1b17446ee79a92bf5b884bb4b9127506f5" + integrity sha512-uuVGNWzgJ4yhRaNSiubPY7OjISw4sw4E5Uv0wbjp+OzcbmVU/rsT8ujgcXJhn9ypzsgr5vlzpPqP+MBBKcGvbg== + +text-table@0.2.0, text-table@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" + integrity sha1-f17oI66AUgfACvLfSoTsP8+lcLQ= + +through2-filter@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/through2-filter/-/through2-filter-3.0.0.tgz#700e786df2367c2c88cd8aa5be4cf9c1e7831254" + integrity sha512-jaRjI2WxN3W1V8/FMZ9HKIBXixtiqs3SQSX4/YGIiP3gL6djW48VoZq9tDqeCWs3MT8YY5wb/zli8VW8snY1CA== + dependencies: + through2 "~2.0.0" + xtend "~4.0.0" + +through2-map@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/through2-map/-/through2-map-3.0.0.tgz#a6c3026ce63b4898a997d540506b66ffd970f271" + integrity sha1-psMCbOY7SJipl9VAUGtm/9lw8nE= + dependencies: + through2 "~2.0.0" + xtend "^4.0.0" + +through2@^2.0.0, through2@~2.0.0: + version "2.0.5" + resolved "https://registry.yarnpkg.com/through2/-/through2-2.0.5.tgz#01c1e39eb31d07cb7d03a96a70823260b23132cd" + integrity sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ== + dependencies: + readable-stream "~2.3.6" + xtend "~4.0.1" + +through@^2.3.6, through@^2.3.8: + version "2.3.8" + resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" + integrity sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU= + +thunky@^1.0.2: + version "1.1.0" + resolved "https://registry.yarnpkg.com/thunky/-/thunky-1.1.0.tgz#5abaf714a9405db0504732bbccd2cedd9ef9537d" + integrity sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA== + +time-zone@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/time-zone/-/time-zone-1.0.0.tgz#99c5bf55958966af6d06d83bdf3800dc82faec5d" + integrity sha1-mcW/VZWJZq9tBtg73zgA3IL67F0= + +timed-out@^4.0.0, timed-out@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/timed-out/-/timed-out-4.0.1.tgz#f32eacac5a175bea25d7fab565ab3ed8741ef56f" + integrity sha1-8y6srFoXW+ol1/q1Zas+2HQe9W8= + +timers-browserify@^2.0.4: + version "2.0.11" + resolved "https://registry.yarnpkg.com/timers-browserify/-/timers-browserify-2.0.11.tgz#800b1f3eee272e5bc53ee465a04d0e804c31211f" + integrity sha512-60aV6sgJ5YEbzUdn9c8kYGIqOubPoUdqQCul3SBAsRCZ40s6Y5cMcrW4dt3/k/EsbLVJNl9n6Vz3fTc+k2GeKQ== + dependencies: + setimmediate "^1.0.4" + +timsort@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/timsort/-/timsort-0.3.0.tgz#405411a8e7e6339fe64db9a234de11dc31e02bd4" + integrity sha1-QFQRqOfmM5/mTbmiNN4R3DHgK9Q= + +tiny-emitter@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/tiny-emitter/-/tiny-emitter-2.1.0.tgz#1d1a56edfc51c43e863cbb5382a72330e3555423" + integrity 
sha512-NB6Dk1A9xgQPMoGqC5CVXn123gWyte215ONT5Pp5a0yt4nlEoO1ZWeCwpncaekPHXO60i47ihFnZPiRPjRMq4Q== + +tiny-invariant@^1.0.2: + version "1.1.0" + resolved "https://registry.yarnpkg.com/tiny-invariant/-/tiny-invariant-1.1.0.tgz#634c5f8efdc27714b7f386c35e6760991d230875" + integrity sha512-ytxQvrb1cPc9WBEI/HSeYYoGD0kWnGEOR8RY6KomWLBVhqz0RgTwVO9dLrGz7dC+nN9llyI7OKAgRq8Vq4ZBSw== + +tiny-warning@^1.0.0, tiny-warning@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/tiny-warning/-/tiny-warning-1.0.3.tgz#94a30db453df4c643d0fd566060d60a875d84754" + integrity sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA== + +tmp@^0.0.33: + version "0.0.33" + resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" + integrity sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw== + dependencies: + os-tmpdir "~1.0.2" + +to-arraybuffer@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz#7d229b1fcc637e466ca081180836a7aabff83f43" + integrity sha1-fSKbH8xjfkZsoIEYCDanqr/4P0M= + +to-buffer@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/to-buffer/-/to-buffer-1.1.1.tgz#493bd48f62d7c43fcded313a03dcadb2e1213a80" + integrity sha512-lx9B5iv7msuFYE3dytT+KE5tap+rNYw+K4jVkb9R/asAb+pbBSM17jtunHplhBe6RRJdZx3Pn2Jph24O32mOVg== + +to-fast-properties@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e" + integrity sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4= + +to-object-path@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/to-object-path/-/to-object-path-0.3.0.tgz#297588b7b0e7e0ac08e04e672f85c1f4999e17af" + integrity sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68= + dependencies: + kind-of "^3.0.2" + +to-readable-stream@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/to-readable-stream/-/to-readable-stream-1.0.0.tgz#ce0aa0c2f3df6adf852efb404a783e77c0475771" + integrity sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q== + +to-regex-range@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-2.1.1.tgz#7c80c17b9dfebe599e27367e0d4dd5590141db38" + integrity sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg= + dependencies: + is-number "^3.0.0" + repeat-string "^1.6.1" + +to-regex-range@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4" + integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ== + dependencies: + is-number "^7.0.0" + +to-regex@^3.0.1, to-regex@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/to-regex/-/to-regex-3.0.2.tgz#13cfdd9b336552f30b51f33a8ae1b42a7a7599ce" + integrity sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw== + dependencies: + define-property "^2.0.2" + extend-shallow "^3.0.2" + regex-not "^1.0.2" + safe-regex "^1.1.0" + +toidentifier@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.0.tgz#7e1be3470f1e77948bc43d94a3c8f4d7752ba553" + integrity sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw== + +toml@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/toml/-/toml-3.0.0.tgz#342160f1af1904ec9d204d03a5d61222d762c5ee" + integrity sha512-y/mWCZinnvxjTKYhJ+pYxwD0mRLVvOtdS2Awbgxln6iEnt4rk0yBxeSBHkGJcPucRiG0e55mwWp+g/05rsrd6w== + +tomlify-j0.4@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/tomlify-j0.4/-/tomlify-j0.4-3.0.0.tgz#99414d45268c3a3b8bf38be82145b7bba34b7473" + integrity sha512-2Ulkc8T7mXJ2l0W476YC/A209PR38Nw8PuaCNtk9uI3t1zzFdGQeWYGQvmj2PZkVvRC/Yoi4xQKMRnWc/N29tQ== + +touch@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/touch/-/touch-2.0.2.tgz#ca0b2a3ae3211246a61b16ba9e6cbf1596287164" + integrity sha512-qjNtvsFXTRq7IuMLweVgFxmEuQ6gLbRs2jQxL80TtZ31dEKWYIxRXquij6w6VimyDek5hD3PytljHmEtAs2u0A== + dependencies: + nopt "~1.0.10" + +tr46@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/tr46/-/tr46-1.0.1.tgz#a8b13fd6bfd2489519674ccde55ba3693b706d09" + integrity sha1-qLE/1r/SSJUZZ0zN5VujaTtwbQk= + dependencies: + punycode "^2.1.0" + +treeify@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/treeify/-/treeify-1.1.0.tgz#4e31c6a463accd0943879f30667c4fdaff411bb8" + integrity sha512-1m4RA7xVAJrSGrrXGs0L3YTwyvBs2S8PbRHaLZAkFw7JR8oIFwYtysxlBZhYIa7xSyiYJKZ3iGrrk55cGA3i9A== + +trim-lines@^1.0.0: + version "1.1.3" + resolved "https://registry.yarnpkg.com/trim-lines/-/trim-lines-1.1.3.tgz#839514be82428fd9e7ec89e35081afe8f6f93115" + integrity sha512-E0ZosSWYK2mkSu+KEtQ9/KqarVjA9HztOSX+9FDdNacRAq29RRV6ZQNgob3iuW8Htar9vAfEa6yyt5qBAHZDBA== + +trim-repeated@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/trim-repeated/-/trim-repeated-1.0.0.tgz#e3646a2ea4e891312bf7eace6cfb05380bc01c21" + integrity sha1-42RqLqTokTEr9+rObPsFOAvAHCE= + dependencies: + escape-string-regexp "^1.0.2" + +trim-trailing-lines@^1.0.0: + version "1.1.3" + resolved "https://registry.yarnpkg.com/trim-trailing-lines/-/trim-trailing-lines-1.1.3.tgz#7f0739881ff76657b7776e10874128004b625a94" + integrity sha512-4ku0mmjXifQcTVfYDfR5lpgV7zVqPg6zV9rdZmwOPqq0+Zq19xDqEgagqVbc4pOOShbncuAOIs59R3+3gcF3ZA== + +trim@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/trim/-/trim-0.0.1.tgz#5858547f6b290757ee95cccc666fb50084c460dd" + integrity sha1-WFhUf2spB1fulczMZm+1AITEYN0= + +triple-beam@^1.2.0, triple-beam@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/triple-beam/-/triple-beam-1.3.0.tgz#a595214c7298db8339eeeee083e4d10bd8cb8dd9" + integrity sha512-XrHUvV5HpdLmIj4uVMxHggLbFSZYIn7HEWsqePZcI50pco+MPqJ50wMGY794X7AOOhxOBAjbkqfAbEe/QMp2Lw== + +trough@^1.0.0: + version "1.0.5" + resolved "https://registry.yarnpkg.com/trough/-/trough-1.0.5.tgz#b8b639cefad7d0bb2abd37d433ff8293efa5f406" + integrity sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA== + +tryer@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/tryer/-/tryer-1.0.1.tgz#f2c85406800b9b0f74c9f7465b81eaad241252f8" + integrity sha512-c3zayb8/kWWpycWYg87P71E1S1ZL6b6IJxfb5fvsUgsf0S2MVGaDhDXXjDMpdCpfWXqptc+4mXwmiy1ypXqRAA== + +ts-pnp@^1.1.6: + version "1.2.0" + resolved "https://registry.yarnpkg.com/ts-pnp/-/ts-pnp-1.2.0.tgz#a500ad084b0798f1c3071af391e65912c86bca92" + integrity sha512-csd+vJOb/gkzvcCHgTGSChYpy5f1/XKNsmvBGO4JXS+z1v2HobugDz4s1IeFXM3wZB44uczs+eazB5Q/ccdhQw== + +tslib@^1, tslib@^1.10.0, tslib@^1.11.1, tslib@^1.8.1, tslib@^1.9.0, tslib@^1.9.3: + version "1.13.0" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.13.0.tgz#c881e13cc7015894ed914862d276436fa9a47043" + integrity 
sha512-i/6DQjL8Xf3be4K/E6Wgpekn5Qasl1usyw++dAA35Ue5orEn65VIxOA+YvNNl9HV3qv70T7CNwjODHZrLwvd1Q== + +tslib@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.0.0.tgz#18d13fc2dce04051e20f074cc8387fd8089ce4f3" + integrity sha512-lTqkx847PI7xEDYJntxZH89L2/aXInsyF2luSafe/+0fHOMjlBNXdH6th7f70qxLDhul7KZK0zC8V5ZIyHl0/g== + +tsutils@^3.17.1: + version "3.17.1" + resolved "https://registry.yarnpkg.com/tsutils/-/tsutils-3.17.1.tgz#ed719917f11ca0dee586272b2ac49e015a2dd759" + integrity sha512-kzeQ5B8H3w60nFY2g8cJIuH7JDpsALXySGtwGJ0p2LSjLgay3NdIpqq5SoOBe46bKDW2iq25irHCr8wjomUS2g== + dependencies: + tslib "^1.8.1" + +tty-browserify@0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/tty-browserify/-/tty-browserify-0.0.0.tgz#a157ba402da24e9bf957f9aa69d524eed42901a6" + integrity sha1-oVe6QC2iTpv5V/mqadUk7tQpAaY= + +tunnel-agent@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" + integrity sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0= + dependencies: + safe-buffer "^5.0.1" + +type-check@~0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72" + integrity sha1-WITKtRLPHTVeP7eE8wgEsrUg23I= + dependencies: + prelude-ls "~1.1.2" + +type-fest@^0.11.0: + version "0.11.0" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.11.0.tgz#97abf0872310fed88a5c466b25681576145e33f1" + integrity sha512-OdjXJxnCN1AvyLSzeKIgXTXxV+99ZuXl3Hpo9XpJAv9MBcHrrJOQ5kV7ypXOuQie+AmWG25hLbiKdwYTifzcfQ== + +type-fest@^0.3.0, type-fest@^0.3.1: + version "0.3.1" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.3.1.tgz#63d00d204e059474fe5e1b7c011112bbd1dc29e1" + integrity sha512-cUGJnCdr4STbePCgqNFbpVNCepa+kAVohJs1sLhxzdH+gnEoOd8VhbYa7pD3zZYGiURWM2xzEII3fQcRizDkYQ== + +type-fest@^0.5.0: + version "0.5.2" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.5.2.tgz#d6ef42a0356c6cd45f49485c3b6281fc148e48a2" + integrity sha512-DWkS49EQKVX//Tbupb9TFa19c7+MK1XmzkrZUR8TAktmE/DizXoaoJV6TZ/tSIPXipqNiRI6CyAe7x69Jb6RSw== + +type-fest@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.6.0.tgz#8d2a2370d3df886eb5c90ada1c5bf6188acf838b" + integrity sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg== + +type-fest@^0.8.0, type-fest@^0.8.1: + version "0.8.1" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.8.1.tgz#09e249ebde851d3b1e48d27c105444667f17b83d" + integrity sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA== + +type-is@~1.6.17, type-is@~1.6.18: + version "1.6.18" + resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.18.tgz#4e552cd05df09467dcbc4ef739de89f2cf37c131" + integrity sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g== + dependencies: + media-typer "0.3.0" + mime-types "~2.1.24" + +typedarray-to-buffer@^3.1.5: + version "3.1.5" + resolved "https://registry.yarnpkg.com/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz#a97ee7a9ff42691b9f783ff1bc5112fe3fca9080" + integrity sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q== + dependencies: + is-typedarray "^1.0.0" + +typedarray@^0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777" + integrity 
sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c= + +typescript@^3.8.3: + version "3.9.7" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-3.9.7.tgz#98d600a5ebdc38f40cb277522f12dc800e9e25fa" + integrity sha512-BLbiRkiBzAwsjut4x/dsibSTB6yWpwT5qWmC2OfuCg3GgVQCSgMs4vEctYPhsaGtd0AeuuHMkjZ2h2WG8MSzRw== + +ua-parser-js@^0.7.18: + version "0.7.21" + resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.21.tgz#853cf9ce93f642f67174273cc34565ae6f308777" + integrity sha512-+O8/qh/Qj8CgC6eYBVBykMrNtp5Gebn4dlGD/kKXVkJNDwyrAwSIqwz8CDf+tsAIWVycKcku6gIXJ0qwx/ZXaQ== + +uid-safe@2.1.5: + version "2.1.5" + resolved "https://registry.yarnpkg.com/uid-safe/-/uid-safe-2.1.5.tgz#2b3d5c7240e8fc2e58f8aa269e5ee49c0857bd3a" + integrity sha512-KPHm4VL5dDXKz01UuEd88Df+KzynaohSL9fBh096KWAxSKZQDI2uBrVqtvRM4rwrIrRRKsdLNML/lnaaVSRioA== + dependencies: + random-bytes "~1.0.0" + +unbzip2-stream@^1.0.9: + version "1.4.3" + resolved "https://registry.yarnpkg.com/unbzip2-stream/-/unbzip2-stream-1.4.3.tgz#b0da04c4371311df771cdc215e87f2130991ace7" + integrity sha512-mlExGW4w71ebDJviH16lQLtZS32VKqsSfk80GCfUlwT/4/hNRFsoscrF/c++9xinkMzECL1uL9DDwXqFWkruPg== + dependencies: + buffer "^5.2.1" + through "^2.3.8" + +unescape@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/unescape/-/unescape-1.0.1.tgz#956e430f61cad8a4d57d82c518f5e6cc5d0dda96" + integrity sha512-O0+af1Gs50lyH1nUu3ZyYS1cRh01Q/kUKatTOkSs7jukXE6/NebucDVxyiDsA9AQ4JC1V1jUH9EO8JX2nMDgGQ== + dependencies: + extend-shallow "^2.0.1" + +unherit@^1.0.4: + version "1.1.3" + resolved "https://registry.yarnpkg.com/unherit/-/unherit-1.1.3.tgz#6c9b503f2b41b262330c80e91c8614abdaa69c22" + integrity sha512-Ft16BJcnapDKp0+J/rqFC3Rrk6Y/Ng4nzsC028k2jdDII/rdZ7Wd3pPT/6+vIIxRagwRc9K0IUX0Ra4fKvw+WQ== + dependencies: + inherits "^2.0.0" + xtend "^4.0.0" + +unicode-canonical-property-names-ecmascript@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz#2619800c4c825800efdd8343af7dd9933cbe2818" + integrity sha512-jDrNnXWHd4oHiTZnx/ZG7gtUTVp+gCcTTKr8L0HjlwphROEW3+Him+IpvC+xcJEFegapiMZyZe02CyuOnRmbnQ== + +unicode-match-property-ecmascript@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-1.0.4.tgz#8ed2a32569961bce9227d09cd3ffbb8fed5f020c" + integrity sha512-L4Qoh15vTfntsn4P1zqnHulG0LdXgjSO035fEpdtp6YxXhMT51Q6vgM5lYdG/5X3MjS+k/Y9Xw4SFCY9IkR0rg== + dependencies: + unicode-canonical-property-names-ecmascript "^1.0.4" + unicode-property-aliases-ecmascript "^1.0.4" + +unicode-match-property-value-ecmascript@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.2.0.tgz#0d91f600eeeb3096aa962b1d6fc88876e64ea531" + integrity sha512-wjuQHGQVofmSJv1uVISKLE5zO2rNGzM/KCYZch/QQvez7C1hUhBIuZ701fYXExuufJFMPhv2SyL8CyoIfMLbIQ== + +unicode-property-aliases-ecmascript@^1.0.4: + version "1.1.0" + resolved "https://registry.yarnpkg.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.1.0.tgz#dd57a99f6207bedff4628abefb94c50db941c8f4" + integrity sha512-PqSoPh/pWetQ2phoj5RLiaqIk4kCNwoV3CI+LfGmWLKI3rE3kl1h59XpX2BjgDrmbxD9ARtQobPGU1SguCYuQg== + +unified@9.1.0: + version "9.1.0" + resolved "https://registry.yarnpkg.com/unified/-/unified-9.1.0.tgz#7ba82e5db4740c47a04e688a9ca8335980547410" + integrity 
sha512-VXOv7Ic6twsKGJDeZQ2wwPqXs2hM0KNu5Hkg9WgAZbSD1pxhZ7p8swqg583nw1Je2fhwHy6U8aEjiI79x1gvag== + dependencies: + bail "^1.0.0" + extend "^3.0.0" + is-buffer "^2.0.0" + is-plain-obj "^2.0.0" + trough "^1.0.0" + vfile "^4.0.0" + +unified@^8.4.2: + version "8.4.2" + resolved "https://registry.yarnpkg.com/unified/-/unified-8.4.2.tgz#13ad58b4a437faa2751a4a4c6a16f680c500fff1" + integrity sha512-JCrmN13jI4+h9UAyKEoGcDZV+i1E7BLFuG7OsaDvTXI5P0qhHX+vZO/kOhz9jn8HGENDKbwSeB0nVOg4gVStGA== + dependencies: + bail "^1.0.0" + extend "^3.0.0" + is-plain-obj "^2.0.0" + trough "^1.0.0" + vfile "^4.0.0" + +unified@^9.0.0: + version "9.0.0" + resolved "https://registry.yarnpkg.com/unified/-/unified-9.0.0.tgz#12b099f97ee8b36792dbad13d278ee2f696eed1d" + integrity sha512-ssFo33gljU3PdlWLjNp15Inqb77d6JnJSfyplGJPT/a+fNRNyCBeveBAYJdO5khKdF6WVHa/yYCC7Xl6BDwZUQ== + dependencies: + bail "^1.0.0" + extend "^3.0.0" + is-buffer "^2.0.0" + is-plain-obj "^2.0.0" + trough "^1.0.0" + vfile "^4.0.0" + +union-value@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/union-value/-/union-value-1.0.1.tgz#0b6fe7b835aecda61c6ea4d4f02c14221e109847" + integrity sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg== + dependencies: + arr-union "^3.1.0" + get-value "^2.0.6" + is-extendable "^0.1.1" + set-value "^2.0.1" + +uniq@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/uniq/-/uniq-1.0.1.tgz#b31c5ae8254844a3a8281541ce2b04b865a734ff" + integrity sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8= + +uniqs@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/uniqs/-/uniqs-2.0.0.tgz#ffede4b36b25290696e6e165d4a59edb998e6b02" + integrity sha1-/+3ks2slKQaW5uFl1KWe25mOawI= + +unique-filename@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/unique-filename/-/unique-filename-1.1.1.tgz#1d69769369ada0583103a1e6ae87681b56573230" + integrity sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ== + dependencies: + unique-slug "^2.0.0" + +unique-slug@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/unique-slug/-/unique-slug-2.0.2.tgz#baabce91083fc64e945b0f3ad613e264f7cd4e6c" + integrity sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w== + dependencies: + imurmurhash "^0.1.4" + +unique-string@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unique-string/-/unique-string-1.0.0.tgz#9e1057cca851abb93398f8b33ae187b99caec11a" + integrity sha1-nhBXzKhRq7kzmPizOuGHuZyuwRo= + dependencies: + crypto-random-string "^1.0.0" + +unique-string@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/unique-string/-/unique-string-2.0.0.tgz#39c6451f81afb2749de2b233e3f7c5e8843bd89d" + integrity sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg== + dependencies: + crypto-random-string "^2.0.0" + +unist-builder@2.0.3, unist-builder@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/unist-builder/-/unist-builder-2.0.3.tgz#77648711b5d86af0942f334397a33c5e91516436" + integrity sha512-f98yt5pnlMWlzP539tPc4grGMsFaQQlP/vM396b00jngsiINumNmsY8rkXjfoi1c6QaM8nQ3vaGDuoKWbe/1Uw== + +unist-util-generated@^1.0.0: + version "1.1.5" + resolved "https://registry.yarnpkg.com/unist-util-generated/-/unist-util-generated-1.1.5.tgz#1e903e68467931ebfaea386dae9ea253628acd42" + integrity sha512-1TC+NxQa4N9pNdayCYA1EGUOCAO0Le3fVp7Jzns6lnua/mYgwHo0tz5WUAfrdpNch1RZLHc61VZ1SDgrtNXLSw== + +unist-util-is@^4.0.0: + version 
"4.0.2" + resolved "https://registry.yarnpkg.com/unist-util-is/-/unist-util-is-4.0.2.tgz#c7d1341188aa9ce5b3cff538958de9895f14a5de" + integrity sha512-Ofx8uf6haexJwI1gxWMGg6I/dLnF2yE+KibhD3/diOqY2TinLcqHXCV6OI5gFVn3xQqDH+u0M625pfKwIwgBKQ== + +unist-util-position@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/unist-util-position/-/unist-util-position-3.1.0.tgz#1c42ee6301f8d52f47d14f62bbdb796571fa2d47" + integrity sha512-w+PkwCbYSFw8vpgWD0v7zRCl1FpY3fjDSQ3/N/wNd9Ffa4gPi8+4keqt99N3XW6F99t/mUzp2xAhNmfKWp95QA== + +unist-util-remove-position@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/unist-util-remove-position/-/unist-util-remove-position-2.0.1.tgz#5d19ca79fdba712301999b2b73553ca8f3b352cc" + integrity sha512-fDZsLYIe2uT+oGFnuZmy73K6ZxOPG/Qcm+w7jbEjaFcJgbQ6cqjs/eSPzXhsmGpAsWPkqZM9pYjww5QTn3LHMA== + dependencies: + unist-util-visit "^2.0.0" + +unist-util-remove@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/unist-util-remove/-/unist-util-remove-2.0.0.tgz#32c2ad5578802f2ca62ab808173d505b2c898488" + integrity sha512-HwwWyNHKkeg/eXRnE11IpzY8JT55JNM1YCwwU9YNCnfzk6s8GhPXrVBBZWiwLeATJbI7euvoGSzcy9M29UeW3g== + dependencies: + unist-util-is "^4.0.0" + +unist-util-stringify-position@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz#cce3bfa1cdf85ba7375d1d5b17bdc4cada9bd9da" + integrity sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g== + dependencies: + "@types/unist" "^2.0.2" + +unist-util-visit-children@^1.1.3, unist-util-visit-children@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/unist-util-visit-children/-/unist-util-visit-children-1.1.4.tgz#e8a087e58a33a2815f76ea1901c15dec2cb4b432" + integrity sha512-sA/nXwYRCQVRwZU2/tQWUqJ9JSFM1X3x7JIOsIgSzrFHcfVt6NkzDtKzyxg2cZWkCwGF9CO8x4QNZRJRMK8FeQ== + +unist-util-visit-parents@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/unist-util-visit-parents/-/unist-util-visit-parents-3.0.2.tgz#d4076af3011739c71d2ce99d05de37d545f4351d" + integrity sha512-yJEfuZtzFpQmg1OSCyS9M5NJRrln/9FbYosH3iW0MG402QbdbaB8ZESwUv9RO6nRfLAKvWcMxCwdLWOov36x/g== + dependencies: + "@types/unist" "^2.0.0" + unist-util-is "^4.0.0" + +unist-util-visit@2.0.3, unist-util-visit@^2.0.1, unist-util-visit@^2.0.2: + version "2.0.3" + resolved "https://registry.yarnpkg.com/unist-util-visit/-/unist-util-visit-2.0.3.tgz#c3703893146df47203bb8a9795af47d7b971208c" + integrity sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q== + dependencies: + "@types/unist" "^2.0.0" + unist-util-is "^4.0.0" + unist-util-visit-parents "^3.0.0" + +unist-util-visit@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/unist-util-visit/-/unist-util-visit-2.0.2.tgz#3843782a517de3d2357b4c193b24af2d9366afb7" + integrity sha512-HoHNhGnKj6y+Sq+7ASo2zpVdfdRifhTgX2KTU3B/sO/TTlZchp7E3S4vjRzDJ7L60KmrCPsQkVK3lEF3cz36XQ== + dependencies: + "@types/unist" "^2.0.0" + unist-util-is "^4.0.0" + unist-util-visit-parents "^3.0.0" + +universal-user-agent@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/universal-user-agent/-/universal-user-agent-4.0.1.tgz#fd8d6cb773a679a709e967ef8288a31fcc03e557" + integrity sha512-LnST3ebHwVL2aNe4mejI9IQh2HfZ1RLo8Io2HugSif8ekzD1TlWpHpColOB/eh8JHMLkGH3Akqf040I+4ylNxg== + dependencies: + os-name "^3.1.0" + +universal-user-agent@^6.0.0: + version "6.0.0" + resolved 
"https://registry.yarnpkg.com/universal-user-agent/-/universal-user-agent-6.0.0.tgz#3381f8503b251c0d9cd21bc1de939ec9df5480ee" + integrity sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w== + +universalify@^0.1.0: + version "0.1.2" + resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.1.2.tgz#b646f69be3942dabcecc9d6639c80dc105efaa66" + integrity sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg== + +universalify@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/universalify/-/universalify-1.0.0.tgz#b61a1da173e8435b2fe3c67d29b9adf8594bd16d" + integrity sha512-rb6X1W158d7pRQBg5gkR8uPaSfiids68LTJQYOtEUhoJUWBdaQHsuT/EUduxXYxcrt4r5PJ4fuHW1MHT6p0qug== + +unixify@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unixify/-/unixify-1.0.0.tgz#3a641c8c2ffbce4da683a5c70f03a462940c2090" + integrity sha1-OmQcjC/7zk2mg6XHDwOkYpQMIJA= + dependencies: + normalize-path "^2.1.1" + +unpipe@1.0.0, unpipe@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec" + integrity sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw= + +unquote@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/unquote/-/unquote-1.1.1.tgz#8fded7324ec6e88a0ff8b905e7c098cdc086d544" + integrity sha1-j97XMk7G6IoP+LkF58CYzcCG1UQ= + +unset-value@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unset-value/-/unset-value-1.0.0.tgz#8376873f7d2335179ffb1e6fc3a8ed0dfc8ab559" + integrity sha1-g3aHP30jNRef+x5vw6jtDfyKtVk= + dependencies: + has-value "^0.3.1" + isobject "^3.0.0" + +unzip-response@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/unzip-response/-/unzip-response-2.0.1.tgz#d2f0f737d16b0615e72a6935ed04214572d56f97" + integrity sha1-0vD3N9FrBhXnKmk17QQhRXLVb5c= + +upath@^1.1.1: + version "1.2.0" + resolved "https://registry.yarnpkg.com/upath/-/upath-1.2.0.tgz#8f66dbcd55a883acdae4408af8b035a5044c1894" + integrity sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg== + +update-notifier@^2.5.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/update-notifier/-/update-notifier-2.5.0.tgz#d0744593e13f161e406acb1d9408b72cad08aff6" + integrity sha512-gwMdhgJHGuj/+wHJJs9e6PcCszpxR1b236igrOkUofGhqJuG+amlIKwApH1IW1WWl7ovZxsX49lMBWLxSdm5Dw== + dependencies: + boxen "^1.2.1" + chalk "^2.0.1" + configstore "^3.0.0" + import-lazy "^2.1.0" + is-ci "^1.0.10" + is-installed-globally "^0.1.0" + is-npm "^1.0.0" + latest-version "^3.0.0" + semver-diff "^2.0.0" + xdg-basedir "^3.0.0" + +update-notifier@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/update-notifier/-/update-notifier-4.1.0.tgz#4866b98c3bc5b5473c020b1250583628f9a328f3" + integrity sha512-w3doE1qtI0/ZmgeoDoARmI5fjDoT93IfKgEGqm26dGUOh8oNpaSTsGNdYRN/SjOuo10jcJGwkEL3mroKzktkew== + dependencies: + boxen "^4.2.0" + chalk "^3.0.0" + configstore "^5.0.1" + has-yarn "^2.1.0" + import-lazy "^2.1.0" + is-ci "^2.0.0" + is-installed-globally "^0.3.1" + is-npm "^4.0.0" + is-yarn-global "^0.3.0" + latest-version "^5.0.0" + pupa "^2.0.1" + semver-diff "^3.1.1" + xdg-basedir "^4.0.0" + +uri-js@^4.2.2: + version "4.2.2" + resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.2.2.tgz#94c540e1ff772956e2299507c010aea6c8838eb0" + integrity sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ== + dependencies: + punycode "^2.1.0" + +urix@^0.1.0: + version 
"0.1.0" + resolved "https://registry.yarnpkg.com/urix/-/urix-0.1.0.tgz#da937f7a62e21fec1fd18d49b35c2935067a6c72" + integrity sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI= + +url-loader@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/url-loader/-/url-loader-4.1.0.tgz#c7d6b0d6b0fccd51ab3ffc58a78d32b8d89a7be2" + integrity sha512-IzgAAIC8wRrg6NYkFIJY09vtktQcsvU8V6HhtQj9PTefbYImzLB1hufqo4m+RyM5N3mLx5BqJKccgxJS+W3kqw== + dependencies: + loader-utils "^2.0.0" + mime-types "^2.1.26" + schema-utils "^2.6.5" + +url-parse-lax@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-1.0.0.tgz#7af8f303645e9bd79a272e7a14ac68bc0609da73" + integrity sha1-evjzA2Rem9eaJy56FKxovAYJ2nM= + dependencies: + prepend-http "^1.0.1" + +url-parse-lax@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-3.0.0.tgz#16b5cafc07dbe3676c1b1999177823d6503acb0c" + integrity sha1-FrXK/Afb42dsGxmZF3gj1lA6yww= + dependencies: + prepend-http "^2.0.0" + +url-parse@^1.4.3: + version "1.4.7" + resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.4.7.tgz#a8a83535e8c00a316e403a5db4ac1b9b853ae278" + integrity sha512-d3uaVyzDB9tQoSXFvuSUNFibTd9zxd2bkVrDRvF5TmvWWQwqE4lgYJ5m+x1DbecWkw+LK4RNl2CU1hHuOKPVlg== + dependencies: + querystringify "^2.1.1" + requires-port "^1.0.0" + +url-template@^2.0.8: + version "2.0.8" + resolved "https://registry.yarnpkg.com/url-template/-/url-template-2.0.8.tgz#fc565a3cccbff7730c775f5641f9555791439f21" + integrity sha1-/FZaPMy/93MMd19WQflVV5FDnyE= + +url-to-options@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/url-to-options/-/url-to-options-1.0.1.tgz#1505a03a289a48cbd7a434efbaeec5055f5633a9" + integrity sha1-FQWgOiiaSMvXpDTvuu7FBV9WM6k= + +url@0.10.3: + version "0.10.3" + resolved "https://registry.yarnpkg.com/url/-/url-0.10.3.tgz#021e4d9c7705f21bbf37d03ceb58767402774c64" + integrity sha1-Ah5NnHcF8hu/N9A861h2dAJ3TGQ= + dependencies: + punycode "1.3.2" + querystring "0.2.0" + +url@^0.11.0: + version "0.11.0" + resolved "https://registry.yarnpkg.com/url/-/url-0.11.0.tgz#3838e97cfc60521eb73c525a8e55bfdd9e2e28f1" + integrity sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE= + dependencies: + punycode "1.3.2" + querystring "0.2.0" + +use@^3.1.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/use/-/use-3.1.1.tgz#d50c8cac79a19fbc20f2911f56eb973f4e10070f" + integrity sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ== + +util-deprecate@^1.0.1, util-deprecate@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + integrity sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8= + +util.promisify@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/util.promisify/-/util.promisify-1.0.0.tgz#440f7165a459c9a16dc145eb8e72f35687097030" + integrity sha512-i+6qA2MPhvoKLuxnJNpXAGhg7HphQOSUq2LKMZD0m15EiskXUkMvKdF4Uui0WYeCUGea+o2cw/ZuwehtfsrNkA== + dependencies: + define-properties "^1.1.2" + object.getownpropertydescriptors "^2.0.3" + +util.promisify@^1.0.1, util.promisify@~1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/util.promisify/-/util.promisify-1.0.1.tgz#6baf7774b80eeb0f7520d8b81d07982a59abbaee" + integrity sha512-g9JpC/3He3bm38zsLupWryXHoEcS22YHthuPQSJdMy6KNrzIRzWqcsHzD/WUnqe45whVou4VIsPew37DoXWNrA== + dependencies: + define-properties "^1.1.3" + es-abstract "^1.17.2" + has-symbols "^1.0.1" + object.getownpropertydescriptors "^2.1.0" + +util@0.10.3: + 
version "0.10.3" + resolved "https://registry.yarnpkg.com/util/-/util-0.10.3.tgz#7afb1afe50805246489e3db7fe0ed379336ac0f9" + integrity sha1-evsa/lCAUkZInj23/g7TeTNqwPk= + dependencies: + inherits "2.0.1" + +util@^0.11.0: + version "0.11.1" + resolved "https://registry.yarnpkg.com/util/-/util-0.11.1.tgz#3236733720ec64bb27f6e26f421aaa2e1b588d61" + integrity sha512-HShAsny+zS2TZfaXxD9tYj4HQGlBezXZMZuM/S5PKLLoZkShZiGk9o5CzukI1LVHZvjdvZ2Sj1aW/Ndn2NB/HQ== + dependencies: + inherits "2.0.3" + +utila@^0.4.0, utila@~0.4: + version "0.4.0" + resolved "https://registry.yarnpkg.com/utila/-/utila-0.4.0.tgz#8a16a05d445657a3aea5eecc5b12a4fa5379772c" + integrity sha1-ihagXURWV6Oupe7MWxKk+lN5dyw= + +utils-merge@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713" + integrity sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM= + +uuid@3.3.2: + version "3.3.2" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.3.2.tgz#1b4af4955eb3077c501c23872fc6513811587131" + integrity sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA== + +uuid@^3.3.2, uuid@^3.3.3, uuid@^3.4.0: + version "3.4.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.4.0.tgz#b23e4358afa8a202fe7a100af1f5f883f02007ee" + integrity sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A== + +uuid@^8.0.0: + version "8.3.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-8.3.0.tgz#ab738085ca22dc9a8c92725e459b1d507df5d6ea" + integrity sha512-fX6Z5o4m6XsXBdli9g7DtWgAx+osMsRRZFKma1mIUsLCz6vRvv+pz5VNbyu9UEDzpMWulZfvpgb/cmDXVulYFQ== + +validate-npm-package-license@^3.0.1: + version "3.0.4" + resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz#fc91f6b9c7ba15c857f4cb2c5defeec39d4f410a" + integrity sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew== + dependencies: + spdx-correct "^3.0.0" + spdx-expression-parse "^3.0.0" + +validate-npm-package-name@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/validate-npm-package-name/-/validate-npm-package-name-3.0.0.tgz#5fa912d81eb7d0c74afc140de7317f0ca7df437e" + integrity sha1-X6kS2B630MdK/BQN5zF/DKffQ34= + dependencies: + builtins "^1.0.3" + +value-equal@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/value-equal/-/value-equal-1.0.1.tgz#1e0b794c734c5c0cade179c437d356d931a34d6c" + integrity sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw== + +vary@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" + integrity sha1-IpnwLG3tMNSllhsLn3RSShj2NPw= + +vendors@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/vendors/-/vendors-1.0.4.tgz#e2b800a53e7a29b93506c3cf41100d16c4c4ad8e" + integrity sha512-/juG65kTL4Cy2su4P8HjtkTxk6VmJDiOPBufWniqQ6wknac6jNiXS9vU+hO3wgusiyqWlzTbVHi0dyJqRONg3w== + +vfile-location@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/vfile-location/-/vfile-location-3.0.1.tgz#d78677c3546de0f7cd977544c367266764d31bb3" + integrity sha512-yYBO06eeN/Ki6Kh1QAkgzYpWT1d3Qln+ZCtSbJqFExPl1S3y2qqotJQXoh6qEvl/jDlgpUJolBn3PItVnnZRqQ== + +vfile-message@^2.0.0: + version "2.0.4" + resolved "https://registry.yarnpkg.com/vfile-message/-/vfile-message-2.0.4.tgz#5b43b88171d409eae58477d13f23dd41d52c371a" + integrity 
sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ== + dependencies: + "@types/unist" "^2.0.0" + unist-util-stringify-position "^2.0.0" + +vfile@^4.0.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/vfile/-/vfile-4.1.1.tgz#282d28cebb609183ac51703001bc18b3e3f17de9" + integrity sha512-lRjkpyDGjVlBA7cDQhQ+gNcvB1BGaTHYuSOcY3S7OhDmBtnzX95FhtZZDecSTDm6aajFymyve6S5DN4ZHGezdQ== + dependencies: + "@types/unist" "^2.0.0" + is-buffer "^2.0.0" + replace-ext "1.0.0" + unist-util-stringify-position "^2.0.0" + vfile-message "^2.0.0" + +vlq@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/vlq/-/vlq-1.0.1.tgz#c003f6e7c0b4c1edd623fd6ee50bbc0d6a1de468" + integrity sha512-gQpnTgkubC6hQgdIcRdYGDSDc+SaujOdyesZQMv6JlfQee/9Mp0Qhnys6WxDWvQnL5WZdT7o2Ul187aSt0Rq+w== + +vm-browserify@^1.0.1: + version "1.1.2" + resolved "https://registry.yarnpkg.com/vm-browserify/-/vm-browserify-1.1.2.tgz#78641c488b8e6ca91a75f511e7a3b32a86e5dda0" + integrity sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ== + +wait-file@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/wait-file/-/wait-file-1.0.5.tgz#377f48795f1765046a41bb0671c142ef8e509ae6" + integrity sha512-udLpJY/eOxlrMm3+XD1RLuF2oT9B7J7wiyR5/9xrvQymS6YR6trWvVhzOldHrVbLwyiRmLj9fcvsjzpSXeZHkw== + dependencies: + "@hapi/joi" "^15.1.0" + fs-extra "^8.1.0" + rx "^4.1.0" + +wait-port@^0.2.2: + version "0.2.9" + resolved "https://registry.yarnpkg.com/wait-port/-/wait-port-0.2.9.tgz#3905cf271b5dbe37a85c03b85b418b81cb24ee55" + integrity sha512-hQ/cVKsNqGZ/UbZB/oakOGFqic00YAMM5/PEj3Bt4vKarv2jWIWzDbqlwT94qMs/exAQAsvMOq99sZblV92zxQ== + dependencies: + chalk "^2.4.2" + commander "^3.0.2" + debug "^4.1.1" + +watchpack-chokidar2@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/watchpack-chokidar2/-/watchpack-chokidar2-2.0.0.tgz#9948a1866cbbd6cb824dea13a7ed691f6c8ddff0" + integrity sha512-9TyfOyN/zLUbA288wZ8IsMZ+6cbzvsNyEzSBp6e/zkifi6xxbl8SmQ/CxQq32k8NNqrdVEVUVSEf56L4rQ/ZxA== + dependencies: + chokidar "^2.1.8" + +watchpack@^1.6.1: + version "1.7.2" + resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-1.7.2.tgz#c02e4d4d49913c3e7e122c3325365af9d331e9aa" + integrity sha512-ymVbbQP40MFTp+cNMvpyBpBtygHnPzPkHqoIwRRj/0B8KhqQwV8LaKjtbaxF2lK4vl8zN9wCxS46IFCU5K4W0g== + dependencies: + graceful-fs "^4.1.2" + neo-async "^2.5.0" + optionalDependencies: + chokidar "^3.4.0" + watchpack-chokidar2 "^2.0.0" + +wbuf@^1.1.0, wbuf@^1.7.3: + version "1.7.3" + resolved "https://registry.yarnpkg.com/wbuf/-/wbuf-1.7.3.tgz#c1d8d149316d3ea852848895cb6a0bfe887b87df" + integrity sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA== + dependencies: + minimalistic-assert "^1.0.0" + +wcwidth@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/wcwidth/-/wcwidth-1.0.1.tgz#f0b0dcf915bc5ff1528afadb2c0e17b532da2fe8" + integrity sha1-8LDc+RW8X/FSivrbLA4XtTLaL+g= + dependencies: + defaults "^1.0.3" + +web-namespaces@^1.0.0, web-namespaces@^1.1.2: + version "1.1.4" + resolved "https://registry.yarnpkg.com/web-namespaces/-/web-namespaces-1.1.4.tgz#bc98a3de60dadd7faefc403d1076d529f5e030ec" + integrity sha512-wYxSGajtmoP4WxfejAPIr4l0fVh+jeMXZb08wNc0tMg6xsfZXj3cECqIK0G7ZAqUq0PP8WlMDtaOGVBTAWztNw== + +webidl-conversions@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-4.0.2.tgz#a855980b1f0b6b359ba1d5d9fb39ae941faa63ad" + integrity 
sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg== + +webpack-bundle-analyzer@^3.6.1: + version "3.8.0" + resolved "https://registry.yarnpkg.com/webpack-bundle-analyzer/-/webpack-bundle-analyzer-3.8.0.tgz#ce6b3f908daf069fd1f7266f692cbb3bded9ba16" + integrity sha512-PODQhAYVEourCcOuU+NiYI7WdR8QyELZGgPvB1y2tjbUpbmcQOt5Q7jEK+ttd5se0KSBKD9SXHCEozS++Wllmw== + dependencies: + acorn "^7.1.1" + acorn-walk "^7.1.1" + bfj "^6.1.1" + chalk "^2.4.1" + commander "^2.18.0" + ejs "^2.6.1" + express "^4.16.3" + filesize "^3.6.1" + gzip-size "^5.0.0" + lodash "^4.17.15" + mkdirp "^0.5.1" + opener "^1.5.1" + ws "^6.0.0" + +webpack-dev-middleware@^3.7.2: + version "3.7.2" + resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-3.7.2.tgz#0019c3db716e3fa5cecbf64f2ab88a74bab331f3" + integrity sha512-1xC42LxbYoqLNAhV6YzTYacicgMZQTqRd27Sim9wn5hJrX3I5nxYy1SxSd4+gjUFsz1dQFj+yEe6zEVmSkeJjw== + dependencies: + memory-fs "^0.4.1" + mime "^2.4.4" + mkdirp "^0.5.1" + range-parser "^1.2.1" + webpack-log "^2.0.0" + +webpack-dev-server@^3.11.0: + version "3.11.0" + resolved "https://registry.yarnpkg.com/webpack-dev-server/-/webpack-dev-server-3.11.0.tgz#8f154a3bce1bcfd1cc618ef4e703278855e7ff8c" + integrity sha512-PUxZ+oSTxogFQgkTtFndEtJIPNmml7ExwufBZ9L2/Xyyd5PnOL5UreWe5ZT7IU25DSdykL9p1MLQzmLh2ljSeg== + dependencies: + ansi-html "0.0.7" + bonjour "^3.5.0" + chokidar "^2.1.8" + compression "^1.7.4" + connect-history-api-fallback "^1.6.0" + debug "^4.1.1" + del "^4.1.1" + express "^4.17.1" + html-entities "^1.3.1" + http-proxy-middleware "0.19.1" + import-local "^2.0.0" + internal-ip "^4.3.0" + ip "^1.1.5" + is-absolute-url "^3.0.3" + killable "^1.0.1" + loglevel "^1.6.8" + opn "^5.5.0" + p-retry "^3.0.1" + portfinder "^1.0.26" + schema-utils "^1.0.0" + selfsigned "^1.10.7" + semver "^6.3.0" + serve-index "^1.9.1" + sockjs "0.3.20" + sockjs-client "1.4.0" + spdy "^4.0.2" + strip-ansi "^3.0.1" + supports-color "^6.1.0" + url "^0.11.0" + webpack-dev-middleware "^3.7.2" + webpack-log "^2.0.0" + ws "^6.2.1" + yargs "^13.3.2" + +webpack-log@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/webpack-log/-/webpack-log-2.0.0.tgz#5b7928e0637593f119d32f6227c1e0ac31e1b47f" + integrity sha512-cX8G2vR/85UYG59FgkoMamwHUIkSSlV3bBMRsbxVXVUk2j6NleCKjQ/WE9eYg9WY4w25O9w8wKP4rzNZFmUcUg== + dependencies: + ansi-colors "^3.0.0" + uuid "^3.3.2" + +webpack-merge@^4.2.2: + version "4.2.2" + resolved "https://registry.yarnpkg.com/webpack-merge/-/webpack-merge-4.2.2.tgz#a27c52ea783d1398afd2087f547d7b9d2f43634d" + integrity sha512-TUE1UGoTX2Cd42j3krGYqObZbOD+xF7u28WB7tfUordytSjbWTIjK/8V0amkBfTYN4/pB/GIDlJZZ657BGG19g== + dependencies: + lodash "^4.17.15" + +webpack-sources@^1.1.0, webpack-sources@^1.4.0, webpack-sources@^1.4.1, webpack-sources@^1.4.3: + version "1.4.3" + resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-1.4.3.tgz#eedd8ec0b928fbf1cbfe994e22d2d890f330a933" + integrity sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ== + dependencies: + source-list-map "^2.0.0" + source-map "~0.6.1" + +webpack@^4.41.2: + version "4.43.0" + resolved "https://registry.yarnpkg.com/webpack/-/webpack-4.43.0.tgz#c48547b11d563224c561dad1172c8aa0b8a678e6" + integrity sha512-GW1LjnPipFW2Y78OOab8NJlCflB7EFskMih2AHdvjbpKMeDJqEgSx24cXXXiPS65+WSwVyxtDsJH6jGX2czy+g== + dependencies: + "@webassemblyjs/ast" "1.9.0" + "@webassemblyjs/helper-module-context" "1.9.0" + "@webassemblyjs/wasm-edit" "1.9.0" + 
"@webassemblyjs/wasm-parser" "1.9.0" + acorn "^6.4.1" + ajv "^6.10.2" + ajv-keywords "^3.4.1" + chrome-trace-event "^1.0.2" + enhanced-resolve "^4.1.0" + eslint-scope "^4.0.3" + json-parse-better-errors "^1.0.2" + loader-runner "^2.4.0" + loader-utils "^1.2.3" + memory-fs "^0.4.1" + micromatch "^3.1.10" + mkdirp "^0.5.3" + neo-async "^2.6.1" + node-libs-browser "^2.2.1" + schema-utils "^1.0.0" + tapable "^1.1.3" + terser-webpack-plugin "^1.4.3" + watchpack "^1.6.1" + webpack-sources "^1.4.1" + +webpackbar@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/webpackbar/-/webpackbar-4.0.0.tgz#ee7a87f16077505b5720551af413c8ecd5b1f780" + integrity sha512-k1qRoSL/3BVuINzngj09nIwreD8wxV4grcuhHTD8VJgUbGcy8lQSPqv+bM00B7F+PffwIsQ8ISd4mIwRbr23eQ== + dependencies: + ansi-escapes "^4.2.1" + chalk "^2.4.2" + consola "^2.10.0" + figures "^3.0.0" + pretty-time "^1.1.0" + std-env "^2.2.1" + text-table "^0.2.0" + wrap-ansi "^6.0.0" + +websocket-driver@0.6.5: + version "0.6.5" + resolved "https://registry.yarnpkg.com/websocket-driver/-/websocket-driver-0.6.5.tgz#5cb2556ceb85f4373c6d8238aa691c8454e13a36" + integrity sha1-XLJVbOuF9Dc8bYI4qmkchFThOjY= + dependencies: + websocket-extensions ">=0.1.1" + +websocket-driver@>=0.5.1: + version "0.7.4" + resolved "https://registry.yarnpkg.com/websocket-driver/-/websocket-driver-0.7.4.tgz#89ad5295bbf64b480abcba31e4953aca706f5760" + integrity sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg== + dependencies: + http-parser-js ">=0.5.1" + safe-buffer ">=5.1.0" + websocket-extensions ">=0.1.1" + +websocket-extensions@>=0.1.1: + version "0.1.4" + resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.4.tgz#7f8473bc839dfd87608adb95d7eb075211578a42" + integrity sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg== + +well-known-symbols@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/well-known-symbols/-/well-known-symbols-2.0.0.tgz#e9c7c07dbd132b7b84212c8174391ec1f9871ba5" + integrity sha512-ZMjC3ho+KXo0BfJb7JgtQ5IBuvnShdlACNkKkdsqBmYw3bPAaJfPeYUo6tLUaT5tG/Gkh7xkpBhKRQ9e7pyg9Q== + +whatwg-fetch@>=0.10.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-3.2.0.tgz#8e134f701f0a4ab5fda82626f113e2b647fd16dc" + integrity sha512-SdGPoQMMnzVYThUbSrEvqTlkvC1Ux27NehaJ/GUHBfNrh5Mjg+1/uRyFMwVnxO2MrikMWvWAqUGgQOfVU4hT7w== + +whatwg-url@^7.0.0: + version "7.1.0" + resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-7.1.0.tgz#c2c492f1eca612988efd3d2266be1b9fc6170d06" + integrity sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg== + dependencies: + lodash.sortby "^4.7.0" + tr46 "^1.0.1" + webidl-conversions "^4.0.2" + +which-module@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.0.tgz#d9ef07dce77b9902b8a3a8fa4b31c3e3f7e6e87a" + integrity sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho= + +which@^1.2.9, which@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" + integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ== + dependencies: + isexe "^2.0.0" + +which@^2.0.1, which@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1" + integrity 
sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== + dependencies: + isexe "^2.0.0" + +widest-line@^2.0.0, widest-line@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/widest-line/-/widest-line-2.0.1.tgz#7438764730ec7ef4381ce4df82fb98a53142a3fc" + integrity sha512-Ba5m9/Fa4Xt9eb2ELXt77JxVDV8w7qQrH0zS/TWSJdLyAwQjWoOzpzj5lwVftDz6n/EOu3tNACS84v509qwnJA== + dependencies: + string-width "^2.1.1" + +widest-line@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/widest-line/-/widest-line-3.1.0.tgz#8292333bbf66cb45ff0de1603b136b7ae1496eca" + integrity sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg== + dependencies: + string-width "^4.0.0" + +windows-release@^3.1.0: + version "3.3.1" + resolved "https://registry.yarnpkg.com/windows-release/-/windows-release-3.3.1.tgz#cb4e80385f8550f709727287bf71035e209c4ace" + integrity sha512-Pngk/RDCaI/DkuHPlGTdIkDiTAnAkyMjoQMZqRsxydNl1qGXNIoZrB7RK8g53F2tEgQBMqQJHQdYZuQEEAu54A== + dependencies: + execa "^1.0.0" + +winston-transport@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/winston-transport/-/winston-transport-4.4.0.tgz#17af518daa690d5b2ecccaa7acf7b20ca7925e59" + integrity sha512-Lc7/p3GtqtqPBYYtS6KCN3c77/2QCev51DvcJKbkFPQNoj1sinkGwLGFDxkXY9J6p9+EPnYs+D90uwbnaiURTw== + dependencies: + readable-stream "^2.3.7" + triple-beam "^1.2.0" + +winston@^3.2.1: + version "3.3.3" + resolved "https://registry.yarnpkg.com/winston/-/winston-3.3.3.tgz#ae6172042cafb29786afa3d09c8ff833ab7c9170" + integrity sha512-oEXTISQnC8VlSAKf1KYSSd7J6IWuRPQqDdo8eoRNaYKLvwSb5+79Z3Yi1lrl6KDpU6/VWaxpakDAtb1oQ4n9aw== + dependencies: + "@dabh/diagnostics" "^2.0.2" + async "^3.1.0" + is-stream "^2.0.0" + logform "^2.2.0" + one-time "^1.0.0" + readable-stream "^3.4.0" + stack-trace "0.0.x" + triple-beam "^1.3.0" + winston-transport "^4.4.0" + +word-wrap@~1.2.3: + version "1.2.3" + resolved "https://registry.yarnpkg.com/word-wrap/-/word-wrap-1.2.3.tgz#610636f6b1f703891bd34771ccb17fb93b47079c" + integrity sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ== + +worker-farm@^1.7.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/worker-farm/-/worker-farm-1.7.0.tgz#26a94c5391bbca926152002f69b84a4bf772e5a8" + integrity sha512-rvw3QTZc8lAxyVrqcSGVm5yP/IJ2UcB3U0graE3LCFoZ0Yn2x4EoVSqJKdB/T5M+FLcRPjz4TDacRf3OCfNUzw== + dependencies: + errno "~0.1.7" + +worker-rpc@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/worker-rpc/-/worker-rpc-0.1.1.tgz#cb565bd6d7071a8f16660686051e969ad32f54d5" + integrity sha512-P1WjMrUB3qgJNI9jfmpZ/htmBEjFh//6l/5y8SD9hg1Ef5zTTVVoRjTrTEzPrNBQvmhMxkoTsjOXN10GWU7aCg== + dependencies: + microevent.ts "~0.1.1" + +wrap-ansi@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-2.1.0.tgz#d8fc3d284dd05794fe84973caecdd1cf824fdd85" + integrity sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU= + dependencies: + string-width "^1.0.1" + strip-ansi "^3.0.1" + +wrap-ansi@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-4.0.0.tgz#b3570d7c70156159a2d42be5cc942e957f7b1131" + integrity sha512-uMTsj9rDb0/7kk1PbcbCcwvHUxp60fGDB/NNXpVa0Q+ic/e7y5+BwTxKfQ33VYgDppSwi/FBzpetYzo8s6tfbg== + dependencies: + ansi-styles "^3.2.0" + string-width "^2.1.1" + strip-ansi "^4.0.0" + +wrap-ansi@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-5.1.0.tgz#1fd1f67235d5b6d0fee781056001bfb694c03b09" + integrity 
sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q== + dependencies: + ansi-styles "^3.2.0" + string-width "^3.0.0" + strip-ansi "^5.0.0" + +wrap-ansi@^6.0.0, wrap-ansi@^6.2.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53" + integrity sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + +wrap-ansi@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + +wrappy@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + integrity sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8= + +write-file-atomic@^2.0.0: + version "2.4.3" + resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-2.4.3.tgz#1fd2e9ae1df3e75b8d8c367443c692d4ca81f481" + integrity sha512-GaETH5wwsX+GcnzhPgKcKjJ6M2Cq3/iZp1WyY/X1CSqrW+jVNM9Y7D8EC2sM4ZG/V8wZlSniJnCKWPmBYAucRQ== + dependencies: + graceful-fs "^4.1.11" + imurmurhash "^0.1.4" + signal-exit "^3.0.2" + +write-file-atomic@^3.0.0: + version "3.0.3" + resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-3.0.3.tgz#56bd5c5a5c70481cd19c571bd39ab965a5de56e8" + integrity sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q== + dependencies: + imurmurhash "^0.1.4" + is-typedarray "^1.0.0" + signal-exit "^3.0.2" + typedarray-to-buffer "^3.1.5" + +ws@^6.0.0, ws@^6.2.1: + version "6.2.1" + resolved "https://registry.yarnpkg.com/ws/-/ws-6.2.1.tgz#442fdf0a47ed64f59b6a5d8ff130f4748ed524fb" + integrity sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA== + dependencies: + async-limiter "~1.0.0" + +xdg-basedir@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-3.0.0.tgz#496b2cc109eca8dbacfe2dc72b603c17c5870ad4" + integrity sha1-SWsswQnsqNus/i3HK2A8F8WHCtQ= + +xdg-basedir@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-4.0.0.tgz#4bc8d9984403696225ef83a1573cbbcb4e79db13" + integrity sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q== + +xml2js@0.4.19: + version "0.4.19" + resolved "https://registry.yarnpkg.com/xml2js/-/xml2js-0.4.19.tgz#686c20f213209e94abf0d1bcf1efaa291c7827a7" + integrity sha512-esZnJZJOiJR9wWKMyuvSE1y6Dq5LCuJanqhxslH2bxM6duahNZ+HMpCLhBQGZkbX6xRf8x1Y2eJlgt2q3qo49Q== + dependencies: + sax ">=0.6.0" + xmlbuilder "~9.0.1" + +xmlbuilder@^13.0.0: + version "13.0.2" + resolved "https://registry.yarnpkg.com/xmlbuilder/-/xmlbuilder-13.0.2.tgz#02ae33614b6a047d1c32b5389c1fdacb2bce47a7" + integrity sha512-Eux0i2QdDYKbdbA6AM6xE4m6ZTZr4G4xF9kahI2ukSEMCzwce2eX9WlTI5J3s+NU7hpasFsr8hWIONae7LluAQ== + +xmlbuilder@~9.0.1: + version "9.0.7" + resolved "https://registry.yarnpkg.com/xmlbuilder/-/xmlbuilder-9.0.7.tgz#132ee63d2ec5565c557e20f4c22df9aca686b10d" + integrity sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0= + +xtend@^4.0.0, xtend@^4.0.1, xtend@~4.0.0, xtend@~4.0.1: + version "4.0.2" + resolved 
"https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54" + integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ== + +"y18n@^3.2.1 || ^4.0.0", y18n@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.0.tgz#95ef94f85ecc81d007c264e190a120f0a3c8566b" + integrity sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w== + +yallist@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-2.1.2.tgz#1c11f9218f076089a47dd512f93c6699a6a81d52" + integrity sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI= + +yallist@^3.0.2: + version "3.1.1" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd" + integrity sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g== + +yallist@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" + integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== + +yaml@^1.7.2, yaml@^1.8.3: + version "1.10.0" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.0.tgz#3b593add944876077d4d683fee01081bd9fff31e" + integrity sha512-yr2icI4glYaNG+KWONODapy2/jDdMSDnrONSjblABjD9B4Z5LgiircSt8m8sRZFNi08kG9Sm0uSHtEmP3zaEGg== + +yargs-parser@^11.1.1: + version "11.1.1" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-11.1.1.tgz#879a0865973bca9f6bab5cbdf3b1c67ec7d3bcf4" + integrity sha512-C6kB/WJDiaxONLJQnF8ccx9SEeoTTLek8RVbaOIsrAUS8VrBEXfmeSnCZxygc+XC2sNMBIwOOnfcxiynjHsVSQ== + dependencies: + camelcase "^5.0.0" + decamelize "^1.2.0" + +yargs-parser@^13.1.2: + version "13.1.2" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-13.1.2.tgz#130f09702ebaeef2650d54ce6e3e5706f7a4fb38" + integrity sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg== + dependencies: + camelcase "^5.0.0" + decamelize "^1.2.0" + +yargs-parser@^18.1.2: + version "18.1.3" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-18.1.3.tgz#be68c4975c6b2abf469236b0c870362fab09a7b0" + integrity sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ== + dependencies: + camelcase "^5.0.0" + decamelize "^1.2.0" + +yargs@^12.0.5: + version "12.0.5" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-12.0.5.tgz#05f5997b609647b64f66b81e3b4b10a368e7ad13" + integrity sha512-Lhz8TLaYnxq/2ObqHDql8dX8CJi97oHxrjUcYtzKbbykPtVW9WB+poxI+NM2UIzsMgNCZTIf0AQwsjK5yMAqZw== + dependencies: + cliui "^4.0.0" + decamelize "^1.2.0" + find-up "^3.0.0" + get-caller-file "^1.0.1" + os-locale "^3.0.0" + require-directory "^2.1.1" + require-main-filename "^1.0.1" + set-blocking "^2.0.0" + string-width "^2.0.0" + which-module "^2.0.0" + y18n "^3.2.1 || ^4.0.0" + yargs-parser "^11.1.1" + +yargs@^13.3.2: + version "13.3.2" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-13.3.2.tgz#ad7ffefec1aa59565ac915f82dccb38a9c31a2dd" + integrity sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw== + dependencies: + cliui "^5.0.0" + find-up "^3.0.0" + get-caller-file "^2.0.1" + require-directory "^2.1.1" + require-main-filename "^2.0.0" + set-blocking "^2.0.0" + string-width "^3.0.0" + which-module "^2.0.0" + y18n "^4.0.0" + yargs-parser "^13.1.2" + +yargs@^15.3.0, yargs@^15.3.1, 
yargs@^15.4.1: + version "15.4.1" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-15.4.1.tgz#0d87a16de01aee9d8bec2bfbf74f67851730f4f8" + integrity sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A== + dependencies: + cliui "^6.0.0" + decamelize "^1.2.0" + find-up "^4.1.0" + get-caller-file "^2.0.1" + require-directory "^2.1.1" + require-main-filename "^2.0.0" + set-blocking "^2.0.0" + string-width "^4.2.0" + which-module "^2.0.0" + y18n "^4.0.0" + yargs-parser "^18.1.2" + +yarn@^1.21.1: + version "1.22.4" + resolved "https://registry.yarnpkg.com/yarn/-/yarn-1.22.4.tgz#01c1197ca5b27f21edc8bc472cd4c8ce0e5a470e" + integrity sha512-oYM7hi/lIWm9bCoDMEWgffW8aiNZXCWeZ1/tGy0DWrN6vmzjCXIKu2Y21o8DYVBUtiktwKcNoxyGl/2iKLUNGA== + +yauzl@^2.4.2: + version "2.10.0" + resolved "https://registry.yarnpkg.com/yauzl/-/yauzl-2.10.0.tgz#c7eb17c93e112cb1086fa6d8e51fb0667b79a5f9" + integrity sha1-x+sXyT4RLLEIb6bY5R+wZnt5pfk= + dependencies: + buffer-crc32 "~0.2.3" + fd-slicer "~1.1.0" + +zip-stream@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/zip-stream/-/zip-stream-3.0.1.tgz#cb8db9d324a76c09f9b76b31a12a48638b0b9708" + integrity sha512-r+JdDipt93ttDjsOVPU5zaq5bAyY+3H19bDrThkvuVxC0xMQzU1PJcS6D+KrP3u96gH9XLomcHPb+2skoDjulQ== + dependencies: + archiver-utils "^2.1.0" + compress-commons "^3.0.0" + readable-stream "^3.6.0" + +zwitch@^1.0.0: + version "1.0.5" + resolved "https://registry.yarnpkg.com/zwitch/-/zwitch-1.0.5.tgz#d11d7381ffed16b742f6af7b3f223d5cd9fe9920" + integrity sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw== diff --git a/examples/concertbot/actions.py b/examples/concertbot/actions.py index 4a1e73798754..e218939f5881 100644 --- a/examples/concertbot/actions.py +++ b/examples/concertbot/actions.py @@ -12,7 +12,7 @@ def run(self, dispatcher, tracker, domain): {"artist": "Katy Perry", "reviews": 5.0}, ] description = ", ".join([c["artist"] for c in concerts]) - dispatcher.utter_message("{}".format(description)) + dispatcher.utter_message(text=f"{description}") return [SlotSet("concerts", concerts)] @@ -25,9 +25,9 @@ def run(self, dispatcher, tracker, domain): {"name": "Big Arena", "reviews": 4.5}, {"name": "Rock Cellar", "reviews": 5.0}, ] - dispatcher.utter_message("here are some venues I found") + dispatcher.utter_message(text="here are some venues I found") description = ", ".join([c["name"] for c in venues]) - dispatcher.utter_message("{}".format(description)) + dispatcher.utter_message(text=f"{description}") return [SlotSet("venues", venues)] @@ -37,7 +37,7 @@ def name(self): def run(self, dispatcher, tracker, domain): concerts = tracker.get_slot("concerts") - dispatcher.utter_message("concerts from slots: {}".format(concerts)) + dispatcher.utter_message(text=f"concerts from slots: {concerts}") return [] @@ -47,5 +47,5 @@ def name(self): def run(self, dispatcher, tracker, domain): venues = tracker.get_slot("venues") - dispatcher.utter_message("venues from slots: {}".format(venues)) + dispatcher.utter_message(text=f"venues from slots: {venues}") return [] diff --git a/examples/concertbot/config.yml b/examples/concertbot/config.yml index 8797ca264f7a..386f1a8cab3e 100644 --- a/examples/concertbot/config.yml +++ b/examples/concertbot/config.yml @@ -1,12 +1,25 @@ language: en -pipeline: supervised_embeddings +pipeline: + - name: "WhitespaceTokenizer" + - name: "RegexFeaturizer" + - name: "LexicalSyntacticFeaturizer" + - name: "CountVectorsFeaturizer" + - name: "CountVectorsFeaturizer" + 
analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: "DIETClassifier" + - name: FallbackClassifier + threshold: 0.4 + ambiguity_threshold: 0.1 + - name: "EntitySynonymMapper" policies: - - name: KerasPolicy + - name: TEDPolicy + max_history: 5 epochs: 200 batch_size: 50 max_training_samples: 300 - - name: FallbackPolicy - name: MemoizationPolicy - - name: MappingPolicy + - name: RulePolicy diff --git a/examples/concertbot/data/nlu.yaml b/examples/concertbot/data/nlu.yaml new file mode 100644 index 000000000000..d0ecdd6feb8d --- /dev/null +++ b/examples/concertbot/data/nlu.yaml @@ -0,0 +1,41 @@ +version: "2.0" + +nlu: + - intent: greet + examples: | + - hi + - hello + - how are you + - good morning + - good evening + - hey + + - intent: goodbye + examples: | + - bye + - goodbye + - ciao + + - intent: thankyou + examples: | + - thanks + - thank you + - thanks friend + + - intent: search_concerts + examples: | + - Find me some good concerts + - Show me concerts + - search concerts + + - intent: search_venues + examples: | + - Find me some good venues + - Show me venues + - search venues + + - intent: compare_reviews + examples: | + - compare reviews + - show me a comparison of the reviews + diff --git a/examples/concertbot/data/rules.yaml b/examples/concertbot/data/rules.yaml new file mode 100644 index 000000000000..4afa5a6c96c5 --- /dev/null +++ b/examples/concertbot/data/rules.yaml @@ -0,0 +1,24 @@ +version: "2.0" + +rules: + - rule: fallback + steps: + - intent: nlu_fallback + - action: utter_default + - rule: greet + steps: + - intent: greet + - action: utter_greet + - rule: thankyou + steps: + - intent: thankyou + - action: utter_youarewelcome + - rule: goodbye + steps: + - intent: goodbye + - action: utter_goodbye + - rule: challenge + steps: + - intent: bot_challenge + - action: utter_iamabot + diff --git a/examples/concertbot/data/stories.md b/examples/concertbot/data/stories.md deleted file mode 100644 index 70d14d111fd0..000000000000 --- a/examples/concertbot/data/stories.md +++ /dev/null @@ -1,39 +0,0 @@ -## greet -* greet - - utter_greet - -## happy -* thankyou - - utter_youarewelcome - -## goodbye -* goodbye - - utter_goodbye - -## venue_search -* search_venues - - action_search_venues - - slot{"venues": [{"name": "Big Arena", "reviews": 4.5}]} - -## concert_search -* search_concerts - - action_search_concerts - - slot{"concerts": [{"artist": "Foo Fighters", "reviews": 4.5}]} - -## compare_reviews_venues -* search_venues - - action_search_venues - - slot{"venues": [{"name": "Big Arena", "reviews": 4.5}]} -* compare_reviews - - action_show_venue_reviews - -## compare_reviews_concerts -* search_concerts - - action_search_concerts - - slot{"concerts": [{"artist": "Foo Fighters", "reviews": 4.5}]} -* compare_reviews - - action_show_concert_reviews - -## bot challenge -* bot_challenge - - utter_iamabot diff --git a/examples/concertbot/data/stories.yaml b/examples/concertbot/data/stories.yaml new file mode 100644 index 000000000000..42be80fed7f6 --- /dev/null +++ b/examples/concertbot/data/stories.yaml @@ -0,0 +1,34 @@ +version: "2.0" + +stories: + - story: search_venues + steps: + - intent: search_venues + - action: action_search_venues + - slot_was_set: + - venues: [{"name": "Big Arena", "reviews": 4.5}] + + - story: search_concerts + steps: + - intent: search_concerts + - action: action_search_concerts + - slot_was_set: + - concerts: [{"artist": "Foo Fighters", "reviews": 4.5}] + + - story: compare_reviews_venues + steps: + - intent: search_venues + - action: 
action_search_venues + - slot_was_set: + - venues: [{"name": "Big Arena", "reviews": 4.5}] + - intent: compare_reviews + - action: action_show_venue_reviews + + - story: compare_reviews_concerts + steps: + - intent: search_concerts + - action: action_search_concerts + - slot_was_set: + - concerts: [{"artist": "Foo Fighters", "reviews": 4.5}] + - intent: compare_reviews + - action: action_show_concert_reviews diff --git a/examples/concertbot/domain.yml b/examples/concertbot/domain.yml index bbcd2a201dc5..2deb1f978de5 100644 --- a/examples/concertbot/domain.yml +++ b/examples/concertbot/domain.yml @@ -1,3 +1,5 @@ +version: "2.0" + slots: concerts: type: list @@ -12,11 +14,12 @@ intents: - search_venues - compare_reviews - bot_challenge + - nlu_fallback entities: - name -templates: +responses: utter_greet: - text: "hey there!" utter_goodbye: @@ -29,12 +32,11 @@ templates: - text: "I am a bot, powered by Rasa." actions: - - utter_default - - utter_greet - - utter_goodbye - - utter_youarewelcome - action_search_concerts - action_search_venues - action_show_concert_reviews - action_show_venue_reviews - - utter_iamabot + +session_config: + session_expiration_time: 60 # value in minutes + carry_over_slots_to_new_session: true diff --git a/examples/formbot/actions.py b/examples/formbot/actions.py index 935666a77eac..8ab8310083b8 100644 --- a/examples/formbot/actions.py +++ b/examples/formbot/actions.py @@ -1,5 +1,4 @@ -# -*- coding: utf-8 -*- -from typing import Dict, Text, Any, List, Union, Optional +from typing import Dict, Text, Any, List, Union from rasa_sdk import Tracker from rasa_sdk.executor import CollectingDispatcher @@ -7,16 +6,16 @@ class RestaurantForm(FormAction): - """Example of a custom form action""" + """Example of a custom form action.""" def name(self) -> Text: - """Unique identifier of the form""" + """Unique identifier of the form.""" return "restaurant_form" @staticmethod def required_slots(tracker: Tracker) -> List[Text]: - """A list of required slots that the form has to fill""" + """A list of required slots that the form has to fill.""" return ["cuisine", "num_people", "outdoor_seating", "preferences", "feedback"] @@ -25,15 +24,14 @@ def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]: - an extracted entity - intent: value pairs - a whole message - or a list of them, where a first match will be picked""" + or a list of them, where a first match will be picked.""" return { "cuisine": self.from_entity(entity="cuisine", not_intent="chitchat"), "num_people": [ self.from_entity( - entity="num_people", intent=["inform", "request_restaurant"] + entity="number", intent=["inform", "request_restaurant"] ), - self.from_entity(entity="number"), ], "outdoor_seating": [ self.from_entity(entity="seating"), @@ -47,10 +45,9 @@ def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]: "feedback": [self.from_entity(entity="feedback"), self.from_text()], } - # USED FOR DOCS: do not rename without updating in docs @staticmethod def cuisine_db() -> List[Text]: - """Database of supported cuisines""" + """Database of supported cuisines.""" return [ "caribbean", @@ -64,7 +61,7 @@ def cuisine_db() -> List[Text]: @staticmethod def is_int(string: Text) -> bool: - """Check if a string is an integer""" + """Check if a string is an integer.""" try: int(string) @@ -72,7 +69,6 @@ def is_int(string: Text) -> bool: except ValueError: return False - # USED FOR DOCS: do not rename without updating in docs def validate_cuisine( self, value: Text, @@ -86,7 +82,7 @@ def validate_cuisine( # 
validation succeeded, set the value of the "cuisine" slot to value return {"cuisine": value} else: - dispatcher.utter_template("utter_wrong_cuisine", tracker) + dispatcher.utter_message(template="utter_wrong_cuisine") # validation failed, set this slot to None, meaning the # user will be asked for the slot again return {"cuisine": None} @@ -103,7 +99,7 @@ def validate_num_people( if self.is_int(value) and int(value) > 0: return {"num_people": value} else: - dispatcher.utter_template("utter_wrong_num_people", tracker) + dispatcher.utter_message(template="utter_wrong_num_people") # validation failed, set slot to None return {"num_people": None} @@ -124,12 +120,12 @@ def validate_outdoor_seating( # convert "in..." to False return {"outdoor_seating": False} else: - dispatcher.utter_template("utter_wrong_outdoor_seating", tracker) + dispatcher.utter_message(template="utter_wrong_outdoor_seating") # validation failed, set slot to None return {"outdoor_seating": None} else: - # affirm/deny was picked up as T/F + # affirm/deny was picked up as True/False by the from_intent mapping return {"outdoor_seating": value} def submit( @@ -138,9 +134,7 @@ def submit( tracker: Tracker, domain: Dict[Text, Any], ) -> List[Dict]: - """Define what the form has to do - after all required slots are filled""" + """Define what the form has to do after all required slots are filled.""" - # utter submit template - dispatcher.utter_template("utter_submit", tracker) + dispatcher.utter_message(template="utter_submit") return [] diff --git a/examples/formbot/config.yml b/examples/formbot/config.yml index 3aa0e7577759..dff4a5a14977 100644 --- a/examples/formbot/config.yml +++ b/examples/formbot/config.yml @@ -2,18 +2,18 @@ language: en pipeline: - name: WhitespaceTokenizer - - name: CRFEntityExtractor - - name: EntitySynonymMapper + - name: LexicalSyntacticFeaturizer - name: CountVectorsFeaturizer token_pattern: (?u)\b\w+\b - - name: EmbeddingIntentClassifier - name: DucklingHTTPExtractor url: http://localhost:8000 dimensions: - number + - name: DIETClassifier + epochs: 100 + - name: EntitySynonymMapper policies: - - name: FallbackPolicy - - name: MemoizationPolicy - - name: FormPolicy - - name: MappingPolicy + - name: TEDPolicy + - name: AugmentedMemoizationPolicy + - name: RulePolicy diff --git a/examples/formbot/data/nlu.yml b/examples/formbot/data/nlu.yml new file mode 100644 index 000000000000..1cace26e0b44 --- /dev/null +++ b/examples/formbot/data/nlu.yml @@ -0,0 +1,316 @@ +nlu: +- intent: greet + examples: | + - Hi + - Hey + - Hi bot + - Hey bot + - Hello + - Good morning + - hi again + - hi folks + - hi Mister + - hi pal! + - hi there + - greetings + - hello everybody + - hello is anybody there + - hello robot + - hallo + - heeey + - hi hi + - hey + - hey hey + - hello there + - hi + - hello + - yo + - hola + - hi? + - hey bot! 
+ - hello friend + +- intent: request_restaurant + examples: | + - im looking for a restaurant + - can i get [swedish](cuisine) food in any area + - a restaurant that serves [caribbean](cuisine) food + - id like a restaurant + - im looking for a restaurant that serves [mediterranean](cuisine) food + - can i find a restaurant that serves [chinese](cuisine) + - i am looking for any place that serves [indonesian](cuisine) food for three + - i need to find a restaurant + - uh im looking for a restaurant that serves [kosher](cuisine) food + - uh can i find a restaurant and it should serve [brazilian](cuisine) food + - im looking for a restaurant serving [italian](cuisine) food + - restaurant please + - i'd like to book a table for two with [spanish](cuisine) cuisine + - i need a table for 4 + - book me a table for three at the [italian](cuisine) restaurant + - can you please book a table for 5? + - I would like to book a table for 2 + - looking for a table at the [mexican](cuisine) restaurant for five + - find me a table for 7 people + - Can I get a table for four at the place which server [greek](cuisine) food? + +- intent: affirm + examples: | + - yeah a cheap restaurant serving international food + - correct + - ye + - uh yes + - let's do it + - yeah + - uh yes + - um yes + - yes knocking + - that's correct + - yes yes + - right + - yea + - yes + - yes right + - yes and i dont care + - right on + - i love that + +- intent: deny + examples: | + - no + - no new selection + - no thanks + - no thank you + - uh no + - breath no + - do you have something else + - no this does not work for me + +- intent: inform + examples: | + - [afghan](cuisine) food + - how bout [asian oriental](cuisine) + - what about [indian](cuisine) food + - uh how about [turkish](cuisine) type of food + - um [english](cuisine) + - im looking for [tuscan](cuisine) food + - id like [moroccan](cuisine) food + - [seafood](cuisine) + - [french](cuisine) food + - serves [british](cuisine) food + - id like [canapes](cuisine) + - serving [jamaican](cuisine) food + - um what about [italian](cuisine) food + - im looking for [corsica](cuisine) food + - im looking for [world](cuisine) food + - serves [french](cuisine) food + - how about [indian](cuisine) food + - can i get [chinese](cuisine) food + - [irish](cuisine) food + - [english](cuisine) food + - [spanish](cuisine) food + - how bout one that serves [portuguese](cuisine) food and is cheap + - [german](cuisine) + - [korean](cuisine) food + - im looking for [romanian](cuisine) food + - serves [canapes](cuisine) food + - [gastropub](cuisine) + - i want [french](cuisine) food + - how about [modern european](cuisine) type of food + - it should serve [scandinavian](cuisine) food + - how [european](cuisine) + - how about [european](cuisine) food + - serves [traditional](cuisine) food + - [indonesian](cuisine) food + - [modern european](cuisine) + - serves [brazilian](cuisine) + - i would like [modern european](cuisine) food + - looking for [lebanese](cuisine) food + - [portuguese](cuisine) + - [european](cuisine) + - i want [polish](cuisine) food + - id like [thai](cuisine) + - i want to find [moroccan](cuisine) food + - [afghan](cuisine) + - [scottish](cuisine) food + - how about [vietnamese](cuisine) + - hi im looking for [mexican](cuisine) food + - how about [indian](cuisine) type of food + - [polynesian](cuisine) food + - [mexican](cuisine) + - instead could it be for four people + - any [japanese](cuisine) food + - what about [thai](cuisine) food + - how about [asian 
oriental](cuisine) food + - im looking for [japanese](cuisine) food + - im looking for [belgian](cuisine) food + - im looking for [turkish](cuisine) food + - serving [corsica](cuisine) food + - serving [gastro pub](cuisine:gastropub) + - is there [british](cuisine) food + - [world](cuisine) food + - im looking for something serves [japanese](cuisine) food + - id like a [greek](cuisine) + - im looking for [malaysian](cuisine) food + - i want to find [world](cuisine) food + - serves [pan asian](cuisine:asian) food + - looking for [afghan](cuisine) food + - that serves [portuguese](cuisine) food + - [asian oriental](cuisine:asian) food + - [russian](cuisine) food + - [corsica](cuisine) + - [asian oriental](cuisine:asian) + - serving [basque](cuisine) food + - how about [italian](cuisine) + - looking for [spanish](cuisine) food in the center of town + - it should serve [gastropub](cuisine) food + - [welsh](cuisine) food + - i want [vegetarian](cuisine) food + - im looking for [swedish](cuisine) food + - um how about [chinese](cuisine) food + - [world](cuisine) food + - can i have a [seafood](cuisine) please + - how about [italian](cuisine) food + - how about [korean](cuisine) + - [corsica](cuisine) food + - [scandinavian](cuisine) + - [vegetarian](cuisine) food + - what about [italian](cuisine) + - how about [portuguese](cuisine) food + - serving [french](cuisine) food + - [tuscan](cuisine) food + - how about uh [gastropub](cuisine) + - im looking for [creative](cuisine) food + - im looking for [malaysian](cuisine) food + - im looking for [unusual](cuisine) food + - [danish](cuisine) food + - how about [spanish](cuisine) food + - im looking for [vietnamese](cuisine) food + - [spanish](cuisine) + - a restaurant serving [romanian](cuisine) food + - im looking for [lebanese](cuisine) food + - [italian](cuisine) food + - a restaurant with [afghan](cuisine) food + - im looking for [traditional](cuisine) food + - uh i want [cantonese](cuisine) food + - im looking for [thai](cuisine) + - i want to seat [outside](seating) + - i want to seat [inside](seating) + - i want to seat [outdoor](seating) + - i want to seat [indoor](seating) + - let's go [inside](seating) + - [inside](seating) + - [outdoor](seating) + - prefer sitting [indoors](seating) + - I would like to seat [inside](seating) please + - I prefer sitting [outside](seating) + - my feedback is [good](feedback) + - my feedback is [great](feedback) + - it was [terrible](feedback) + - i consider it [success](feedback) + - you are [awful](feedback) + - for ten people + - 2 people + - for three people + - just one person + - book for seven people + - 2 please + - nine people + +- intent: thankyou + examples: | + - um thank you good bye + - okay cool uh good bye thank you + - okay thank you good bye + - you rock + - and thats all thank you and good bye + - thank you and good bye + - sorry about my mistakes thank you good bye + - noise thank you good bye + - thank you goodbye noise + - okay thank you goodbye + - uh thank you good bye + - thank you goodbye + - thank you goodbye noise thank you goodbye + - breath thank you goodbye + - thank you + - okay thank you + - thanks goodbye + - ah thank you goodbye + - thank you noise + - thank you good bye + - breath thank you very much goodbye + - thanks + - noise thank you goodbye + - unintelligible thank you goodbye + - uh okay thank you good bye + - thank you bye + - um okay thank you good bye + +- intent: chitchat + examples: | + - can you share your boss with me? 
+ - i want to get to know your owner + - i want to know the company which designed you + - i want to know the company which generated you + - i want to know the company which invented you + - i want to know who invented you + - May I ask who invented you? + - please tell me the company who created you + - please tell me who created you + - tell me more about your creators + - tell me more about your founders + - Ahoy matey how are you? + - are you alright + - are you having a good day + - Are you ok? + - are you okay + - Do you feel good? + - how are things going + - how are things with you? + - How are things? + - how are you + - how are you doing + - how are you doing this morning + - how are you feeling + - how are you today + - How are you? + - How is the weather today? + - What's the weather like? + - How is the weather? + - What is the weather at your place? + - Do you have good weather? + - Is it raining? + - What's it like out there? + - Is it hot or cold? + - Beautiful day, isn't it? + - What's the weather forecast? + - Is it quite breezy outside? + +- intent: stop + examples: | + - ok then you cant help me + - that was shit, you're not helping + - you can't help me + - you can't help me with what i need + - i guess you can't help me then + - ok i guess you can't help me + - that's not what i want + - ok, but that doesnt help me + - this is leading to nothing + - this conversation is not really helpful + - you cannot help me with what I want + - I think you cant help me + - hm i don't think you can do what i want + - stop + - stop go back + - do you get anything? + - and you call yourself bot company? pff + - and that's it? + - nothing else? + +- intent: bot_challenge + examples: | + - are you a bot? + - are you a human? + - am I talking to a bot? + - am I talking to a human? 
diff --git a/examples/formbot/data/rules.yml b/examples/formbot/data/rules.yml new file mode 100644 index 000000000000..5c21d765f672 --- /dev/null +++ b/examples/formbot/data/rules.yml @@ -0,0 +1,35 @@ +rules: + - rule: Greet user + steps: + - intent: greet + - action: utter_greet + + - rule: Thank you + steps: + - intent: thankyou + - action: utter_noworries + + - rule: Bot challenge + steps: + - intent: bot_challenge + - action: utter_iamabot + + - rule: activate restaurant form + steps: + - intent: request_restaurant + - action: restaurant_form + - active_loop: restaurant_form + + - rule: Chitchat + steps: + - intent: chitchat + - action: utter_chitchat + + - rule: submit form + condition: + - active_loop: restaurant_form + steps: + - action: restaurant_form + - active_loop: null + - action: utter_submit + - action: utter_slots_values diff --git a/examples/formbot/data/stories.md b/examples/formbot/data/stories.md deleted file mode 100644 index 6fc9be0da674..000000000000 --- a/examples/formbot/data/stories.md +++ /dev/null @@ -1,192 +0,0 @@ -## happy path -* greet - - utter_greet -* request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} - - form{"name": null} - - utter_slots_values -* thankyou - - utter_noworries - -## unhappy path -* greet - - utter_greet -* request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} -* chitchat - - utter_chitchat - - restaurant_form - - form{"name": null} - - utter_slots_values -* thankyou - - utter_noworries - -## very unhappy path -* greet - - utter_greet -* request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} -* chitchat - - utter_chitchat - - restaurant_form -* chitchat - - utter_chitchat - - restaurant_form -* chitchat - - utter_chitchat - - restaurant_form - - form{"name": null} - - utter_slots_values -* thankyou - - utter_noworries - -## stop but continue path -* greet - - utter_greet -* request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} -* stop - - utter_ask_continue -* affirm - - restaurant_form - - form{"name": null} - - utter_slots_values -* thankyou - - utter_noworries - -## stop and really stop path -* greet - - utter_greet -* request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} -* stop - - utter_ask_continue -* deny - - action_deactivate_form - - form{"name": null} - -## chitchat stop but continue path -* request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} -* chitchat - - utter_chitchat - - restaurant_form -* stop - - utter_ask_continue -* affirm - - restaurant_form - - form{"name": null} - - utter_slots_values -* thankyou - - utter_noworries - -## stop but continue and chitchat path -* greet - - utter_greet -* request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} -* stop - - utter_ask_continue -* affirm - - restaurant_form -* chitchat - - utter_chitchat - - restaurant_form - - form{"name": null} - - utter_slots_values -* thankyou - - utter_noworries - -## chitchat stop but continue and chitchat path -* greet - - utter_greet -* request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} -* chitchat - - utter_chitchat - - restaurant_form -* stop - - utter_ask_continue -* affirm - - restaurant_form -* chitchat - - utter_chitchat - - restaurant_form - - form{"name": null} - - utter_slots_values -* thankyou - - utter_noworries - -## chitchat, stop and really stop path -* greet - - utter_greet -* request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} -* chitchat - 
- utter_chitchat - - restaurant_form -* stop - - utter_ask_continue -* deny - - action_deactivate_form - - form{"name": null} - -## Generated Story 3490283781720101690 (example from interactive learning, "form: " will be excluded from training) -* greet - - utter_greet -* request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} - - slot{"requested_slot": "cuisine"} -* chitchat - - utter_chitchat <!-- restaurant_form was predicted by FormPolicy and rejected, other policy predicted utter_chitchat --> - - restaurant_form - - slot{"requested_slot": "cuisine"} -* form: inform{"cuisine": "mexican"} - - slot{"cuisine": "mexican"} - - form: restaurant_form - - slot{"cuisine": "mexican"} - - slot{"requested_slot": "num_people"} -* form: inform{"number": "2"} - - form: restaurant_form - - slot{"num_people": "2"} - - slot{"requested_slot": "outdoor_seating"} -* chitchat - - utter_chitchat - - restaurant_form - - slot{"requested_slot": "outdoor_seating"} -* stop - - utter_ask_continue -* affirm - - restaurant_form <!-- FormPolicy predicted FormValidation(False), other policy predicted restaurant_form --> - - slot{"requested_slot": "outdoor_seating"} -* form: affirm - - form: restaurant_form - - slot{"outdoor_seating": true} - - slot{"requested_slot": "preferences"} -* form: inform - - form: restaurant_form - - slot{"preferences": "/inform"} - - slot{"requested_slot": "feedback"} -* form: inform{"feedback": "great"} - - slot{"feedback": "great"} - - form: restaurant_form - - slot{"feedback": "great"} - - form{"name": null} - - slot{"requested_slot": null} - - utter_slots_values -* thankyou - - utter_noworries - -## bot challenge -* bot_challenge - - utter_iamabot diff --git a/examples/formbot/data/stories.yml b/examples/formbot/data/stories.yml new file mode 100644 index 000000000000..239451c9513c --- /dev/null +++ b/examples/formbot/data/stories.yml @@ -0,0 +1,24 @@ +stories: + - story: stop form + continue + steps: + - intent: request_restaurant + - action: restaurant_form + - active_loop: restaurant_form + - intent: stop + - action: utter_ask_continue + - intent: affirm + - action: restaurant_form + - active_loop: null + - action: utter_submit + - action: utter_slots_values + + - story: stop form + stop + steps: + - intent: request_restaurant + - action: restaurant_form + - active_loop: restaurant_form + - intent: stop + - action: utter_ask_continue + - intent: deny + - action: action_deactivate_form + - active_loop: null diff --git a/examples/formbot/domain.yml b/examples/formbot/domain.yml index dc8a4b729d6e..03f402348f75 100644 --- a/examples/formbot/domain.yml +++ b/examples/formbot/domain.yml @@ -13,7 +13,6 @@ intents: entities: - cuisine - - num_people - number - feedback - seating @@ -37,7 +36,7 @@ slots: requested_slot: type: unfeaturized -templates: +responses: utter_ask_cuisine: - text: "what cuisine?" utter_ask_num_people: @@ -49,7 +48,7 @@ templates: utter_ask_feedback: - text: "please give your feedback on your experience so far" utter_submit: - - text: "All done!" + - text: "All done!" utter_slots_values: - text: "I am going to run a restaurant search using the following parameters:\n - cuisine: {cuisine}\n @@ -75,14 +74,39 @@ templates: - text: "Hello! I am restaurant search assistant! How can I help?" utter_iamabot: - text: "I am a bot, powered by Rasa." 
- -actions: - - utter_slots_values - - utter_noworries - - utter_chitchat - - utter_ask_continue - - utter_greet - - utter_iamabot + utter_restart: + - text: "restarted" forms: - - restaurant_form + - restaurant_form: + cuisine: + - type: from_entity + entity: cuisine + not_intent: chitchat + num_people: + - type: from_entity + entity: number + intent: [inform, request_restaurant] + outdoor_seating: + - type: from_entity + entity: seating + - type: from_intent + intent: affirm + value: true + - type: from_intent + intent: deny + value: false + preferences: + - type: from_intent + intent: deny + value: no additional preferences + - type: from_text + not_intent: affirm + feedback: + - type: from_entity + entity: feedback + - type: from_text + +session_config: + session_expiration_time: 60 # value in minutes + carry_over_slots_to_new_session: true diff --git a/examples/formbot/tests/end-to-end-stories.md b/examples/formbot/tests/end-to-end-stories.md new file mode 100644 index 000000000000..1b9c4700514d --- /dev/null +++ b/examples/formbot/tests/end-to-end-stories.md @@ -0,0 +1,37 @@ +## Happy path +* greet: hi + - utter_greet +* request_restaurant: im looking for a restaurant + - restaurant_form + - form{"name": "restaurant_form"} + - form{"name": null} + - utter_slots_values +* thankyou: thanks + - utter_noworries + +## Happy path with message providing requested value +* greet: hi + - utter_greet +* request_restaurant: im looking for a restaurant + - restaurant_form + - form{"name": "restaurant_form"} +* inform: [afghan](cuisine) food + - restaurant_form + - form{"name": null} + - utter_slots_values +* thankyou: thanks + - utter_noworries + +## unhappy path +* greet: hi + - utter_greet +* request_restaurant: im looking for a restaurant + - restaurant_form + - form{"name": "restaurant_form"} +* chitchat: can you share your boss with me? + - utter_chitchat + - restaurant_form + - form{"name": null} + - utter_slots_values +* thankyou: thanks + - utter_noworries \ No newline at end of file diff --git a/examples/knowledgebasebot/config.yml b/examples/knowledgebasebot/config.yml index 1437093030ba..f2e29f8f3464 100644 --- a/examples/knowledgebasebot/config.yml +++ b/examples/knowledgebasebot/config.yml @@ -1,6 +1,18 @@ language: en -pipeline: supervised_embeddings + +pipeline: + - name: "WhitespaceTokenizer" + - name: "RegexFeaturizer" + - name: "LexicalSyntacticFeaturizer" + - name: "CountVectorsFeaturizer" + - name: "CountVectorsFeaturizer" + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: "DIETClassifier" + epochs: 100 + - name: "EntitySynonymMapper" policies: - - name: MemoizationPolicy - - name: KerasPolicy + - name: RulePolicy + diff --git a/examples/knowledgebasebot/data/nlu.md b/examples/knowledgebasebot/data/nlu.md deleted file mode 100644 index f3b704c0d772..000000000000 --- a/examples/knowledgebasebot/data/nlu.md +++ /dev/null @@ -1,69 +0,0 @@ -## intent:greet -- hey -- hello -- hi -- good morning -- good evening -- hey there - -## intent:goodbye -- bye -- goodbye -- see you around -- see you later - -## intent:query_knowledge_base -- what [restaurants](object_type:restaurant) can you recommend? -- list some [restaurants](object_type:restaurant) -- can you name some [restaurants](object_type:restaurant) please? -- can you show me some [restaurant](object_type:restaurant) options -- list [German](cuisine) [restaurants](object_type:restaurant) -- do you have any [mexican](cuisine) [restaurants](object_type:restaurant)? 
-- do you know the [price range](attribute:price-range) of [that one](mention)? -- what [cuisine](attribute) is [it](mention)? -- do you know what [cuisine](attribute) the [last one](mention:LAST) has? -- does [Donath](restaurant) have [outside seating](attribute:outside-seating)? -- what is the [price range](attribute:price-range) of [Berlin Burrito Company](restaurant)? -- what is with [I due forni](restaurant)? -- Do you also have any [Vietnamese](cuisine) [restaurants](object_type:restaurant)? -- What about any [Mexican](cuisine) [restaurants](object_type:restaurant)? -- Do you also know some [Italian](cuisine) [restaurants](object_type:restaurant)? -- can you tell me the [price range](attribute) of [that restaurant](mention)? -- what [cuisine](attribute) do [they](mention) have? -- what [hotels](object_type:hotel) can you recommend? -- please list some [hotels](object_type:hotel) in [Frankfurt am Main](city) for me -- what [hotels](object_type:hotel) do you know in [Berlin](city)? -- name some [hotels](object_type:hotel) in [Berlin](city) -- show me some [hotels](object_type:hotel) -- what are [hotels](object_type:hotel) in [Berlin](city) -- does the [last](mention:LAST) one offer [breakfast](attribute:breakfast-included)? -- does the [second one](mention:2) [include breakfast](breakfast-included)? -- what is the [price range](attribute:price-range) of the [second](mention:2) hotel? -- does the [first](mention:1) one has [wifi](attribute:free-wifi)? -- does the [third](mention:3) one has a [swimming pool](attribute:swimming-pool)? -- what is the [star rating](attribute:star-rating) of [Berlin Wall Hostel](hotel)? -- Does the [Hilton](hotel) has a [swimming pool](attribute:swimming-pool)? - - -## lookup:restaurant -- Donath -- Berlin Burrito Company -- I due forni -- Lụa Restaurant -- Pfefferberg -- Marubi Ramen -- Gong Gan - -## lookup:hotel -- Hilton -- B&B -- Berlin Wall Hostel -- City Hotel -- Jugendherberge -- Berlin Hotel - -## intent:bot_challenge -- are you a bot? -- are you a human? -- am I talking to a bot? -- am I talking to a human? \ No newline at end of file diff --git a/examples/knowledgebasebot/data/nlu.yaml b/examples/knowledgebasebot/data/nlu.yaml new file mode 100644 index 000000000000..f93e032c8adf --- /dev/null +++ b/examples/knowledgebasebot/data/nlu.yaml @@ -0,0 +1,77 @@ +version: "2.0" + +nlu: + - intent: bot_challenge + examples: | + - are you a bot? + - are you a human? + - am I talking to a bot? + - am I talking to a human? + + - intent: greet + examples: | + - hey + - hello + - hi + - good morning + - good evening + - hey there + + - intent: goodbye + examples: | + - bye + - goodbye + - see you around + - see you later + + - intent: query_knowledge_base + examples: | + - what [restaurants]{"entity": "object_type", "value": "restaurant"} can you recommend? + - list some [restaurants]{"entity": "object_type", "value": "restaurant"} + - can you name some [restaurants]{"entity": "object_type", "value": "restaurant"} please? + - can you show me some [restaurant]{"entity": "object_type", "value": "restaurant"} options + - list [German]{"entity": "cuisine"} [restaurants]{"entity": "object_type", "value": "restaurant"} + - do you have any [mexican]{"entity": "cuisine"} [restaurants]{"entity": "object_type", "value": "restaurant"}? + - do you know the [price range]{"entity": "attribute", "value": "price-range"} of [that one]{"entity": "mention"}? + - what [cuisine]{"entity": "attribute"} is [it]{"entity": "mention"}? 
+ - do you know what [cuisine]{"entity": "attribute"} the [last one]{"entity": "mention", "value": "LAST"} has? + - does [Donath]{"entity": "restaurant"} have [outside seating]{"entity": "attribute", "value": "outside-seating"}? + - what is the [price range]{"entity": "attribute", "value": "price-range"} of [Berlin Burrito Company]{"entity": "restaurant"}? + - what is with [I due forni]{"entity": "restaurant"}? + - Do you also have any [Vietnamese]{"entity": "cuisine"} [restaurants]{"entity": "object_type", "value": "restaurant"}? + - What about any [Mexican]{"entity": "cuisine", "value": "mexican"} [restaurants]{"entity": "object_type", "value": "restaurant"}? + - Do you also know some [Italian]{"entity": "cuisine"} [restaurants]{"entity": "object_type", "value": "restaurant"}? + - can you tell me the [price range]{"entity": "attribute", "value": "price-range"} of [that restaurant]{"entity": "mention"}? + - what [cuisine]{"entity": "attribute"} do [they]{"entity": "mention"} have? + - what [hotels]{"entity": "object_type", "value": "hotel"} can you recommend? + - please list some [hotels]{"entity": "object_type", "value": "hotel"} in [Frankfurt am Main]{"entity": "city"} for me + - what [hotels]{"entity": "object_type", "value": "hotel"} do you know in [Berlin]{"entity": "city"}? + - name some [hotels]{"entity": "object_type", "value": "hotel"} in [Berlin]{"entity": "city"} + - show me some [hotels]{"entity": "object_type", "value": "hotel"} + - what are [hotels]{"entity": "object_type", "value": "hotel"} in [Berlin]{"entity": "city"} + - does the [last]{"entity": "mention", "value": "LAST"} one offer [breakfast]{"entity": "attribute", "value": "breakfast-included"}? + - does the [second one]{"entity": "mention", "value": "2"} [include breakfast]{"entity": "attribute", "value": "breakfast-included"}? + - what is the [price range]{"entity": "attribute", "value": "price-range"} of the [second]{"entity": "mention", "value": "2"} hotel? + - does the [first]{"entity": "mention", "value": "1"} one have [wifi]{"entity": "attribute", "value": "free-wifi"}? + - does the [third]{"entity": "mention", "value": "3"} one have a [swimming pool]{"entity": "attribute", "value": "swimming-pool"}? + - what is the [star rating]{"entity": "attribute", "value": "star-rating"} of [Berlin Wall Hostel]{"entity": "hotel"}? + - Does the [Hilton]{"entity": "hotel"} have a [swimming pool]{"entity": "attribute", "value": "swimming-pool"}? 
+ + - lookup: restaurant + examples: | + - Donath + - Berlin Burrito Company + - I due forni + - Lụa Restaurant + - Pfefferberg + - Marubi Ramen + - Gong Gan + + - lookup: hotel + examples: | + - Hilton + - B&B + - Berlin Wall Hostel + - City Hotel + - Jugendherberge + - Berlin Hotel diff --git a/examples/knowledgebasebot/data/rules.yaml b/examples/knowledgebasebot/data/rules.yaml new file mode 100644 index 000000000000..66211b10ff39 --- /dev/null +++ b/examples/knowledgebasebot/data/rules.yaml @@ -0,0 +1,19 @@ +version: "2.0" + +rules: + - rule: greet + steps: + - intent: greet + - action: utter_greet + - rule: goodbye + steps: + - intent: goodbye + - action: utter_goodbye + - rule: query knowledge base + steps: + - intent: query_knowledge_base + - action: action_query_knowledge_base + - rule: bot challenge + steps: + - intent: bot_challenge + - action: utter_iamabot diff --git a/examples/knowledgebasebot/data/stories.md b/examples/knowledgebasebot/data/stories.md deleted file mode 100644 index e41279ee146c..000000000000 --- a/examples/knowledgebasebot/data/stories.md +++ /dev/null @@ -1,33 +0,0 @@ -## Happy path 1 -* greet - - utter_greet -* query_knowledge_base - - action_query_knowledge_base -* goodbye - - utter_goodbye - -## Happy path 2 -* greet - - utter_greet -* query_knowledge_base - - action_query_knowledge_base -* query_knowledge_base - - action_query_knowledge_base -* goodbye - - utter_goodbye - -## Hello -* greet -- utter_greet - -## Query Knowledge Base -* query_knowledge_base -- action_query_knowledge_base - -## Bye -* goodbye -- utter_goodbye - -## bot challenge -* bot_challenge - - utter_iamabot diff --git a/examples/knowledgebasebot/domain.yml b/examples/knowledgebasebot/domain.yml index 4979e04da85e..28df3ae4f9ff 100644 --- a/examples/knowledgebasebot/domain.yml +++ b/examples/knowledgebasebot/domain.yml @@ -1,3 +1,5 @@ +version: "2.0" + intents: - greet - goodbye @@ -30,13 +32,9 @@ slots: type: unfeaturized actions: -- utter_greet -- utter_goodbye -- utter_ask_rephrase -- utter_iamabot - action_query_knowledge_base -templates: +responses: utter_greet: - text: "Hey!" - text: "Hello! How can I help you?" @@ -51,3 +49,8 @@ templates: utter_iamabot: - text: "I am a bot, powered by Rasa." 
+ + +session_config: + session_expiration_time: 60 # value in minutes + carry_over_slots_to_new_session: true diff --git a/examples/moodbot/config.yml b/examples/moodbot/config.yml index 51e8629294d2..5b8a3ffbaa66 100644 --- a/examples/moodbot/config.yml +++ b/examples/moodbot/config.yml @@ -1,8 +1,16 @@ language: en -pipeline: "pretrained_embeddings_spacy" +pipeline: + - name: "SpacyNLP" + - name: "SpacyTokenizer" + - name: "SpacyFeaturizer" + - name: "DIETClassifier" + entity_recognition: False + epochs: 50 policies: - - name: KerasPolicy + - name: TEDPolicy + max_history: 5 + epochs: 100 - name: MemoizationPolicy - - name: MappingPolicy + - name: RulePolicy diff --git a/examples/moodbot/credentials.yml b/examples/moodbot/credentials.yml index 0ffd2ede374e..4cf859276e0d 100644 --- a/examples/moodbot/credentials.yml +++ b/examples/moodbot/credentials.yml @@ -14,9 +14,7 @@ telegram: mattermost: url: "https://chat.example.com/api/v4" - team: "community" - user: "user@user.com" - pw: "password" + token: "YOUR-TOKEN" facebook: verify: "rasa-bot" diff --git a/examples/moodbot/data/nlu.md b/examples/moodbot/data/nlu.md deleted file mode 100644 index 24344167855f..000000000000 --- a/examples/moodbot/data/nlu.md +++ /dev/null @@ -1,80 +0,0 @@ -## intent:greet -- hey -- hello -- hi -- hello there -- good morning -- good evening -- moin -- hey there -- let's go -- hey dude -- goodmorning -- goodevening -- good afternoon - -## intent:goodbye -- cu -- good by -- cee you later -- good night -- good afternoon -- bye -- goodbye -- have a nice day -- see you around -- bye bye -- see you later - -## intent:affirm -- yes -- indeed -- of course -- that sounds good -- correct - -## intent:deny -- no -- never -- I don't think so -- don't like that -- no way - -## intent:mood_great -- perfect -- very good -- great -- amazing -- feeling like a king -- wonderful -- I am feeling very good -- I am great -- I am amazing -- I am going to save the world -- super -- extremely good -- so so perfect -- so good -- so perfect - -## intent:mood_unhappy -- my day was horrible -- I am sad -- I don't feel very well -- I am disappointed -- super sad -- I'm so sad -- sad -- very sad -- unhappy -- not so good -- not very good -- extremly sad -- so saad -- so sad - -## intent:bot_challenge -- are you a bot? -- are you a human? -- am I talking to a bot? -- am I talking to a human? 
diff --git a/examples/moodbot/data/nlu.yml b/examples/moodbot/data/nlu.yml new file mode 100644 index 000000000000..d66033737e3e --- /dev/null +++ b/examples/moodbot/data/nlu.yml @@ -0,0 +1,91 @@ +version: "2.0" + +nlu: +- intent: greet + examples: | + - hey + - hello + - hi + - hello there + - good morning + - good evening + - moin + - hey there + - let's go + - hey dude + - goodmorning + - goodevening + - good afternoon + +- intent: goodbye + examples: | + - good afternoon + - cu + - good by + - cee you later + - good night + - bye + - goodbye + - have a nice day + - see you around + - bye bye + - see you later + +- intent: affirm + examples: | + - yes + - y + - indeed + - of course + - that sounds good + - correct + +- intent: deny + examples: | + - no + - n + - never + - I don't think so + - don't like that + - no way + +- intent: mood_great + examples: | + - perfect + - great + - amazing + - feeling like a king + - wonderful + - I am feeling very good + - I am great + - I am amazing + - I am going to save the world + - super stoked + - extremely good + - so so perfect + - so good + - so perfect + +- intent: mood_unhappy + examples: | + - my day was horrible + - I am sad + - I don't feel very well + - I am disappointed + - super sad + - I'm so sad + - sad + - very sad + - unhappy + - not good + - not very good + - extremly sad + - so saad + - so sad + +- intent: bot_challenge + examples: | + - are you a bot? + - are you a human? + - am I talking to a bot? + - am I talking to a human? diff --git a/examples/moodbot/data/rules.yml b/examples/moodbot/data/rules.yml new file mode 100644 index 000000000000..1a0405cbd723 --- /dev/null +++ b/examples/moodbot/data/rules.yml @@ -0,0 +1,13 @@ +version: "2.0" + +rules: + +- rule: Say goodbye anytime the user says goodbye + steps: + - intent: goodbye + - action: utter_goodbye + +- rule: Say 'I am a bot' anytime the user challenges + steps: + - intent: bot_challenge + - action: utter_iamabot diff --git a/examples/moodbot/data/stories.md b/examples/moodbot/data/stories.md deleted file mode 100644 index 78336fcd8e7c..000000000000 --- a/examples/moodbot/data/stories.md +++ /dev/null @@ -1,31 +0,0 @@ -## happy path <!-- name of the story - just for debugging --> -* greet - - utter_greet -* mood_great <!-- user utterance, in the following format: * intent{"entity_name": value} --> - - utter_happy - -## sad path 1 <!-- this is already the start of the next story --> -* greet - - utter_greet <!-- action of the bot to execute --> -* mood_unhappy - - utter_cheer_up - - utter_did_that_help -* affirm - - utter_happy - -## sad path 2 -* greet - - utter_greet -* mood_unhappy - - utter_cheer_up - - utter_did_that_help -* deny - - utter_goodbye - -## say goodbye -* goodbye - - utter_goodbye - -## bot challenge -* bot_challenge - - utter_iamabot diff --git a/examples/moodbot/data/stories.yml b/examples/moodbot/data/stories.yml new file mode 100644 index 000000000000..0959956677d5 --- /dev/null +++ b/examples/moodbot/data/stories.yml @@ -0,0 +1,31 @@ +version: "2.0" + +stories: + +- story: happy path + steps: + - intent: greet + - action: utter_greet + - intent: mood_great + - action: utter_happy + +- story: sad path 1 + steps: + - intent: greet + - action: utter_greet + - intent: mood_unhappy + - action: utter_cheer_up + - action: utter_did_that_help + - intent: affirm + - action: utter_happy + +- story: sad path 2 + steps: + - intent: greet + - action: utter_greet + - intent: mood_unhappy + - action: utter_cheer_up + - action: utter_did_that_help + - intent: deny 
+ - action: utter_goodbye + diff --git a/examples/moodbot/domain.yml b/examples/moodbot/domain.yml index 51aabb198455..815185cbccd3 100644 --- a/examples/moodbot/domain.yml +++ b/examples/moodbot/domain.yml @@ -1,3 +1,5 @@ +version: "2.0" + intents: - greet - goodbye @@ -7,15 +9,7 @@ intents: - mood_unhappy - bot_challenge -actions: -- utter_greet -- utter_cheer_up -- utter_did_that_help -- utter_happy -- utter_goodbye -- utter_iamabot - -templates: +responses: utter_greet: - text: "Hey! How are you?" buttons: @@ -39,3 +33,7 @@ templates: utter_iamabot: - text: "I am a bot, powered by Rasa." + +session_config: + session_expiration_time: 60 # value in minutes + carry_over_slots_to_new_session: true diff --git a/examples/nlg_server/nlg_server.py b/examples/nlg_server/nlg_server.py index a3582aac4b54..7e1709bd82c0 100644 --- a/examples/nlg_server/nlg_server.py +++ b/examples/nlg_server/nlg_server.py @@ -1,18 +1,18 @@ import argparse import logging +import os from sanic import Sanic, response from rasa.core.domain import Domain from rasa.core.nlg import TemplatedNaturalLanguageGenerator from rasa.core.trackers import DialogueStateTracker +from rasa.constants import ENV_SANIC_BACKLOG, DEFAULT_SANIC_WORKERS logger = logging.getLogger(__name__) DEFAULT_SERVER_PORT = 5056 -DEFAULT_SANIC_WORKERS = 1 - def create_argument_parser(): """Parse all the command line arguments for the nlg server script.""" @@ -70,7 +70,12 @@ async def nlg(request): return response.json(bot_response) - app.run(host="0.0.0.0", port=port, workers=workers) + app.run( + host="0.0.0.0", + port=port, + workers=workers, + backlog=int(os.environ.get(ENV_SANIC_BACKLOG, "100")), + ) if __name__ == "__main__": diff --git a/examples/reminderbot/README.md b/examples/reminderbot/README.md new file mode 100644 index 000000000000..be59887c4cc9 --- /dev/null +++ b/examples/reminderbot/README.md @@ -0,0 +1,63 @@ +# Reminderbot + +The `reminderbot` example demonstrates how your bot can respond to external events or reminders. + +## What’s inside this example? + +This example contains some training data and the main files needed to build an +assistant on your local machine. The `reminderbot` consists of the following files: + +- **data/nlu.yml** contains training examples for the NLU model +- **data/rules.yml** contains rules for the Core model +- **config.yml** contains the model configuration +- **domain.yml** contains the domain of the assistant +- **credentials.yml** contains credentials for the different channels +- **endpoints.yml** contains the different endpoints reminderbot can use +- **actions.py** contains the custom actions that deal with external events and reminders + +## How to use this example? + +To train and chat with `reminderbot`, execute the following steps: + +1. Train a Rasa Open Source model containing the Rasa NLU and Rasa Core models by running: + ``` + rasa train + ``` + The model will be stored in the `/models` directory as a zipped file. + +2. Run a Rasa SDK action server with + ``` + rasa run actions + ``` + +3. (Option 1) Run Rasa X to talk to your bot. In a separate console window from where you ran the step 2 command: + ``` + rasa x + ``` + +3. (Option 2) To test this example without Rasa X, run a + [callback channel](https://rasa.com/docs/rasa/user-guide/connectors/your-own-website/#callbackinput). + In a separate console window from where you ran the step 2 command: + ``` + python callback_server.py + ``` + + This will run a server that prints the bot's responses to the console. 
+ + Start your Rasa server in a third console window: + ``` + rasa run --enable-api + ``` + + You can then send messages to the bot via the callback channel endpoint: + ``` + curl -XPOST http://localhost:5005/webhooks/callback/webhook \ + -d '{"sender": "tester", "message": "hello"}' \ + -H "Content-type: application/json" + ``` + +For more information about the individual commands, please check out our +[documentation](http://rasa.com/docs/rasa/user-guide/command-line-interface/). + +## Encountered any issues? +Let us know about it by posting on [Rasa Community Forum](https://forum.rasa.com)! diff --git a/examples/restaurantbot/__init__.py b/examples/reminderbot/__init__.py similarity index 100% rename from examples/restaurantbot/__init__.py rename to examples/reminderbot/__init__.py diff --git a/examples/reminderbot/actions.py b/examples/reminderbot/actions.py new file mode 100644 index 000000000000..bfd2240ef93d --- /dev/null +++ b/examples/reminderbot/actions.py @@ -0,0 +1,124 @@ +# This file contains your custom actions which can be used to run +# custom Python code. +# +# See this guide on how to implement these actions: +# https://rasa.com/docs/rasa/core/actions/#custom-actions/ + + +# This is a simple example for an assistant that schedules reminders and +# reacts to external events. + +from typing import Any, Text, Dict, List +import datetime + +from rasa_sdk import Action, Tracker +from rasa_sdk.events import ReminderScheduled, ReminderCancelled +from rasa_sdk.executor import CollectingDispatcher + + +class ActionSetReminder(Action): + """Schedules a reminder, supplied with the last message's entities.""" + + def name(self) -> Text: + return "action_set_reminder" + + async def run( + self, + dispatcher: CollectingDispatcher, + tracker: Tracker, + domain: Dict[Text, Any], + ) -> List[Dict[Text, Any]]: + + dispatcher.utter_message("I will remind you in 5 seconds.") + + date = datetime.datetime.now() + datetime.timedelta(seconds=5) + entities = tracker.latest_message.get("entities") + + reminder = ReminderScheduled( + "EXTERNAL_reminder", + trigger_date_time=date, + entities=entities, + name="my_reminder", + kill_on_user_message=False, + ) + + return [reminder] + + +class ActionReactToReminder(Action): + """Reminds the user to call someone.""" + + def name(self) -> Text: + return "action_react_to_reminder" + + async def run( + self, + dispatcher: CollectingDispatcher, + tracker: Tracker, + domain: Dict[Text, Any], + ) -> List[Dict[Text, Any]]: + + name = next(tracker.get_latest_entity_values("name"), "someone") + dispatcher.utter_message(f"Remember to call {name}!") + + return [] + + +class ActionTellID(Action): + """Informs the user about the conversation ID.""" + + def name(self) -> Text: + return "action_tell_id" + + async def run( + self, dispatcher, tracker: Tracker, domain: Dict[Text, Any] + ) -> List[Dict[Text, Any]]: + + conversation_id = tracker.sender_id + + dispatcher.utter_message(f"The ID of this conversation is '{conversation_id}'.") + dispatcher.utter_message( + f"Trigger an intent with: \n" + f'curl -H "Content-Type: application/json" ' + f'-X POST -d \'{{"name": "EXTERNAL_dry_plant", ' + f'"entities": {{"plant": "Orchid"}}}}\' ' + f'"http://localhost:5005/conversations/{conversation_id}' + f'/trigger_intent?output_channel=latest"' + ) + + return [] + + +class ActionWarnDry(Action): + """Informs the user that a plant needs water.""" + + def name(self) -> Text: + return "action_warn_dry" + + async def run( + self, + dispatcher: CollectingDispatcher, + tracker:
Tracker, + domain: Dict[Text, Any], + ) -> List[Dict[Text, Any]]: + + plant = next(tracker.get_latest_entity_values("plant"), "someone") + dispatcher.utter_message(f"Your {plant} needs some water!") + + return [] + + +class ForgetReminders(Action): + """Cancels all reminders.""" + + def name(self) -> Text: + return "action_forget_reminders" + + async def run( + self, dispatcher, tracker: Tracker, domain: Dict[Text, Any] + ) -> List[Dict[Text, Any]]: + + dispatcher.utter_message(f"Okay, I'll cancel all your reminders.") + + # Cancel all reminders + return [ReminderCancelled()] diff --git a/examples/reminderbot/callback_server.py b/examples/reminderbot/callback_server.py new file mode 100644 index 000000000000..b63f66b1e082 --- /dev/null +++ b/examples/reminderbot/callback_server.py @@ -0,0 +1,27 @@ +from sanic import Sanic, response +from sanic.request import Request +from sanic.response import HTTPResponse + + +def create_app() -> Sanic: + + bot_app = Sanic(__name__, configure_logging=False) + + @bot_app.post("/bot") + def print_response(request: Request) -> HTTPResponse: + """Print bot response to the console.""" + bot_response = request.json.get("text") + print(f"\n{bot_response}") + + body = {"status": "message sent"} + return response.json(body, status=200) + + return bot_app + + +if __name__ == "__main__": + app = create_app() + port = 5034 + + print(f"Starting callback server on port {port}.") + app.run("0.0.0.0", port) diff --git a/examples/reminderbot/config.yml b/examples/reminderbot/config.yml new file mode 100644 index 000000000000..1ab1688c5ecd --- /dev/null +++ b/examples/reminderbot/config.yml @@ -0,0 +1,16 @@ +language: en +pipeline: +- name: "WhitespaceTokenizer" +- name: "RegexFeaturizer" +- name: "LexicalSyntacticFeaturizer" +- name: "CountVectorsFeaturizer" +- name: "CountVectorsFeaturizer" + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 +- name: "DIETClassifier" + epochs: 100 +- name: "EntitySynonymMapper" + +policies: +- name: RulePolicy \ No newline at end of file diff --git a/examples/reminderbot/credentials.yml b/examples/reminderbot/credentials.yml new file mode 100644 index 000000000000..6df88c152733 --- /dev/null +++ b/examples/reminderbot/credentials.yml @@ -0,0 +1,37 @@ +# This file contains the credentials for the voice & chat platforms +# which your bot is using. 
+# https://rasa.com/docs/rasa/user-guide/messaging-and-voice-channels/
+
+# # NOTE: the rest channel does not work with external events
+# rest:
+# you don't need to provide anything here - this channel doesn't
+# require any credentials
+
+callback:
+  # URL to which Rasa Open Source will send the bot responses
+  # See https://rasa.com/docs/rasa/user-guide/connectors/your-own-website/#callbackinput
+  url: "http://localhost:5034/bot"
+
+#facebook:
+# verify: "<verify>"
+# secret: "<your secret>"
+# page-access-token: "<your page access token>"
+
+#slack:
+# slack_token: "<your slack token>"
+# slack_channel: "<the slack channel>"
+
+#socketio:
+# user_message_evt: <event name for user message>
+# bot_message_evt: <event name for bot messages>
+# session_persistence: <true/false>
+
+#mattermost:
+# url: "https://<mattermost instance>/api/v4"
+# team: "<mattermost team>"
+# user: "<bot username>"
+# pw: "<bot token>"
+# webhook_url: "<callback URL>"
+
+rasa:
+  url: "http://localhost:5002/api"
diff --git a/examples/reminderbot/data/nlu.yml b/examples/reminderbot/data/nlu.yml
new file mode 100644
index 000000000000..6f6891c99041
--- /dev/null
+++ b/examples/reminderbot/data/nlu.yml
@@ -0,0 +1,51 @@
+version: "2.0"
+
+nlu:
+  - intent: greet
+    examples: |
+      - hey
+      - hello
+      - hi
+      - good morning
+      - good evening
+      - hey there
+
+  - intent: bye
+    examples: |
+      - bye
+      - good bye
+      - ciao
+      - see you
+      - see ya
+
+  - intent: ask_remind_call
+    examples: |
+      - remind me to call [John]{"entity":"name"}
+      - remind me to call [Lis]{"entity":"name"}
+      - remind me to call [Albert]{"entity":"name"}
+      - remind me to call [Susan]{"entity":"name"}
+      - later I have to call [Alan]{"entity":"name"}
+      - later I have to call [Jessie]{"entity":"name"}
+      - later I have to call [Alex]{"entity":"name"}
+      - Please, remind me to call [vova]{"entity":"name"}
+      - please remind me to call [tanja]{"entity":"name"}
+      - I must not forget to call [santa]{"entity":"name"}
+      - I must not forget to call [Daksh]{"entity":"name"}
+      - I must not forget to call [Juste]{"entity":"name"}
+
+  - intent: ask_id
+    examples: |
+      - what's the conversation id?
+      - id
+      - What is the ID of this conversation?
+      - How do I send a POST request to this conversation?
+
+  - intent: ask_forget_reminders
+    examples: |
+      - forget about it
+      - don't remind me!
+      - Forget about the reminder
+      - do not remind me
+      - do not remind me!
+      - Forget reminding me
+      - Forget reminding me!
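The `EXTERNAL_dry_plant` and `EXTERNAL_reminder` intents deliberately have no examples in the NLU data above: they are not meant to be typed by a user. `EXTERNAL_reminder` is produced by the `ReminderScheduled` event returned from `action_set_reminder`, while `EXTERNAL_dry_plant` is meant to be triggered from outside through the `trigger_intent` endpoint, exactly as the curl command printed by `ActionTellID` shows. As a rough sketch only (not part of this change set), the same call could be made from Python with `requests`, assuming a server started with `rasa run --enable-api` on `localhost:5005` and a known conversation ID such as the `"tester"` sender used in the README:

```python
# Illustrative helper only, not part of this diff: trigger the reminderbot's
# EXTERNAL_dry_plant intent over the Rasa HTTP API, mirroring the curl command
# printed by ActionTellID. Assumes `rasa run --enable-api` is serving on
# localhost:5005 and that `requests` is installed.
import requests


def trigger_dry_plant(conversation_id: str, plant: str = "Orchid") -> dict:
    url = (
        f"http://localhost:5005/conversations/{conversation_id}"
        "/trigger_intent?output_channel=latest"
    )
    payload = {"name": "EXTERNAL_dry_plant", "entities": {"plant": plant}}
    response = requests.post(url, json=payload)
    response.raise_for_status()
    # Inspect the JSON response to see how the tracker changed and what the
    # bot answered on the output channel.
    return response.json()


if __name__ == "__main__":
    print(trigger_dry_plant("tester"))
```

The rules added below then map each `EXTERNAL_` intent to the action that should react to it.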
diff --git a/examples/reminderbot/data/rules.yml b/examples/reminderbot/data/rules.yml new file mode 100644 index 000000000000..326ff0531e63 --- /dev/null +++ b/examples/reminderbot/data/rules.yml @@ -0,0 +1,31 @@ +version: "2.0" + +rules: +- rule: greet + steps: + - intent: greet + - action: utter_what_can_do +- rule: remind call + steps: + - intent: ask_remind_call + - action: action_set_reminder +- rule: forget reminder + steps: + - intent: ask_forget_reminders + - action: action_forget_reminders +- rule: say goodbye + steps: + - intent: bye + - action: utter_goodbye +- rule: convo action_tell_id + steps: + - intent: ask_id + - action: action_tell_id +- rule: warn dry plant + steps: + - intent: EXTERNAL_dry_plant + - action: action_warn_dry +- rule: react to reminder + steps: + - intent: EXTERNAL_reminder + - action: action_react_to_reminder diff --git a/examples/reminderbot/domain.yml b/examples/reminderbot/domain.yml new file mode 100644 index 000000000000..1ce966c98a64 --- /dev/null +++ b/examples/reminderbot/domain.yml @@ -0,0 +1,28 @@ +version: "2.0" + +session_config: + session_expiration_time: 60 + carry_over_slots_to_new_session: true +intents: +- greet +- ask_remind_call +- ask_forget_reminders +- bye +- ask_id +- EXTERNAL_dry_plant +- EXTERNAL_reminder +- EXT_reminder +entities: +- name +- plant +responses: + utter_what_can_do: + - text: What can I do for you? + utter_goodbye: + - text: Bye +actions: +- action_set_reminder +- action_forget_reminders +- action_react_to_reminder +- action_tell_id +- action_warn_dry diff --git a/examples/reminderbot/endpoints.yml b/examples/reminderbot/endpoints.yml new file mode 100644 index 000000000000..2ff5a0923d65 --- /dev/null +++ b/examples/reminderbot/endpoints.yml @@ -0,0 +1,42 @@ +# This file contains the different endpoints your bot can use. + +# Server where the models are pulled from. +# https://rasa.com/docs/rasa/user-guide/running-the-server/#fetching-models-from-a-server/ + +#models: +# url: http://my-server.com/models/default_core@latest +# wait_time_between_pulls: 10 # [optional](default: 100) + +# Server which runs your custom actions. +# https://rasa.com/docs/rasa/core/actions/#custom-actions/ + +action_endpoint: + url: "http://localhost:5055/webhook" + +# Tracker store which is used to store the conversations. +# By default the conversations are stored in memory. +# https://rasa.com/docs/rasa/api/tracker-stores/ + +#tracker_store: +# type: redis +# url: <host of the redis instance, e.g. localhost> +# port: <port of your redis instance, usually 6379> +# db: <number of your database within redis, e.g. 0> +# password: <password used for authentication> +# use_ssl: <whether or not the communication is encrypted, default false> + +#tracker_store: +# type: mongod +# url: <url to your mongo instance, e.g. mongodb://localhost:27017> +# db: <name of the db within your mongo instance, e.g. rasa> +# username: <username used for authentication> +# password: <password used for authentication> + +# Event broker which all conversation events should be streamed to. 
+# https://rasa.com/docs/rasa/api/event-brokers/ + +#event_broker: +# url: localhost +# username: username +# password: password +# queue: queue diff --git a/examples/restaurantbot/README.md b/examples/restaurantbot/README.md deleted file mode 100644 index da522c27ce03..000000000000 --- a/examples/restaurantbot/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# Restaurant Bot - -This example includes a file called `run.py`, which contains an example -of how to use Rasa directly from your python code. - -## What’s inside this example? - -This example contains some training data and the main files needed to build an -assistant on your local machine. The `restaurantbot` consists of the following files: - -- **data/nlu.md** contains training examples for the NLU model -- **data/stories.md** contains training stories for the Core model -- **actions.py** contains some custom actions -- **config.yml** contains the model configuration -- **domain.yml** contains the domain of the assistant -- **endpoints.yml** contains the webhook configuration for the custom action -- **policy.py** contains a custom policy -- **run.py** contains code to train a Rasa model and use it to parse some text - -## How to use this example? - -To train your restaurant bot, execute -``` -rasa train -``` -This will store a zipped model file in `models/`. - -To chat with the bot on the command line, run -``` -rasa shell -``` - -Or you can start an action server plus a Rasa server by -``` -rasa run actions -rasa run -m models --endpoints endpoints.yml -``` - -For more information about the individual commands, please check out our -[documentation](http://rasa.com/docs/rasa/user-guide/command-line-interface/). - -## Encountered any issues? -Let us know about it by posting on [Rasa Community Forum](https://forum.rasa.com)! diff --git a/examples/restaurantbot/actions.py b/examples/restaurantbot/actions.py deleted file mode 100644 index bc48cf762662..000000000000 --- a/examples/restaurantbot/actions.py +++ /dev/null @@ -1,31 +0,0 @@ -from rasa_sdk import Action -from rasa_sdk.events import SlotSet - - -class RestaurantAPI(object): - def search(self, info): - return "papi's pizza place" - - -class ActionSearchRestaurants(Action): - def name(self): - return "action_search_restaurants" - - def run(self, dispatcher, tracker, domain): - dispatcher.utter_message("looking for restaurants") - restaurant_api = RestaurantAPI() - restaurants = restaurant_api.search(tracker.get_slot("cuisine")) - return [SlotSet("matches", restaurants)] - - -class ActionSuggest(Action): - def name(self): - return "action_suggest" - - def run(self, dispatcher, tracker, domain): - dispatcher.utter_message("here's what I found:") - dispatcher.utter_message(tracker.get_slot("matches")) - dispatcher.utter_message( - "is it ok for you? 
hint: I'm not going to find anything else :)" - ) - return [] diff --git a/examples/restaurantbot/config.yml b/examples/restaurantbot/config.yml deleted file mode 100644 index 695005196169..000000000000 --- a/examples/restaurantbot/config.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: en - -pipeline: - - name: "SpacyNLP" - - name: "SpacyTokenizer" - - name: "SpacyFeaturizer" - - name: "SklearnIntentClassifier" - - name: "CRFEntityExtractor" - - name: "EntitySynonymMapper" - -policies: - - name: "examples.restaurantbot.policy.RestaurantPolicy" - batch_size: 100 - epochs: 100 - validation_split: 0.2 - - name: MemoizationPolicy - - name: MappingPolicy diff --git a/examples/restaurantbot/data/nlu.md b/examples/restaurantbot/data/nlu.md deleted file mode 100644 index 45b02dd60345..000000000000 --- a/examples/restaurantbot/data/nlu.md +++ /dev/null @@ -1,755 +0,0 @@ -## intent:affirm -- uh yes im looking for a cheap restaurant in the west part of town -- yeah a cheap restaurant serving international food -- correct -- ye -- uh yes restaurant that serves danish food -- let's do it -- yeah -- yes that sells korean food -- yes can i have can i get swedish food in any area -- yes id like an expensive restaurant in the east part of town -- uh yes a cheap restaurant -- yes that serves korean food -- um yes -- yes knocking -- yes italian food -- yes in the moderately priced -- thats correct gastropub food -- uh yes im looking for a cheap restaurant that serves medetanian food -- yes yes -- uh yes can i find a restaurant in the east part of town that serves chinese -- uh yes im looking for malaysian food -- right -- yea -- yes -- yes i am in the east part of town right now and i am looking for any place that serves indonesian food -- yes south part of town -- yes right -- yes and i dont care about the price range -- yeah i need to find -- uh yes i need the north part of town -- uh yeah im looking for a restaurant in the south part of town and that serves kosher food -- yea thank you good bye -- yes can i have -- yes and in french please -- uh yes can i find a moderately priced restaurant and it serve it should serve brazilian food -- right on good bye peace -- yes in the west part of town -- yes barbecue food -- i love that -- yes spanish restaurant - -## intent:deny -- no danish food -- no north -- no -- no new selection -- no im looking for pan asian -- no thanks -- no i want american food -- no thank you good bye -- no thank you -- no spanish food -- no im looking in the south of the town -- no indian food -- uh no -- no american food -- no the south part -- oh no and cheap -- no spanish -- no british food -- no south part of town -- no im looking for one that serves vietnamese food -- do you have something else -- no chinese -- no i want halal food -- no hungarian food -- no center -- no this does not work for me -- no thai - -## intent:greet -- hi there -- hello there -- hey -- hi -- hey bot -- good morning -- goodmorning -- hello -- goodevening -- goodafternoon -- good evening -- morning -- good afternoon - -## intent:inform -- [afghan](cuisine) food -- how bout [asian oriental](cuisine) -- im looking for a [moderately](price:moderate) priced restaurant in the [east](location) part of town -- find [moderately](price:moderate) priced restaurant in the [west](location) part of town -- what about [indian](cuisine) food -- [south](location) part of town -- uh how about [turkish](cuisine) type of food -- yea im looking for a really [cheap](price) restaurant -- a [moderate](price) restaurant in the 
[west](location) part of town -- um [english](cuisine) -- im looking for [tuscan](cuisine) food -- i need a [moderately](price:moderate) priced restaurant in the [west](location) part of town -- [cheap](price) restaurant in the [east](location) of town -- id like [moderately](price:moderate) priced [moroccan](cuisine) food -- im looking for an [expensive](price) restaurant in the [east](location) town -- [moderately](price:moderate) priced food -- restaurant [north](location) part of town [seafood](cuisine) -- [moderately](price:moderate) priced restaurant in the [south](location) part of town -- [north](location) part of town -- [north](location) part -- [french](cuisine) food -- serves [british](cuisine) food -- i need a [moderately](price:moderate) priced restaurant -- i want a [moderately](price:moderate) priced restaurant in the [west](location) part of town -- i want a [cheap](price) restaurant in the [south](location) part of town -- id like a restaurant in any area that it serves [canapes](cuisine) -- im looking for a restaurant in the [south](location) part of town serving [jamaican](cuisine) food -- um what about [italian](cuisine) food -- im looking for a [cheap](price) restaurant in the [north](location) part of town -- the [south](location) part of town -- im looking for [corsica](cuisine) food -- [south](location) -- im looking for an [expensive](price) restaurant that serves sea food -- ah im looking for a [cheap](price) restaurant -- im looking for [world](cuisine) food -- im looking for a restaurant that serves [french](cuisine) food -- how about [indian](cuisine) food -- can i get a restaurant serving [chinese](cuisine) food -- [irish](cuisine) food -- im looking for a restaurant in the [west](location) part of town serving [italian](cuisine) food -- restaurant [south](location) part of town [english](cuisine) food -- [spanish](cuisine) food -- how bout one that serves [portuguese](cuisine) food and is cheap -- [german](cuisine) -- i need a [moderately](price:moderate) priced restaurant in the [north](location) of town -- [korean](cuisine) food -- im looking for [romanian](cuisine) food in the [expensive](price) price range -- i want to find a [moderately](price:moderate) priced restaurant that serves [canapes](cuisine) food -- im looking for a [moderately](price:moderate) priced labonese restaruant -- looking for a [moderately](price:moderate) priced restaurant -- id like to find an [expensive](price) restaurant -- [gastropub](cuisine) -- i want a restaurant that serves [french](cuisine) food -- restaurant [north](location) part of town fast food -- how about [modern european](cuisine) type of food -- im looking for a restaurant on the [east](location) part of town that serves scandinavia food -- the [west](location) part of town -- i want to find a [cheap](price) restaurant and it should serve [scandinavian](cuisine) food -- how [european](cuisine) -- how about [european](cuisine) food -- looking for something [moderately](price:moderate) priced in the [north](location) side of town -- the [south](location) part of town id like a restaurant that serves [traditional](cuisine) food -- i need a [cheap](price) restaurant that serves [indonesian](cuisine) food -- [modern european](cuisine) -- id like to find a [moderately](price:moderate) priced restaurant in the [east](location) part of town -- looking for a [moderately](price:moderate) priced restaurant that serves [brazilian](cuisine) -- i would like [modern european](cuisine) food -- hi looking for a 
[moderately](price:moderate) priced restaurant -- looking for a restaurant that serves [lebanese](cuisine) food -- [east](location) part of town -- [west](location) -- [cheap](price) restaurant in the [west](location) part of town -- [portuguese](cuisine) -- [european](cuisine) -- [expensive](price) food in the [south](location) of town -- i want an [expensive](price) restaurant that serves [polish](cuisine) food -- id like an [expensive](price) [thai](cuisine) restaurant -- i want to find a [moderately](price:moderate) priced restaurant that serves [moroccan](cuisine) food -- [afghan](cuisine) -- [expensive](price) restaurant [scottish](cuisine) food -- how about [vietnamese](cuisine) -- hi im looking for [mexican](cuisine) food -- can i have a [moderately](price:moderate) priced restaurant -- how about [indian](cuisine) type of food -- i would like a [cheap](price) restaurant in the [north](location) part of town -- a [cheap](price) restaurant in the [west](location) part of town -- [polynesian](cuisine) food -- [mexican](cuisine) -- looking for a [cheap](price) restaurant in the [south](location) part of town -- instead could it be for [four](people:4) people -- restaurant any area [japanese](cuisine) food -- im looking for a restaurant in the [north](location) part of town -- what about [thai](cuisine) food -- how about [asian oriental](cuisine) food -- im looking for a restaurant that serves [japanese](cuisine) food -- im looking for a restaurant in the [north](location) part of town that serves [belgian](cuisine) food -- im looking for a restaurant that serves [turkish](cuisine) food -- restaurant in [west](location) part of town serving [corsica](cuisine) food -- [moderately](price:moderate) priced serving gastro pub -- is there a restaurant serving [british](cuisine) food -- [world](cuisine) food -- im looking for something serves [japanese](cuisine) food -- im looking for an [expensive](price) restaurant and it should be in the [south](location) part of town -- id like a [greek](cuisine) restaurant in the [east](location) part of town -- im looking for an [expensive](price) restaurant serving [malaysian](cuisine) food -- i want to find a restaurant serving [world](cuisine) food -- im looking for a restaurant in the [south](location) part of town that serves pan asian food -- looking for an [expensive](price) restaurant that serves [afghan](cuisine) food -- actually i would prefer in [madrid](location) -- what is a good [cheap](price) restaurant that serves [portuguese](cuisine) food -- [asian oriental](cuisine) food -- im looking for a restaurant that serves [russian](cuisine) food -- [corsica](cuisine) -- id like an [expensive](price) restaurant that serves mediteranean food -- [moderately](price:moderate) priced in [south](location) part -- [moderate](price) price [west](location) part of town -- [north](location) -- [asian oriental](cuisine) -- restaurant in the [east](location) part of town serving [basque](cuisine) food -- i am looking for a [cheap](price) restaurant and it should be in the [west](location) part of town -- [moderate](price) priced -- how about [italian](cuisine) -- looking for [spanish](cuisine) food in the center of town -- hi im looking for a [moderately](price:moderate) priced restaurant -- im looking for a restaurant in any area it should serve [gastropub](cuisine) food -- [welsh](cuisine) food -- i want [vegetarian](cuisine) food -- im looking for [swedish](cuisine) food -- um how about [chinese](cuisine) food -- a restaurant in the [east](location) part 
of town that serves [caribbean](cuisine) food -- i need a restaurant in the [east](location) part of town -- in the [west](location) part -- [expensive](price) restaurant serving [world](cuisine) food -- can i have a [seafood](cuisine) restaurant please -- how about [italian](cuisine) food -- how about [korean](cuisine) -- uh [cheap](price) restaurant in [north](location) part of town -- [corsica](cuisine) food -- [scandinavian](cuisine) -- [cheap](price) restaurant that serves [vegetarian](cuisine) food -- what about [italian](cuisine) -- how about [portuguese](cuisine) food -- im looking for an [expensive](price) restaurant serving [french](cuisine) food -- looking for a [moderately](price:moderate) priced restaurant and it needs to be in the [south](location) part of town -- [south](location) part -- i would like to find a [moderately](price:moderate) priced restaurant in the [north](location) part of town -- [tuscan](cuisine) food -- how about uh [gastropub](cuisine) -- im looking for a restaurant in the [east](location) part that serves [creative](cuisine) food -- im looking for a restaurant in the [south](location) part of town that serves [malaysian](cuisine) food -- i'd like to book a table for [two](people:2) in [madrid](location) with [spanish](cuisine) cuisine in a [moderate](price:mid) price range -- i need a [cheap](price) restaurant in the [west](location) part of town -- im looking for a restaurant that serves [unusual](cuisine) food -- im looking for a restaurant in the [south](location) part of town and [danish](cuisine) food -- how about [spanish](cuisine) food -- im looking for a [cheap](price) restaurant and it should be in the [south](location) part of town -- im looking for a [cheap](price) restaurant that serves [vietnamese](cuisine) food -- [spanish](cuisine) -- im looking for a restaurant serving [romanian](cuisine) food -- im looking for [lebanese](cuisine) food -- i need a [moderately](price:moderate) priced restaurant in the [north](location) part of town -- im looking for a restaurant serving [international](cuisine) food -- im looking for a restaurant in the center that serves [turkish](cuisine) food -- [north american](cuisine) -- hello im looking for an [expensive](price) restaurant -- instead could it be with [indian](cuisine) cuisine -- [expensive](price) -- im looking for a [thai](cuisine) restaurant -- want something in the [south](location) side of town thats [moderately](price:moderate) priced -- im looking for [moroccan](cuisine) food -- looking for [cheap](price) barbecue food -- [west](location) part of town -- how about an expensive restaurant that serves [european](cuisine) food -- we will be [eight](people:8) -- im looking for [greek](cuisine) food -- find me a cheap [vietnamese](cuisine) restaurant -- serves [halal](cuisine) food -- [international](cuisine) food in the [east](location) part of town -- [south](location) of town -- im looking for something [cheap](price) -- im looking for a restaurant in the [east](location) part of town serving [catalan](cuisine) food -- i need a restaurant in the [west](location) part of town -- [moderate](price) restaurant [kosher](cuisine) food -- what about the [west](location) part of town -- hi im looking for a [moderately](price:moderate) priced restaurant in the [west](location) part of town -- the [indian](cuisine) food -- looking for an [expensive](price) restaurant in the [south](location) part of town -- uh a restaurant in the [south](location) part of town -- i want [malaysian](cuisine) food in any 
area -- hello [chinese](cuisine) type of food -- id like to find a restaurant that serves [korean](cuisine) food -- looking for a restaurant in the [south](location) part of town that serves [australian](cuisine) food -- uh how about [italian](cuisine) -- [greek](cuisine) food -- im looking for a [cheap](price) restaurant serving [modern european](cuisine) type of food -- yea im looking for a really [cheap](price) restaurant in the [east](location) part of town -- i love [indian](cuisine) food -- how about [korean](cuisine) food -- [cheap](price) [european](cuisine) food -- hi im looking for an [expensive](price) restaurant in the [south](location) part of town -- im looking for [belgian](cuisine) food -- i want to find a restaurant in the [north](location) part of town -- how about [gastropub](cuisine) food -- [mediterranean](cuisine) food -- [venetian](cuisine) food -- [moderate](price) restaurant [south](location) part of town -- ah [centre](location) -- im looking for a [cheap](price) pan asian food -- how about [chinese](cuisine) food -- looking for [lebanese](cuisine) food in the city center -- [singaporean](cuisine) food -- [east](location) -- looking for a [cheap](price) restaurant that serves steak house food -- um what about [european](cuisine) -- uh how about [north american](cuisine) -- actually i would prefer for [eight](people:8) people -- [cheap](price) price range -- [belgian](cuisine) food -- i need a [cheap](price) restaurant in the [east](location) part of town -- im looking for an [expensive](price) restaurant that serves [irish](cuisine) food -- [scandinavian](cuisine) food -- what about in the [west](location) area -- on the [south](location) part of town -- [east](location) -- how about [british](cuisine) food -- how about [international](cuisine) food -- uh okay how about [french](cuisine) food -- id like a [moderately](price:moderate) priced restaurant in the [west](location) part of town -- im looking for a restaurant in the [west](location) part of town that serves [moroccan](cuisine) food -- [expensive](price) restaurant [east](location) part of town -- um i dont care i just want [traditional](cuisine) food -- okay is there any [portuguese](cuisine) food -- how about [french](cuisine) -- im looking for [japanese](cuisine) food -- i want a [cheap](price) restaurant that serves [unusual](cuisine) food -- what about [chinese](cuisine) food -- how about [english](cuisine) food in the north part of town -- [turkish](cuisine) -- how about [mediterranean](cuisine) food -- [french](cuisine) -- actually i would prefer for [six](people:6) people -- uh [italian](cuisine) -- [cantonese](cuisine) food -- im looking for a restaurant that serves [african](cuisine) food -- um [moderate](price) -- [danish](cuisine) food -- [brazilian](cuisine) food -- lets see im looking for a restaurant in the [north](location) part of town that serves [vietnamese](cuisine) food -- [venetian](cuisine) -- i want a restaurant serving [greek](cuisine) food -- can you book a table in [london](location) in a [expensive](price:hi) price range with [spanish](cuisine) cuisine for [two](people:2) -- [italian](cuisine) food -- im looking for a restaurant in the [south](location) part of town serving [austrian](cuisine) food -- a restaurant with [afghan](cuisine) food -- im looking for [traditional](cuisine) food -- im looking for an [expensive](price) restaurant -- can i find an [expensive](price) restaurant that serves [traditional](cuisine) food -- [vietnamese](cuisine) food -- anything 
[expensive](price) -- i want to find a [cheap](price) re -- im looking for a restaurant in the center of town that serves [african](cuisine) food -- is there a restaurant that has [british](cuisine) food -- im looking for a restaurant in the [east](location) part of town that serves [traditional](cuisine) food -- [japanese](cuisine) -- [italian](cuisine) -- [hungarian](cuisine) food -- in the [south](location) part of town -- un [unusual](cuisine) food -- im looking for an [italian](cuisine) restaurant in the [east](location) part of town -- [polynesian](cuisine) -- [christmas](cuisine) food -- [korean](cuisine) -- im looking for a [moderately](price:moderate) priced restaurant serving [fusion](cuisine) food -- looking for a [moderately](price:moderate) priced restaurant that serves [unusual](cuisine) food -- is there anything in the [cheap](price) price range -- the [east](location) -- oh i need to be in the [north](location) -- how about [turkish](cuisine) food -- i want a restaurant in the [north](location) part of town that serves [vietnamese](cuisine) food -- actually i would prefer in [bombay](location) -- how about [italian](cuisine) type of food -- im looking for a [moderately](price:moderate) priced restaurant and it should serve [polynesian](cuisine) food -- [polish](cuisine) food -- i want to find a [moderately](price:moderate) priced restaurant in the [east](location) part town -- im looking for a restaurant in the [west](location) part of town that serves canape food -- looking for a [cheap](price) restaurant that serves [creative](cuisine) food -- im looking for a restaurant in the [east](location) part of the town with [indian](cuisine) asian food -- [thai](cuisine) -- how about [persian](cuisine) food -- im looking for a [moderately](price:moderate) priced restaurant that serves [tuscan](cuisine) food -- [cheap](price) restaurant -- how about [british](cuisine) type food -- [west](location) of town -- what about [vietnamese](cuisine) type of food -- any kind of food id like a [cheap](price) restaurant -- i would like [steakhouse](cuisine) food -- [polish](cuisine) -- how about [modern european](cuisine) food -- like to find a restaurant in the [east](location) part of town and it should serve [indian](cuisine) food -- what about [expensive](price) -- ok what about [indian](cuisine) -- [moderately](price:moderate) priced restaurant in the [west](location) part of town -- [south](location) [expensive](price) -- im looking for a restaurant in the [east](location) part of town -- um in the [east](location) part of town -- how about [gastropub](cuisine) type of food -- uh what about [portuguese](cuisine) food -- [expensive](price) restaurant in the [south](location) part of town -- what about [vietnamese](cuisine) food -- i want to find a restaurant that serves [world](cuisine) food -- [moderate](price) price range please -- im looking for [moderately](price:moderate) priced restaurant serving [austrian](cuisine) food -- i am looking for a restaurant serving [afghan](cuisine) food -- i would like a restaurant that serves [korean](cuisine) food -- restaurant in the [north](location) part of town that serves [hungarian](cuisine) food -- the [south](location) -- how about [modern european](cuisine) -- i want to find a restaurant in the center and it should serve [lebanese](cuisine) food -- [chinese](cuisine) -- im looking a restaurant in the [east](location) part of town -- im looking for a [cheap](price) restaurant with [spanish](cuisine) food -- [greek](cuisine) -- 
[north](location) part of town serving [gastropub](cuisine) food -- is there one that serves [indian](cuisine) food -- [unusual](cuisine) food -- im looking for a [cheap](price) restaurant in the [west](location) part of town -- im looking for a restaurant that serves [polynesian](cuisine) food -- [moderately](price:moderate) restaurant -- what about [asian oriental](cuisine) -- [cheap](price) restaurant [north](location) part of town -- iam looking for an [expensive](price) restaurant and it should be in the [south](location) part of town -- looking for a [japanese](cuisine) restaurant in the center -- what about [portuguese](cuisine) food -- fancy restaurant [moroccan](cuisine) food -- what about [british](cuisine) food -- [indian](cuisine) -- what about [indian](cuisine) -- [moderately](price:moderate) -- i need a restaurant in the center of town that includes [international](cuisine) food -- im looking for an [expensive](price) restaurant and it should be [east](location) part of town -- [hungarian](cuisine) -- the [south](location) part -- how about an [indian](cuisine) restaurant in the north part of town -- okay how about a [gastropub](cuisine) -- how about [indian](cuisine) type food -- an [expensive](price) restaurant -- [world](cuisine) -- [cheap](price) restaurant [east](location) part of town -- the [east](location) part of town -- im looking for a [moderately](price:moderate) priced restaurant that serves [european](cuisine) food -- [crossover](cuisine) food -- im looking for a [moderately](price:moderate) priced restaurant -- i need a [moderately](price:moderate) priced restaurant in the [south](location) part of town -- [expensive](price) european food -- uh [indian](cuisine) -- im looking for something [moderately](price:moderate) priced -- what about [international](cuisine) food -- um in the [south](location) part of town and its [cheap](price) -- i would like to find a restaurant in the center and it should serve [korean](cuisine) food -- dont care [expensive](price) -- im looking for [moderately](price:moderate) priced restaurant and it should serve [halal](cuisine) food -- system [european](cuisine) food -- i would like [european](cuisine) food -- how about [korean](cuisine) foo -- [thai](cuisine) type food -- i want to find a [cheap](price) restaurant and it should be in the [east](location) part of town -- a [moderately](price:moderate) priced restaurant in the [south](location) part of town -- what about [modern european](cuisine) type food -- im looking for a restaurant in the center part of town that serves [european](cuisine) food -- [moderately](price:moderate) priced restaurant that serves [creative](cuisine) food -- im looking for an [expensive](price) restaurant in the [east](location) part of town -- [moderately](price:moderate) priced restaurant serving [indian](cuisine) food -- [tuscan](cuisine) food [south](location) part of town -- [chinese](cuisine) food -- i need [indian](cuisine) food -- may i have a table in a [moderate](price:mid) price range with [spanish](cuisine) food in [bombay](location) for [two](people:2) people -- looking for an [expensive](price) restaurant that serves [indonesian](cuisine) food -- looking for something on the [south](location) side -- id like to find a restaurant in the [north](location) part of town that serves [german](cuisine) food -- im looking for a restaurant in any area that serves [bistro](cuisine) food -- can i have [greek](cuisine) food -- [vietnamese](cuisine) -- [portuguese](cuisine) food -- [lebanese](cuisine) 
food -- looking for an [expensive](price) restaurant -- i need a [moderately](price:moderate) priced restaurant serving [fusion](cuisine) food -- can i find a restaurant that serves [international](cuisine) food -- i want to find a [moderately](price:moderate) priced restaurant -- how about [spanish](cuisine) types of food -- [caribbean](cuisine) food -- what about a restaurant that serves [chinese](cuisine) food -- im looking for a restaurant serving [asian oriental](cuisine) food -- can you make a restaurant reservation with [italian](cuisine) food for [eight](people:8) in [bombay](location) in a [expensive](price) price range -- im looking for an expensive restaurant and it should serve [international](cuisine) food -- im looking for a restaurant that serves [swiss](cuisine) food -- [expensive](price) restaurant [welsh](cuisine) food -- im looking for an [expensive](price) restaurant in the [south](location) part of town -- im looking for a restaurant in the [west](location) park of town that serves [australian](cuisine) food -- [moderately](price:moderate) pri -- i want to find a restaurant in the center that serves [lebanese](cuisine) food -- i need a [moderate](price) priced restaurant in the [north](location) part of town -- im looking for an [expensive](price) restaurant in the [east](location) of town -- hi im looking for an restaurant in the center that serves [korean](cuisine) food -- [thai](cuisine) food -- can you make a restaurant reservation in a [cheap](price:lo) price range in [london](location) with [spanish](cuisine) food for [eight](people:8) people -- [east](location) of town -- [cheap](price) restaurant in the [south](location) part of town -- could i have a [cheap](price) restaurant in the [east](location) part of town -- [international](cuisine) -- uh id like [steakhouse](cuisine) food -- how about [korean](cuisine) type food -- i want [expensive](price) food in the [east](location) part of town -- i'd like to book a table in a [cheap](price:lo) price range for [six](people:6) people with [spanish](cuisine) cuisine in [madrid](location) -- could i have a [moderately](price:moderate) priced restaurant in the [south](location) part of town -- for a restaurant in any area with [international](cuisine) food -- what about [modern european](cuisine) -- could i have a [cheap](price) restaurant -- im looking for a [cheap](price) restaurant in the [east](location) part of town -- [english](cuisine) food -- i would like a [cheap](price) restaurant -- im looking for a [cheap](price) restaurant that serves [german](cuisine) food -- [moderately](price:moderate) priced restaurant -- how about [portuguese](cuisine) -- im looking for something [expensive](price) -- how about [french](cuisine) food -- what about a [chinese](cuisine) restaurant in the north of town -- ah [gastropub](cuisine) food -- [jamaican](cuisine) -- [expensive](price) restaurant serving [portuguese](cuisine) food -- im looking for [portuguese](cuisine) food -- [catalan](cuisine) -- how about [turkish](cuisine) -- i need [cheap](price) [hungarian](cuisine) restaurant -- id like a [cheap](price) restaurant in the [north](location) part of town -- i want a [moderate](price) priced restaurant in the [south](location) of town -- a restaurant [cheap](price) [north](location) part of town -- for [six](people:6) please -- i need a [moderately](price:moderate) priced restaurant that serves [world](cuisine) food -- id like to find a [cheap](price) restaurant in the [west](location) part of town -- find a [cheap](price) 
restaurant -- [expensive](price) european -- id like a [moderately](price:moderate) priced restaurant that serves [cuban](cuisine) food please -- in the [north](location) part of town -- [turkish](cuisine) type of food -- im looking for a restaurant in any area that serves [welsh](cuisine) food -- [malaysian](cuisine) food -- how bout [chinese](cuisine) food -- [expensive](price) restaurants -- [moderately](price:moderate) priced in the [north](location) part of town -- the [east](location) part -- [cheap](price) restaurant [south](location) part of town -- [swiss](cuisine) -- how about [indian](cuisine) -- i want a restaurant in the [east](location) part of town that serves [singaporean](cuisine) food -- im looking for an [expensive](price) restaurant that serves signaporean food -- what about [korean](cuisine) food -- im looking for a restaurant in the [north](location) part of town that serves [african](cuisine) food -- [moderate](price) -- im looking for a [moderately](price:moderate) priced restaurant in the [north](location) part of town -- restaurant in the [west](location) part of town that serves [cuban](cuisine) food -- [european](cuisine) food -- can i get a restaurant with [vegetarian](cuisine) food -- i would like to find an [expensive](price) restaurant that serves [corsica](cuisine) food -- im looking for a [moderately](price:moderate) priced restaurant that serves [traditional](cuisine) food -- im looking for a [moderately](price:moderate) priced restaurant in the [south](location) of town -- [moderately](price:moderate) priced restaurant serving [moroccan](cuisine) food -- i am looking for a restaurant in the [south](location) part of town and it should serve [cantonese](cuisine) food -- uh [cheap](price) restaurant -- [moderately](price:moderate) priced in the [west](location) -- [indian](cuisine) type of food -- i would like a [cheap](price) restaurant in the [west](location) part of town -- i want to find a [cheap](price) restaurant -- uh a [cheap](price) restaurant in the [east](location) part of town -- im looking for a restaurant in the [north](location) part of town serving [indian](cuisine) food -- [moderately](price:moderate) priced restaurant that serves [thai](cuisine) food -- im looking for a [moderately](price:moderate) priced restaurant in the [west](location) part of town -- im looking for a restaurant in the [east](location) part of town that serves [afghan](cuisine) food -- [expensive](price) restaurant in the [east](location) part of town -- im looking for a [moroccan](cuisine) restaurant in the center of town -- im looking for a [moderately](price:moderate) priced restaurant in the [south](location) part of town -- [british](cuisine) expensive -- how about [cheap](price) -- in the [west](location) part of town -- in [cheap](price) restaurant -- i need a [cheap](price) restaurant in the [south](location) part of town -- in a [moderate](price:mid) price range please -- [belgian](cuisine) -- im looking for a [moderately](price:moderate) priced restaurant and it should be in the [north](location) of town -- hi im looking for a [cheap](price) restaurant in the [south](location) part of town -- [north american](cuisine) food -- how about [moderately](price:moderate) priced thai food -- restaurant in any area [international](cuisine) food -- [asian oriental](cuisine) type of food -- i want a [british](cuisine) restaurant in the [east](location) part of town -- [international](cuisine) food -- im looking for a restaurant in the center that serves 
[caribbean](cuisine) food -- [expensive](price) restaurant in the [east](location) -- i want [christmas](cuisine) food -- what about [french](cuisine) food -- [swedish](cuisine) food -- restaurant in the [south](location) part of town -- im looking for a restaurant that serves [gastropub](cuisine) food any price range -- im looking for a restaurant in the [south](location) part of town -- im looking for a restaurant in the [east](location) part of town thats [expensive](price) -- how about [vietnamese](cuisine) food -- with [british](cuisine) food -- id like an [expensive](price) restaurant -- [moderate](price) priced restaurant in the [north](location) part of town -- im looking for an [expensive](price) restaurant and it should be served [international](cuisine) food -- that serves [corsica](cuisine) -- im looking for a restaurant in the [north](location) part of town serving [malaysian](cuisine) food -- i need to find a restaurant in the [north](location) part of town that serves [jamaican](cuisine) food -- um im looking for a restaurant in the center part of town that serves [lebanese](cuisine) food -- what [asian oriental](cuisine) type of food -- looking for a [cheap](price) restaurant in the [east](location) part of town -- [cheap](price) [jamaican](cuisine) -- how about [asian oriental](cuisine) -- i need a [moderately](price:moderate) priced restaurant that serves bask food -- looking for an [expensive](price) restaurant in the [south](location) part of town -- [west](location) side -- looking for [afghan](cuisine) good -- im looking for a restaurant that serves [danish](cuisine) food -- yea i would like [korean](cuisine) food -- im looking for a restaurant in the [west](location) part of town that serves [jamaican](cuisine) food -- im looking for a restaurant in central it should serve [japanese](cuisine) food -- im looking for a [moderately](price:moderate) priced restaurant that serves [vietnamese](cuisine) food -- restaurant in the [west](location) part of town that serves airitran food -- [british](cuisine) food -- price over food [west](location) part of town -- restaurant [moderate](price) price -- in the [east](location) part of town -- im looking for an [expensive](price) restaurant and it should be the [east](location) part of town -- looking for a [cheap](price) restaurant in the [west](location) part of town -- i want a restaurant in the center that serves [seafood](cuisine) -- how about a restaurant in the east part of town that serves [indian](cuisine) food -- the [north](location) -- i would like [australian](cuisine) foo -- im looking for an [expensive](price) restaurant that serves [unusual](cuisine) food -- [mediterranean](cuisine) -- im looking for a [cheap](price) restaurant in the [south](location) part of town -- how about [chinese](cuisine) -- instead could it be with [french](cuisine) food -- [south](location) part of town please -- im looking for a [malaysian](cuisine) restaurant in the [north](location) part of town -- im looking for a [moderately](price:moderate) priced restaurant uh serving [halal](cuisine) food -- [cuban](cuisine) food -- looking for [moderately](price:moderate) priced [russian](cuisine) food -- id like a restaurant in the [south](location) part of town -- [moderately](price:moderate) priced restaurant in the [east](location) part of town -- im looking for a restaurant in any area that serves [russian](cuisine) food -- serving [modern european](cuisine) food -- how about [asian oriental](cuisine) type of food -- i want to find a 
[moderately](price:moderate) priced restaurant that serves [cuban](cuisine) food -- restaurant [east](location) -- [indian](cuisine) food -- a restaurant [south](location) part of town -- i want a [moderately](price:moderate) priced restaurant in the [north](location) part of town -- [expensive](price) restaurant [south](location) part of town -- how about [thai](cuisine) food -- [moderate](price) price -- [bistro](cuisine) -- im looking for a [expensive](price) restaurant in the [east](location) part of town -- i need a [cuban](cuisine) restaurant that is [moderately](price:moderate) priced -- i want to find a [moderately](price:moderate) priced restaurant and it should serve asian food -- what about [north american](cuisine) type of food -- [moderately](price:moderate) priced -- id like a restaurant in the [north](location) part of town that serves cross over food -- i want an [expensive](price) restaurant that serves sea food -- im looking for a restaurant that serves [bistro](cuisine) food -- [cheap](price) restaurants -- im looking for a [cheap](price) restaurant -- i want a restaurant in the [west](location) part of town that serves [australian](cuisine) food -- im looking for a restaurant in the [north](location) part of town serving [lebanese](cuisine) food -- [singaporean](cuisine) -- [cheap](price) [italian](cuisine) food -- restaurant [west](location) part of town [danish](cuisine) food -- [british](cuisine) -- im looking for a restaurant that serves [afghan](cuisine) food -- what about [european](cuisine) food -- its a restaurant in the south of town that serves [italian](cuisine) food -- i need a restaurant serving [corsica](cuisine) food -- something serving [swiss](cuisine) food -- im looking for a [korean](cuisine) restaurant in the [expensive](price) price range -- um an [expensive](price) -- i want a restaurant in the [moderate](price) price range -- im looking for a restaurant that serves [cuban](cuisine) food -- i want to find a restaurant in the [north](location) part of town -- [gastropub](cuisine) food -- i am looking for an [expensive](price) restaurant that serves -- [north](location) north part of town -- a [cheap](price) restaurant in the [south](location) part of town -- [cheap](price) -- [expensive](price) restaurant -- [scottish](cuisine) -- [fusion](cuisine) food -- id like to find a restaurant that serves [afghan](cuisine) food -- uh i want [cantonese](cuisine) food -- i would like a [moderately](price:moderate) priced restaurant in the [west](location) part of town -- im looking for an [expensive](price) restaurant that serves [greek](cuisine) food -- id like an [expensive](price) restaurant that serves bat food -- in the [east](location) part -- can you book a table in [rome](location) in a [moderate](price:mid) price range with [british](cuisine) food for [four](people:4) people -- [cheap](price) restaurant serving unintelligible food -- in the [west](location) -- [creative](cuisine) food -- im looking for a restaurant in the [east](location) part of town serving [japanese](cuisine) food -- im looking for a restaurant in the [north](location) part of town serving [jamaican](cuisine) food -- what about a [spanish](cuisine) restuarant -- im looking for a restaurant in any area that serves [polynesian](cuisine) food -- i want to find an [expensive](price) restaurant that serves [swedish](cuisine) food -- [european](cuisine) food any price -- with [french](cuisine) cuisine -- uh are there any that serves [mediterranean](cuisine) -- [cheap](price) restaurant 
that serves [german](cuisine) food -- i want a [moderately](price:moderate) priced restaurant that serves [mediterranean](cuisine) food -- [african](cuisine) food -- im looking for a restaurant in the [south](location) part of town serving [korean](cuisine) food -- i want to find a [expensive](price) restaurant in the [south](location) part of town -- i want a [moderately](price:moderate) priced -- [canapes](cuisine) food -- ok how about [chinese](cuisine) food -- i need a restaurant in the center of town that serves [spanish](cuisine) food -- uh [world](cuisine) food -- i need a [cheap](price) restaurant -- im looking for [thai](cuisine) - -## intent:request_info -- do you have their [address](info) -- do you have its [phone number](info) -- can i have their [phone number](info) -- what is the [phone number](info) of the restaurant -- what is their [address](info) -- may i have the [address](info) of the restaurant -- whats their [address](info) -- do you have their [phone number](info) -- give me their [phone number](info) -- whats their [phone number](info) -- do you have its [address](info) -- may i have the [phone number](info) of the restaurant -- what is their [phone number](info) -- give me their [address](info) -- can i have their [address](info) -- what is the [address](info) of the restaurant - -## intent:thankyou -- um thank you good bye -- okay cool uh good bye thank you -- okay thank you good bye -- you rock -- and thats all thank you and good bye -- thank you and good bye -- sorry about my mistakes thank you good bye -- thank you good bye -- thank you goodbye -- okay thank you goodbye -- uh thank you good bye -- thank you goodbye -- thank you -- okay thank you -- thanks goodbye -- ah thank you goodbye -- thank you -- thank you good bye -- thanks -- thank you goodbye -- uh okay thank you good bye -- thank you bye -- um okay thank you good bye diff --git a/examples/restaurantbot/data/stories.md b/examples/restaurantbot/data/stories.md deleted file mode 100644 index 08e5c403be96..000000000000 --- a/examples/restaurantbot/data/stories.md +++ /dev/null @@ -1,27208 +0,0 @@ -## story_00914561 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03812903 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00055028 -* greet - - utter_ask_howcanhelp -* 
inform{"cuisine": "indian", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04649138 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00832024 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "london", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07265875 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02879216 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04818543 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "madrid", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - 
utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07401415 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07314254 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "madrid", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03179721 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00008968 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "six", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01856430 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "london", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08076703 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - 
utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06422060 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00264703 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00660267 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02753259 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00496705 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "eight", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04071825 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* 
inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02065778 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00350794 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04824790 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "four", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08852263 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07460140 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - 
action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07720463 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07930584 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06519313 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03248462 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08607862 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06360277 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "london", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03043652 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "price": "cheap"} - - utter_on_it - - utter_ask_location -* 
inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07408662 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01743655 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05692561 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "eight", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03068439 -* greet - - utter_ask_howcanhelp -* inform{"location": "london"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00749657 -* greet - - utter_ask_howcanhelp -* inform{"people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03380511 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "london", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - 
- action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06422073 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09031916 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01978688 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "madrid"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04854177 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01399547 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "eight", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01912632 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - 
action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04612031 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06463518 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "bombay", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09999406 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "bombay", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01480014 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09053690 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "paris"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03827935 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "madrid", "people": "eight"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - 
utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02993258 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05416743 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07596709 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00797530 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00153088 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "paris", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07360837 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "rome"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06027650 -* greet - - 
utter_ask_howcanhelp -* inform{"location": "london", "people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00760313 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "rome", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09866647 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00624668 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09174227 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "madrid", "people": "eight"} - - utter_on_it - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02413435 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03736207 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* 
inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02843759 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "rome", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07728797 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08999407 -* greet - - utter_ask_howcanhelp -* inform{"location": "london"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04855656 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "four", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00252998 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06289573 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* 
inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07003503 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04378975 -* greet - - utter_ask_howcanhelp -* inform{"people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01209067 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01411182 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08103150 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02467890 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - 
utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06829456 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "rome", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09351905 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08944879 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "bombay"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09232261 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02664959 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05047322 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "bombay", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm 
- - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03993480 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01984348 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04592751 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09107150 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07407088 -* greet - - utter_ask_howcanhelp -* inform{"people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09044014 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - 
action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06301857 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "paris"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04834267 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07308412 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08908895 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "eight"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01289187 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - 
- action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01937563 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "bombay", "people": "six"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05088706 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09156236 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "two"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05268845 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04910721 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09452611 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00319959 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_price -* inform{"price": "moderate"} - - 
utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05133868 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06452510 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08526348 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "bombay", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00282948 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06641526 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08859037 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "madrid", "people": 
"four"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00008217 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01023896 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04705724 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08574082 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "madrid"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09464717 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03114526 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": 
"spanish", "location": "madrid"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00481967 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05718942 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09924129 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "two", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08654911 -* greet - - utter_ask_howcanhelp -* inform{"location": "london"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04622584 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - 
utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07235737 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00196299 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03874811 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06072887 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08654334 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04109384 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* 
affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00978965 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06840076 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04239724 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09090049 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04363473 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00292739 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - 
- action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08844467 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06812665 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "madrid", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03851125 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09004962 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04062695 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "madrid", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04531146 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": 
"moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08162169 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01498425 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03900022 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01611465 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02776468 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "six", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07116144 -* greet - - utter_ask_howcanhelp -* 
inform{"location": "madrid", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09424206 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00512103 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06202913 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "two", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08813673 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09506631 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "two", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02056193 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": 
"moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05553278 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "rome"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05646905 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06882697 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "paris"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05241672 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "six"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05836751 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04721693 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "paris", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - 
- utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00770250 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05479819 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00494816 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09635922 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00166850 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "six", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09144866 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "rome", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - 
action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07590883 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "four", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03361929 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "rome", "people": "eight"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07093964 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01995661 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "four", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01795624 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05316798 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - 
utter_goodbye - -## story_06296526 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05730166 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08611434 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05526723 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "two", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01980989 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08103236 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - 
- utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07071838 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05407160 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07910601 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02307960 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01749191 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07420454 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "moderate"} - - 
utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01541159 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08330167 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "london", "people": "six"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09407900 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02658648 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08761159 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "eight", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09442789 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "price": "moderate"} - - utter_on_it - - 
utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07218685 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08699011 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02841802 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03987178 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00112984 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "six", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - 
- utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07260710 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "four", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01365930 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05120672 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04325128 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09917382 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "madrid"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07932481 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - 
utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04779175 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07923412 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07688327 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "four", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08050237 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00790432 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* 
thankyou - - utter_goodbye - -## story_05477857 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08723729 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02145941 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02904309 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01750267 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08931260 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - 
utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04239519 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09899499 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09451894 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "four", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05371750 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00501978 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09072588 -* greet - - utter_ask_howcanhelp -* inform{"people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - 
utter_goodbye - -## story_04582314 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02085011 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01486105 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08346274 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06655353 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00487479 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": 
"six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08015903 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04625807 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "paris", "people": "eight"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07380679 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09277738 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06093387 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04892036 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "price": 
"moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01978796 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03821257 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "eight"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02199982 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09541007 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03036720 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "six"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - 
utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01854820 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06568898 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05649369 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08843346 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05316570 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05358117 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09519635 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "rome", 
"price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09472164 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "london", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00615105 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00755596 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07083125 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05765099 -* greet - - utter_ask_howcanhelp -* inform{"people": "six", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03535337 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french"} - - utter_on_it - - utter_ask_location -* 
inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05644691 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06674428 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01726720 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05055076 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08970965 -* greet - - utter_ask_howcanhelp -* inform{"people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09352826 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "bombay"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - 
action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05365523 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "rome", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06000199 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04273838 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01270052 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06071407 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02586833 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "paris", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - 
utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03374461 -* greet - - utter_ask_howcanhelp -* inform{"location": "london"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00320931 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08849930 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "six"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01807635 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05866804 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00240913 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04740686 -* greet - - utter_ask_howcanhelp -* 
inform{"cuisine": "british", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09772586 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08676027 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "bombay", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02540704 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "paris", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06208724 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "rome", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06554752 -* greet - - utter_ask_howcanhelp -* inform{"people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09403074 -* greet - - utter_ask_howcanhelp -* 
inform{"cuisine": "italian", "people": "six", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03984404 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05700104 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "six"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01064359 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "rome", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00317473 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01597569 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06659073 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "paris", "price": "cheap"} - - 
utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00435977 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01048473 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09745793 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02995684 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01091743 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06153610 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome"} - - 
utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05036312 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02891253 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07783774 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "eight"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08930502 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07255380 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - 
action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05514290 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01373980 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00309685 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06964204 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00813497 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08033594 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - 
utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00567104 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "six", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01150384 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05429965 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04071153 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02008888 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "paris", "people": "eight"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## 
story_09630554 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07725040 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "rome", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03551647 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03397670 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00056459 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08479801 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04324212 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} 
- - utter_ask_location
-* inform{"location": "london"}
- - utter_ask_price
-* inform{"price": "moderate"}
- - utter_ask_moreupdates
-* inform{"people": "six"}
- - utter_ask_moreupdates
-* inform{"location": "rome"}
- - utter_ask_moreupdates
-* deny
- - utter_ack_dosearch
- - action_search_restaurants
- - action_suggest
-* deny
- - utter_ack_findalternatives
- - action_suggest
-* deny
- - utter_ack_findalternatives
- - action_suggest
-* deny
- - utter_ack_findalternatives
- - action_suggest
-* affirm
- - utter_ack_makereservation
-* thankyou
- - utter_goodbye
-
-## story_02693697
-* greet
- - utter_ask_howcanhelp
-* inform{"cuisine": "french", "location": "bombay"}
- - utter_on_it
- - utter_ask_numpeople
-* inform{"people": "two"}
- - utter_ask_price
-* inform{"price": "moderate"}
- - utter_ask_moreupdates
-* inform{"location": "madrid"}
- - utter_ask_moreupdates
-* deny
- - utter_ack_dosearch
- - action_search_restaurants
- - action_suggest
-* deny
- - utter_ack_findalternatives
- - action_suggest
-* deny
- - utter_ack_findalternatives
- - action_suggest
-* affirm
- - utter_ack_makereservation
-* thankyou
- - utter_goodbye
-
-## story_00287919
-* greet
- - utter_ask_howcanhelp
-* inform{"location": "paris"}
- - utter_on_it
- - utter_ask_cuisine
-* inform{"cuisine": "spanish"}
- - utter_ask_numpeople
-* inform{"people": "two"}
- - utter_ask_price
-* inform{"price": "expensive"}
- - utter_ask_moreupdates
-* inform{"price": "cheap"}
- - utter_ask_moreupdates
-* inform{"cuisine": "italian"}
- - utter_ask_moreupdates
-* deny
- - utter_ack_dosearch
- - action_search_restaurants
- - action_suggest
-* deny
- - utter_ack_findalternatives
- - action_suggest
-* deny
- - utter_ack_findalternatives
- - action_suggest
-* affirm
- - utter_ack_makereservation
-* thankyou
- - utter_goodbye
-
-## story_07004549
-* greet
- - utter_ask_howcanhelp
-* inform{"cuisine": "italian"}
- - utter_on_it
- - utter_ask_location
-* inform{"location": "bombay"}
- - utter_ask_numpeople
-* inform{"people": "six"}
- - utter_ask_price
-* inform{"price": "moderate"}
- - utter_ask_moreupdates
-* inform{"location": "london"}
- - utter_ask_moreupdates
-* inform{"cuisine": "indian"}
- - utter_ask_moreupdates
-* deny
- - utter_ack_dosearch
- - action_search_restaurants
- - action_suggest
-* deny
- - utter_ack_findalternatives
- - action_suggest
-* deny
- - utter_ack_findalternatives
- - action_suggest
-* affirm
- - utter_ack_makereservation
-* thankyou
- - utter_goodbye
-
-## story_08705460
-* greet
- - utter_ask_howcanhelp
-* inform{"location": "madrid", "people": "two", "price": "cheap"}
- - utter_on_it
- - utter_ask_cuisine
-* inform{"cuisine": "italian"}
- - utter_ask_moreupdates
-* inform{"people": "eight"}
- - utter_ask_moreupdates
-* deny
- - utter_ack_dosearch
- - action_search_restaurants
- - action_suggest
-* deny
- - utter_ack_findalternatives
- - action_suggest
-* deny
- - utter_ack_findalternatives
- - action_suggest
-* affirm
- - utter_ack_makereservation
-* thankyou
- - utter_goodbye
-
-## story_08036201
-* greet
- - utter_ask_howcanhelp
-* inform{"cuisine": "spanish", "location": "paris", "price": "moderate"}
- - utter_on_it
- - utter_ask_numpeople
-* inform{"people": "four"}
- - utter_ask_moreupdates
-* inform{"price": "cheap"}
- - utter_ask_moreupdates
-* deny
- - utter_ack_dosearch
- - action_search_restaurants
- - action_suggest
-* deny
- - utter_ack_findalternatives
- - action_suggest
-* deny
- - utter_ack_findalternatives
- - action_suggest
-* affirm
- - utter_ack_makereservation
-* thankyou
- - utter_goodbye
-
-## story_09331784 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01094208 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09614535 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "paris", "people": "eight"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04907440 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "paris", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05190938 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06127194 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", 
"location": "london", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03531922 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00150625 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "six", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08760628 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "rome", "people": "eight"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05825522 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05485352 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06152786 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "madrid", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "six"} 
- - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03468744 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "bombay", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02483313 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04524370 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "bombay", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08553699 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06155115 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "six", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07396521 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", 
"people": "two"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09218245 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "london", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00707850 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00625551 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "rome"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01995917 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00009611 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00407170 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": 
"spanish", "people": "four", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03224749 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01812983 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "london", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04374466 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07767341 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03973524 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - 
- action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07812635 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06011722 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07613896 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00029804 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "rome", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02635044 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08291350 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - 
utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06125512 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "two"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06576772 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01283865 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "six", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03117569 -* greet - - utter_ask_howcanhelp -* inform{"people": "six", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03258323 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00339868 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "four", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "london"} - - 
utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07011583 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06529306 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "london", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07934293 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01340261 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05727944 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "eight", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08095602 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants 
- - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07744301 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "bombay"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06311913 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00778945 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03264243 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "bombay", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00732555 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "four", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02579568 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - 
utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09576867 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "four", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00679041 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08288637 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01644532 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "london", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06698936 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "six", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08064956 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_price -* 
inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03224212 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05627930 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04981783 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04951933 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "rome", "people": "six"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06109731 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00804842 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - 
action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09231986 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03435211 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "paris", "people": "eight"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05044915 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "eight", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04320483 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03272467 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00130318 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* 
inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06774200 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03569719 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "paris"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04415388 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09663817 -* greet - - utter_ask_howcanhelp -* inform{"people": "six", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09140372 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04024668 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - 
utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02250911 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "rome", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07026439 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09025690 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08062978 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09012310 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "eight", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03226177 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* 
deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07217864 -* greet - - utter_ask_howcanhelp -* inform{"people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06723191 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09874308 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07353530 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "four", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01780813 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09116360 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - 
utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06478210 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "four", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06428553 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08772874 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08825416 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01226483 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05839985 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "moderate"} - - 
utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07551950 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "six", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09290570 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "rome", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00460044 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08340994 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "two"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07040871 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09860021 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - 
utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00345785 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08546539 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02555736 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09181244 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05537251 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02959702 -* greet 
- - utter_ask_howcanhelp -* inform{"location": "rome"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05083411 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "madrid", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09748536 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09561957 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "rome", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05700339 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08358757 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "rome", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03651428 -* greet - - 
utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "rome", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08265138 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04662800 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03897194 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "paris", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08275525 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04053749 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "madrid", "people": "six"} - - utter_on_it - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03563777 -* 
greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08121610 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "paris", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00749297 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01653073 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "eight", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02597905 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00952448 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "paris", "people": "six"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08262314 -* greet - - 
utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03645990 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07291868 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09899206 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02191707 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08040444 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "paris"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## 
story_04441788 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "bombay"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01260372 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01839285 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05525385 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01271574 -* greet - - utter_ask_howcanhelp -* inform{"people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03181821 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - 
- action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03876284 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "madrid", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01452194 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00156552 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04206739 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00839949 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09007272 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "london", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## 
story_07334460 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05216830 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00519879 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04577592 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03590482 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03492103 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - 
utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06001770 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "eight", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05772832 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "two"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04383021 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "paris", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04240637 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01854146 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08385321 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - 
- utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01146386 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03529821 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07870026 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "eight"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05488816 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06180843 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09241640 
-* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08821976 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06840821 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06578239 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07696073 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08574664 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - 
utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04972497 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "madrid", "people": "six"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09572174 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02168130 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08170687 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09650629 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07085793 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "madrid", "people": "six"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - 
action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00808985 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "paris"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06805887 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09229085 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "two"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07072585 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05234126 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01604746 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "london", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - 
utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01455955 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04008931 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "london", "people": "six"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05447297 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08821946 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00466675 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "bombay", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02593170 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "paris"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - 
utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05062698 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01070654 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00034683 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01264328 -* greet - - utter_ask_howcanhelp -* inform{"location": "london"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03535906 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "six", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06889283 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - 
action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01906313 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00711706 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "six", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09784883 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07815326 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06473770 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "bombay"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01814609 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation 
-* thankyou - - utter_goodbye - -## story_05455518 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01305557 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "six", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06484471 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09089141 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05557115 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08149316 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - 
- utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02242155 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "two", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04769285 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09146248 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00536652 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "six", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07964398 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "six", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00251195 -* greet - - utter_ask_howcanhelp -* 
inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06556520 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "rome"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08164795 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03095701 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07129734 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02504254 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "six"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03079768 -* greet - - utter_ask_howcanhelp -* 
inform{"cuisine": "italian", "location": "london", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04764846 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04549426 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "bombay"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04845907 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09501145 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04887912 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09815825 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_price -* inform{"price": "cheap"} - - 
utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00365957 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02128834 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00895793 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00156329 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06823684 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03189716 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "six", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - 
utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06677556 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09834580 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "london", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03043608 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "madrid"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01080107 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00309451 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00750605 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "bombay", "people": "six"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants 
- - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01959352 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00844474 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08918527 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02712266 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03217729 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09971189 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates 
-* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05683202 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "six"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08055076 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05409864 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02675405 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09905940 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01114266 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives 
- - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09197344 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02195594 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08441847 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01259421 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03028201 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09735512 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - 
action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02969153 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07861584 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01861546 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "rome", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02143971 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "six", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00844567 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02272099 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* 
inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00781021 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03902588 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04992470 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06482833 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07258658 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08776751 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "madrid", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - 
utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05340573 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01263649 -* greet - - utter_ask_howcanhelp -* inform{"people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04161709 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07467763 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00568872 -* greet - - utter_ask_howcanhelp -* inform{"people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02622089 -* greet - - 
utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "two"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06239058 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04950138 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02428348 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02512396 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "four", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07176272 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02965882 -* greet - - utter_ask_howcanhelp -* 
inform{"cuisine": "british", "people": "six"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00458333 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08889369 -* greet - - utter_ask_howcanhelp -* inform{"people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09904271 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "bombay", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07337015 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "paris", "people": "eight"} - - utter_on_it - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09964423 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - 
utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04430664 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02061383 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08368858 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "paris", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06865110 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04755349 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08653684 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "six"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06782231 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "two", "price": "moderate"} - - utter_on_it - - 
utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00524308 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00933108 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09816555 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "six"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06781617 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "eight", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01160443 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04244380 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "paris", "people": "six"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00849025 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": 
"british", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06603544 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09753029 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03874013 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09085199 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03142136 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - 
action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02365589 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09744970 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07472413 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08186840 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08063971 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00624478 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "rome", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04427174 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* 
inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05484960 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "paris"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02449624 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03439017 -* greet - - utter_ask_howcanhelp -* inform{"people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07255272 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03604699 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* 
inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09860386 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04062984 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03941254 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "bombay", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00171217 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "madrid", "people": "six"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06811483 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01877039 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price 
-* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00969777 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03983773 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09662563 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06031304 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06667107 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07420764 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - 
utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01886993 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07703774 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09421887 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02826756 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00118580 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04773474 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "madrid", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - 
utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00776014 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04905103 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "paris"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04072855 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01706941 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00303134 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "madrid", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01385833 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "rome", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* 
thankyou - - utter_goodbye - -## story_02725173 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05444359 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "two"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04820873 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04166050 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08464725 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09530558 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01399039 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "six", "price": "moderate"} - - utter_on_it 
- - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05254905 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09668676 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05372829 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03066118 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05113464 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - 
action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01865268 -* greet - - utter_ask_howcanhelp -* inform{"people": "six", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03632414 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "eight", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02423947 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "eight"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07713696 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08704162 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "two", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03562838 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* 
inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09822341 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06505143 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "rome", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00450317 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03676706 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06255358 -* greet - - utter_ask_howcanhelp -* inform{"location": "london"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - 
utter_goodbye - -## story_09628610 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "madrid", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08373112 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "eight"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03628824 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05225970 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06496178 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02623324 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "paris"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05818644 -* greet - - utter_ask_howcanhelp -* inform{"people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "four"} 
- - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08101371 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "madrid", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03320883 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06821629 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "bombay"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03159066 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00734264 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07777621 -* greet - - utter_ask_howcanhelp -* inform{"price": 
"moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04252545 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06024388 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "madrid"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06714841 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "eight", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08129380 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01172047 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - 
utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06439574 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00593551 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02428697 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06510143 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09073912 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03631655 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01208285 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "eight"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - 
utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05275649 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "bombay"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00789633 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09606751 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09656886 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00830863 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07537213 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* 
inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09314942 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "madrid", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08940771 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09043068 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03290139 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08852898 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05740152 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "madrid", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - 
action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06058369 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00152569 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "two", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09811824 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00957490 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00919373 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "two", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07279664 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "price": "expensive"} - - utter_on_it - - 
utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04854964 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "two"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00980793 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "paris", "people": "eight"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01065431 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01321177 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06376839 -* greet - - utter_ask_howcanhelp -* inform{"location": "london"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* 
inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01929540 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06506308 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04181925 -* greet - - utter_ask_howcanhelp -* inform{"people": "six", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00709765 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00938138 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00638016 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "four", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": 
"rome"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05850607 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02751982 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "six", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04296137 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04876842 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "london", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07171758 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08485578 -* greet - - utter_ask_howcanhelp -* 
inform{"cuisine": "british", "location": "madrid", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01314086 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "paris", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03724124 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06585817 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "paris", "people": "six"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00187798 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01933633 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - 
utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07144106 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "bombay"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05615854 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "six"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05206907 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "bombay", "people": "eight"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09625580 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03118434 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "london", "people": "six"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04889196 -* greet - - utter_ask_howcanhelp -* inform{"location": "london"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou 
- - utter_goodbye - -## story_05274495 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04636405 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07145375 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03694903 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "paris", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08117383 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "paris"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01220821 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "rome", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - 
utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00893195 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01317886 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03348323 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "six", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07533049 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07826987 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08516839 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* 
deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02817336 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09089819 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03497250 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "madrid", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09330259 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "paris", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08349832 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09157416 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* 
deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00131261 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "eight", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06076209 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04570409 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "rome", "people": "eight"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06805619 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00022643 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "rome", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04937786 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06976592 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* 
inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08424159 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09234537 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09862495 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05126643 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03337938 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "six", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03156665 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "paris"} - - utter_on_it - - 
utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02957865 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00349487 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "paris", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06798659 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "rome"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09813964 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "four", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05131495 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07895394 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome"} - - utter_on_it - - 
utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08823894 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "six", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09702036 -* greet - - utter_ask_howcanhelp -* inform{"location": "london"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02282032 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "bombay", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01130224 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00895499 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives 
- - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02600877 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03949551 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04176053 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08679366 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01552180 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "madrid", "people": "six"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01393050 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - 
utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01233425 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00898140 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "four", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00902206 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "six", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08674334 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "london", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05505791 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04410049 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05972136 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* 
inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08344798 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "eight"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04798183 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06230240 -* greet - - utter_ask_howcanhelp -* inform{"location": "london"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01434051 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01094264 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - 
action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00393178 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08422070 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "bombay", "people": "six"} - - utter_on_it - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09244706 -* greet - - utter_ask_howcanhelp -* inform{"people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06328099 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "two"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04284245 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "bombay"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05686914 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - 
utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08073789 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04518461 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08600837 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "madrid", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08425819 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00469483 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "rome", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09908909 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - 
utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02726499 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "madrid"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00122274 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04605461 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01127782 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07047906 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05382785 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - 
action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09951364 -* greet - - utter_ask_howcanhelp -* inform{"location": "london"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07714658 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05753493 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "rome", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02319303 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03053117 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08039509 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": 
"four"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06063036 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01842291 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "paris", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00404771 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06303012 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08826114 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08580853 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "madrid", 
"price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03270022 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06783355 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00816632 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04796764 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07985063 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09178086 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "madrid"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* 
inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01643031 -* greet - - utter_ask_howcanhelp -* inform{"location": "london"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09604583 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "bombay"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07887953 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "madrid", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09163759 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05195314 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "madrid"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06757748 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "two", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": 
"six"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06250942 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09472081 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07167680 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06327458 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "two"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09740907 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01812192 -* greet - - utter_ask_howcanhelp -* inform{"location": "london"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* 
inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04856578 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04325428 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01398241 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04724172 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "eight"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07606335 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05715031 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* 
deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05296588 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08104850 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05853971 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04727618 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01711241 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "eight", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03936603 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "four", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": 
"french"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08576952 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04804222 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00268133 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07483909 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06777191 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02494821 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - 
utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00239185 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04801990 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06766036 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06440930 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02380468 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - 
utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04005855 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07405507 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "paris"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00498578 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00109081 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04689909 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "people": "four", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01583175 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* 
deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06081230 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09650700 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08683423 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "rome"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09809140 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08704448 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05846298 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## 
story_01001501 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08879496 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07666308 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07775524 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06657612 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03634704 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "bombay"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09374425 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople 
-* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03996025 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "madrid"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09354384 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05281037 -* greet - - utter_ask_howcanhelp -* inform{"price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00319688 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06274716 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "paris"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00091875 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - 
utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03406779 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05395046 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04030638 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08550472 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "paris", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07104468 -* greet - - utter_ask_howcanhelp -* inform{"people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07688540 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "eight"} - - 
utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01518753 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09864436 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04577171 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09610292 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03166920 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03469112 -* greet - - utter_ask_howcanhelp -* inform{"people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - 
action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06962015 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00278172 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08966504 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "madrid", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02470749 -* greet - - utter_ask_howcanhelp -* inform{"people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04454934 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05034229 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00180528 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "bombay", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - 
utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08515298 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06859902 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00392312 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "paris", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05725205 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07300290 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "four", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06309353 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "people": "six", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - 
action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03854799 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "six", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09566883 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01043845 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00253574 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03763786 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "london", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02144977 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny 
- - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09850809 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09163022 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07274687 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08837527 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "bombay", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00921838 -* greet - - utter_ask_howcanhelp -* inform{"people": "four"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05904994 -* greet - - utter_ask_howcanhelp -* inform{"location": 
"bombay", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08347700 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06500049 -* greet - - utter_ask_howcanhelp -* inform{"people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02620391 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "paris", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09116560 -* greet - - utter_ask_howcanhelp -* inform{"people": "six", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09270102 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "madrid", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06726927 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": 
"british", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09168519 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09385493 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "paris", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01287768 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01543338 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02541285 -* greet - - utter_ask_howcanhelp -* inform{"people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* 
thankyou - - utter_goodbye - -## story_09271185 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00367264 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "four", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04229414 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "four"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08134123 -* greet - - utter_ask_howcanhelp -* inform{"people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03235504 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "bombay", "people": "two"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03978119 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "paris", "price": "expensive"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09361073 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "two", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* 
inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03261321 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "london", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00496996 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06118884 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "people": "six", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08182507 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00422529 -* greet - - utter_ask_howcanhelp -* inform{"location": "london"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03013594 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", 
"people": "two"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09008945 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03370167 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04723733 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02172992 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04436511 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07834269 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "price": "cheap"} - - 
utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08319983 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06524055 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06490936 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03364039 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "location": "madrid", "people": "six"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03853664 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06995338 -* greet - - 
utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09243427 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01653797 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "paris", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03279828 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09879267 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04204187 -* greet - - utter_ask_howcanhelp -* inform{"people": "eight"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* inform{"location": "london"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00656127 -* greet - - utter_ask_howcanhelp -* 
inform{"location": "bombay", "price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "italian"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"cuisine": "french"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02034035 -* greet - - utter_ask_howcanhelp -* inform{"location": "london", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07316004 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03687127 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "price": "expensive"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03560336 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09441270 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london", "price": "cheap"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01872699 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "two", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny 
- - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02048330 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"location": "rome"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06319392 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09516193 -* greet - - utter_ask_howcanhelp -* inform{"location": "madrid", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02657528 -* greet - - utter_ask_howcanhelp -* inform{"location": "bombay", "people": "two"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00310392 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04983227 -* greet - - 
utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "rome", "price": "moderate"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_09239368 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "french"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_01412677 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "location": "paris", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "spanish"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03771921 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "location": "london", "people": "four"} - - utter_on_it - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02587073 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "paris"} - - utter_on_it - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04309861 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "london"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_moreupdates -* inform{"price": 
"expensive"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05490968 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "people": "six"} - - utter_on_it - - utter_ask_location -* inform{"location": "london"} - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_00145741 -* greet - - utter_ask_howcanhelp -* inform{"location": "rome", "people": "eight", "price": "expensive"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05547840 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_04527193 -* greet - - utter_ask_howcanhelp -* inform{"location": "paris", "people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03326363 -* greet - - utter_ask_howcanhelp -* inform{"people": "six"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08338160 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "spanish", "people": "six", "price": "moderate"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08226375 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "british", "location": "london", "people": "six"} - - utter_on_it - - utter_ask_price -* inform{"price": 
"expensive"} - - utter_ask_moreupdates -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07961733 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian"} - - utter_on_it - - utter_ask_location -* inform{"location": "madrid"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02606355 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "spanish"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "eight"} - - utter_ask_moreupdates -* inform{"location": "madrid"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"cuisine": "british"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_06093780 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french"} - - utter_on_it - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "four"} - - utter_ask_price -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07143287 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "french", "people": "four", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "bombay"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* inform{"people": "two"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_03162809 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "indian"} - - utter_ask_location -* inform{"location": "paris"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny 
- - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_02589793 -* greet - - utter_ask_howcanhelp -* inform{"price": "moderate"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"cuisine": "italian"} - - utter_ask_moreupdates -* inform{"location": "bombay"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_08740741 -* greet - - utter_ask_howcanhelp -* inform{"price": "cheap"} - - utter_on_it - - utter_ask_cuisine -* inform{"cuisine": "british"} - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "six"} - - utter_ask_moreupdates -* inform{"location": "paris"} - - utter_ask_moreupdates -* inform{"price": "expensive"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_05389447 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "italian", "location": "rome", "people": "eight"} - - utter_on_it - - utter_ask_price -* inform{"price": "cheap"} - - utter_ask_moreupdates -* inform{"cuisine": "indian"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - -## story_07621761 -* greet - - utter_ask_howcanhelp -* inform{"cuisine": "indian", "price": "cheap"} - - utter_on_it - - utter_ask_location -* inform{"location": "rome"} - - utter_ask_numpeople -* inform{"people": "two"} - - utter_ask_moreupdates -* inform{"price": "moderate"} - - utter_ask_moreupdates -* inform{"people": "four"} - - utter_ask_moreupdates -* deny - - utter_ack_dosearch - - action_search_restaurants - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* deny - - utter_ack_findalternatives - - action_suggest -* affirm - - utter_ack_makereservation -* thankyou - - utter_goodbye - diff --git a/examples/restaurantbot/domain.yml b/examples/restaurantbot/domain.yml deleted file mode 100644 index 3ffc3a0ecd4e..000000000000 --- a/examples/restaurantbot/domain.yml +++ /dev/null @@ -1,82 +0,0 @@ -slots: - cuisine: - type: text - people: - type: text - location: - type: text - price: - type: text - info: - type: text - matches: - type: unfeaturized - -entities: -- location -- info -- people -- price -- cuisine - -intents: - - greet - - affirm - - deny - - inform - - thankyou - - request_info - -templates: - utter_greet: - - text: "hey there!" - utter_goodbye: - - text: "goodbye :(" - - text: "Bye-bye" - utter_default: - - text: "default message" - utter_ack_dosearch: - - text: "ok let me see what I can find" - utter_ack_findalternatives: - - text: "ok let me see what else there is" - utter_ack_makereservation: - - text: "ok making a reservation for restaurant (price={price} cuisine={cuisine}) in location={location} for count={people}? 
" - buttons: - - title: "thank you" - payload: "thank you" - utter_ask_cuisine: - - text: "what kind of cuisine would you like?" - utter_ask_howcanhelp: - - text: "how can I help you?" - utter_ask_location: - - text: "where?" - utter_ask_moreupdates: - - text: "if you'd like to modify anything else, please tell me what. This is what I currently have: {location} (price: {price}, cuisine: {cuisine}) for {people} people." - utter_ask_numpeople: - - text: "for how many people?" - utter_ask_price: - - text: "in which price range?" - buttons: - - title: "cheap" - payload: "cheap" - - title: "expensive" - payload: "expensive" - utter_on_it: - - text: "I'm on it" - -actions: -- utter_greet -- utter_goodbye -- utter_default -- utter_ack_dosearch -- utter_ack_findalternatives -- utter_ack_makereservation -- utter_ask_cuisine -- utter_ask_howcanhelp -- utter_ask_location -- utter_ask_moreupdates -- utter_ask_numpeople -- utter_ask_price -- utter_on_it -- action_search_restaurants -- action_suggest diff --git a/examples/restaurantbot/endpoints.yml b/examples/restaurantbot/endpoints.yml deleted file mode 100644 index 56d98ea861fc..000000000000 --- a/examples/restaurantbot/endpoints.yml +++ /dev/null @@ -1,2 +0,0 @@ -action_endpoint: - url: http://localhost:5055/webhook diff --git a/examples/restaurantbot/policy.py b/examples/restaurantbot/policy.py deleted file mode 100644 index 6f48652640ed..000000000000 --- a/examples/restaurantbot/policy.py +++ /dev/null @@ -1,60 +0,0 @@ -import logging - -from rasa.core.policies.keras_policy import KerasPolicy - -logger = logging.getLogger(__name__) - - -class RestaurantPolicy(KerasPolicy): - def model_architecture(self, input_shape, output_shape): - """Build a Keras model and return a compiled model.""" - from tensorflow.keras.models import Sequential - from tensorflow.keras.layers import ( - Masking, - LSTM, - Dense, - TimeDistributed, - Activation, - ) - - # Build Model - model = Sequential() - - # the shape of the y vector of the labels, - # determines which output from rnn will be used - # to calculate the loss - if len(output_shape) == 1: - # y is (num examples, num features) so - # only the last output from the rnn is used to - # calculate the loss - model.add(Masking(mask_value=-1, input_shape=input_shape)) - model.add(LSTM(self.rnn_size)) - model.add(Dense(input_dim=self.rnn_size, units=output_shape[-1])) - elif len(output_shape) == 2: - # y is (num examples, max_dialogue_len, num features) so - # all the outputs from the rnn are used to - # calculate the loss, therefore a sequence is returned and - # time distributed layer is used - - # the first value in input_shape is max dialogue_len, - # it is set to None, to allow dynamic_rnn creation - # during prediction - model.add(Masking(mask_value=-1, input_shape=(None, input_shape[1]))) - model.add(LSTM(self.rnn_size, return_sequences=True)) - model.add(TimeDistributed(Dense(units=output_shape[-1]))) - else: - raise ValueError( - "Cannot construct the model because" - "length of output_shape = {} " - "should be 1 or 2." 
- "".format(len(output_shape)) - ) - - model.add(Activation("softmax")) - - model.compile( - loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"] - ) - - logger.debug(model.summary()) - return model diff --git a/examples/restaurantbot/run.py b/examples/restaurantbot/run.py deleted file mode 100644 index da4954d115f3..000000000000 --- a/examples/restaurantbot/run.py +++ /dev/null @@ -1,114 +0,0 @@ -import argparse -import asyncio -import logging -from typing import Text - -import os -import rasa.utils.io -import rasa.train -from examples.restaurantbot.policy import RestaurantPolicy -from rasa.core.agent import Agent -from rasa.core.policies.memoization import MemoizationPolicy -from rasa.core.policies.mapping_policy import MappingPolicy - -logger = logging.getLogger(__name__) - - -async def parse(text: Text, model_path: Text): - agent = Agent.load(model_path) - - response = await agent.handle_text(text) - - logger.info("Text: '{}'".format(text)) - logger.info("Response:") - logger.info(response) - - return response - - -async def train_core( - domain_file: Text = "domain.yml", - model_directory: Text = "models", - model_name: Text = "current", - training_data_file: Text = "data/stories.md", -): - agent = Agent( - domain_file, - policies=[ - MemoizationPolicy(max_history=3), - MappingPolicy(), - RestaurantPolicy(batch_size=100, epochs=100, validation_split=0.2), - ], - ) - - training_data = await agent.load_data(training_data_file, augmentation_factor=10) - agent.train(training_data) - - # Attention: agent.persist stores the model and all meta data into a folder. - # The folder itself is not zipped. - model_path = os.path.join(model_directory, model_name, "core") - agent.persist(model_path) - - logger.info("Model trained. Stored in '{}'.".format(model_path)) - - return model_path - - -def train_nlu( - config_file="config.yml", - model_directory: Text = "models", - model_name: Text = "current", - training_data_file="data/nlu.md", -): - from rasa.nlu.training_data import load_data - from rasa.nlu import config - from rasa.nlu.model import Trainer - - training_data = load_data(training_data_file) - trainer = Trainer(config.load(config_file)) - trainer.train(training_data) - - # Attention: trainer.persist stores the model and all meta data into a folder. - # The folder itself is not zipped. - model_path = os.path.join(model_directory, model_name) - model_directory = trainer.persist(model_path, fixed_model_name="nlu") - - logger.info("Model trained. 
Stored in '{}'.".format(model_directory)) - - return model_directory - - -if __name__ == "__main__": - rasa.utils.io.configure_colored_logging(loglevel="INFO") - - parser = argparse.ArgumentParser(description="Restaurant Bot") - - subparser = parser.add_subparsers(dest="subparser_name") - train_parser = subparser.add_parser("train", help="train a core or nlu model") - parse_parser = subparser.add_parser("predict", help="predict next action") - - parse_parser.add_argument( - "--model", - default="models/current", - help="Path to the model directory which contains " - "sub-folders for core and nlu models.", - ) - parse_parser.add_argument("--text", default="hello", help="Text to parse.") - - train_parser.add_argument( - "model", - choices=["nlu", "core"], - help="Do you want to train a NLU or Core model?", - ) - args = parser.parse_args() - - loop = asyncio.get_event_loop() - - # decide what to do based on first parameter of the script - if args.subparser_name == "train": - if args.model == "nlu": - train_nlu() - elif args.model == "core": - loop.run_until_complete(train_core()) - elif args.subparser_name == "predict": - loop.run_until_complete(parse(args.text, args.model)) diff --git a/examples/rules/actions.py b/examples/rules/actions.py new file mode 100644 index 000000000000..c86b7a4ec14a --- /dev/null +++ b/examples/rules/actions.py @@ -0,0 +1,20 @@ +from typing import Dict, Text, List + +from rasa_sdk import Tracker +from rasa_sdk.events import EventType +from rasa_sdk.executor import CollectingDispatcher +from rasa_sdk import Action +from rasa_sdk.events import SlotSet + + +class ValidateSlots(Action): + def name(self) -> Text: + """Unique identifier of the form""" + + return "action_validate_loop_q_form" + + def run( + self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict + ) -> List[EventType]: + dispatcher.utter_message("validate_some_slot") + return [SlotSet("some_slot", "sdk")] diff --git a/examples/rules/config.yml b/examples/rules/config.yml new file mode 100644 index 000000000000..700e8cbd7d6a --- /dev/null +++ b/examples/rules/config.yml @@ -0,0 +1,29 @@ +language: en + +pipeline: + - name: WhitespaceTokenizer + - name: LexicalSyntacticFeaturizer + - name: CountVectorsFeaturizer + token_pattern: (?u)\b\w+\b + - name: DucklingHTTPExtractor + url: http://localhost:8000 + dimensions: + - number + - name: DIETClassifier + epochs: 100 + - name: EntitySynonymMapper + - name: FallbackClassifier + # If the highest ranked intent has a confidence lower than the threshold than + # the NLU pipeline predicts an intent `nlu_fallback` which you can then use in + # stories / rules to implement an appropriate fallback. + threshold: 0.5 + +policies: + - name: RulePolicy + # Confidence of the prediction if no rule matched and de-facto threshold for a + # core fallback. + core_fallback_threshold: 0.3 + # Name of the action which should be predicted if no rule matched. + core_fallback_action_name: "action_default_fallback" + # If `True` `core_fallback_action_name` is predicted in case no rule matched. + enable_fallback_prediction: True diff --git a/examples/rules/data/nlu.yml b/examples/rules/data/nlu.yml new file mode 100644 index 000000000000..c7bda17059b0 --- /dev/null +++ b/examples/rules/data/nlu.yml @@ -0,0 +1,318 @@ +version: "2.0" + +nlu: +- intent: greet + examples: | + - Hi + - Hey + - Hi bot + - Hey bot + - Hello + - Good morning + - hi again + - hi folks + - hi Mister + - hi pal! 
+ - hi there + - greetings + - hello everybody + - hello is anybody there + - hello robot + - hallo + - heeey + - hi hi + - hey + - hey hey + - hello there + - hi + - hello + - yo + - hola + - hi? + - hey bot! + - hello friend + +- intent: request_restaurant + examples: | + - im looking for a restaurant + - can i get [swedish](cuisine) food in any area + - a restaurant that serves [caribbean](cuisine) food + - id like a restaurant + - im looking for a restaurant that serves [mediterranean](cuisine) food + - can i find a restaurant that serves [chinese](cuisine) + - i am looking for any place that serves [indonesian](cuisine) food for three + - i need to find a restaurant + - uh im looking for a restaurant that serves [kosher](cuisine) food + - uh can i find a restaurant and it should serve [brazilian](cuisine) food + - im looking for a restaurant serving [italian](cuisine) food + - restaurant please + - i'd like to book a table for two with [spanish](cuisine) cuisine + - i need a table for 4 + - book me a table for three at the [italian](cuisine) restaurant + - can you please book a table for 5? + - I would like to book a table for 2 + - looking for a table at the [mexican](cuisine) restaurant for five + - find me a table for 7 people + - Can I get a table for four at the place which server [greek](cuisine) food? + +- intent: affirm + examples: | + - yeah a cheap restaurant serving international food + - correct + - ye + - uh yes + - let's do it + - yeah + - uh yes + - um yes + - yes knocking + - that's correct + - yes yes + - right + - yea + - yes + - yes right + - yes and i dont care + - right on + - i love that + +- intent: deny + examples: | + - no + - no new selection + - no thanks + - no thank you + - uh no + - breath no + - do you have something else + - no this does not work for me + +- intent: inform + examples: | + - [afghan](cuisine) food + - how bout [asian oriental](cuisine) + - what about [indian](cuisine) food + - uh how about [turkish](cuisine) type of food + - um [english](cuisine) + - im looking for [tuscan](cuisine) food + - id like [moroccan](cuisine) food + - [seafood](cuisine) + - [french](cuisine) food + - serves [british](cuisine) food + - id like [canapes](cuisine) + - serving [jamaican](cuisine) food + - um what about [italian](cuisine) food + - im looking for [corsica](cuisine) food + - im looking for [world](cuisine) food + - serves [french](cuisine) food + - how about [indian](cuisine) food + - can i get [chinese](cuisine) food + - [irish](cuisine) food + - [english](cuisine) food + - [spanish](cuisine) food + - how bout one that serves [portuguese](cuisine) food and is cheap + - [german](cuisine) + - [korean](cuisine) food + - im looking for [romanian](cuisine) food + - serves [canapes](cuisine) food + - [gastropub](cuisine) + - i want [french](cuisine) food + - how about [modern european](cuisine) type of food + - it should serve [scandinavian](cuisine) food + - how [european](cuisine) + - how about [european](cuisine) food + - serves [traditional](cuisine) food + - [indonesian](cuisine) food + - [modern european](cuisine) + - serves [brazilian](cuisine) + - i would like [modern european](cuisine) food + - looking for [lebanese](cuisine) food + - [portuguese](cuisine) + - [european](cuisine) + - i want [polish](cuisine) food + - id like [thai](cuisine) + - i want to find [moroccan](cuisine) food + - [afghan](cuisine) + - [scottish](cuisine) food + - how about [vietnamese](cuisine) + - hi im looking for [mexican](cuisine) food + - how about [indian](cuisine) 
type of food + - [polynesian](cuisine) food + - [mexican](cuisine) + - instead could it be for four people + - any [japanese](cuisine) food + - what about [thai](cuisine) food + - how about [asian oriental](cuisine) food + - im looking for [japanese](cuisine) food + - im looking for [belgian](cuisine) food + - im looking for [turkish](cuisine) food + - serving [corsica](cuisine) food + - serving [gastro pub](cuisine:gastropub) + - is there [british](cuisine) food + - [world](cuisine) food + - im looking for something serves [japanese](cuisine) food + - id like a [greek](cuisine) + - im looking for [malaysian](cuisine) food + - i want to find [world](cuisine) food + - serves [pan asian](cuisine:asian) food + - looking for [afghan](cuisine) food + - that serves [portuguese](cuisine) food + - [asian oriental](cuisine:asian) food + - [russian](cuisine) food + - [corsica](cuisine) + - [asian oriental](cuisine:asian) + - serving [basque](cuisine) food + - how about [italian](cuisine) + - looking for [spanish](cuisine) food in the center of town + - it should serve [gastropub](cuisine) food + - [welsh](cuisine) food + - i want [vegetarian](cuisine) food + - im looking for [swedish](cuisine) food + - um how about [chinese](cuisine) food + - [world](cuisine) food + - can i have a [seafood](cuisine) please + - how about [italian](cuisine) food + - how about [korean](cuisine) + - [corsica](cuisine) food + - [scandinavian](cuisine) + - [vegetarian](cuisine) food + - what about [italian](cuisine) + - how about [portuguese](cuisine) food + - serving [french](cuisine) food + - [tuscan](cuisine) food + - how about uh [gastropub](cuisine) + - im looking for [creative](cuisine) food + - im looking for [malaysian](cuisine) food + - im looking for [unusual](cuisine) food + - [danish](cuisine) food + - how about [spanish](cuisine) food + - im looking for [vietnamese](cuisine) food + - [spanish](cuisine) + - a restaurant serving [romanian](cuisine) food + - im looking for [lebanese](cuisine) food + - [italian](cuisine) food + - a restaurant with [afghan](cuisine) food + - im looking for [traditional](cuisine) food + - uh i want [cantonese](cuisine) food + - im looking for [thai](cuisine) + - i want to seat [outside](seating) + - i want to seat [inside](seating) + - i want to seat [outdoor](seating) + - i want to seat [indoor](seating) + - let's go [inside](seating) + - [inside](seating) + - [outdoor](seating) + - prefer sitting [indoors](seating) + - I would like to seat [inside](seating) please + - I prefer sitting [outside](seating) + - my feedback is [good](feedback) + - my feedback is [great](feedback) + - it was [terrible](feedback) + - i consider it [success](feedback) + - you are [awful](feedback) + - for ten people + - 2 people + - for three people + - just one person + - book for seven people + - 2 please + - nine people + +- intent: thankyou + examples: | + - um thank you good bye + - okay cool uh good bye thank you + - okay thank you good bye + - you rock + - and thats all thank you and good bye + - thank you and good bye + - sorry about my mistakes thank you good bye + - noise thank you good bye + - thank you goodbye noise + - okay thank you goodbye + - uh thank you good bye + - thank you goodbye + - thank you goodbye noise thank you goodbye + - breath thank you goodbye + - thank you + - okay thank you + - thanks goodbye + - ah thank you goodbye + - thank you noise + - thank you good bye + - breath thank you very much goodbye + - thanks + - noise thank you goodbye + - unintelligible thank you 
goodbye + - uh okay thank you good bye + - thank you bye + - um okay thank you good bye + +- intent: chitchat + examples: | + - can you share your boss with me? + - i want to get to know your owner + - i want to know the company which designed you + - i want to know the company which generated you + - i want to know the company which invented you + - i want to know who invented you + - May I ask who invented you? + - please tell me the company who created you + - please tell me who created you + - tell me more about your creators + - tell me more about your founders + - Ahoy matey how are you? + - are you alright + - are you having a good day + - Are you ok? + - are you okay + - Do you feel good? + - how are things going + - how are things with you? + - How are things? + - how are you + - how are you doing + - how are you doing this morning + - how are you feeling + - how are you today + - How are you? + - How is the weather today? + - What's the weather like? + - How is the weather? + - What is the weather at your place? + - Do you have good weather? + - Is it raining? + - What's it like out there? + - Is it hot or cold? + - Beautiful day, isn't it? + - What's the weather forecast? + - Is it quite breezy outside? + +- intent: stop + examples: | + - ok then you cant help me + - that was shit, you're not helping + - you can't help me + - you can't help me with what i need + - i guess you can't help me then + - ok i guess you can't help me + - that's not what i want + - ok, but that doesnt help me + - this is leading to nothing + - this conversation is not really helpful + - you cannot help me with what I want + - I think you cant help me + - hm i don't think you can do what i want + - stop + - stop go back + - do you get anything? + - and you call yourself bot company? pff + - and that's it? + - nothing else? + +- intent: bot_challenge + examples: | + - are you a bot? + - are you a human? + - am I talking to a bot? + - am I talking to a human? diff --git a/examples/rules/data/rules.yml b/examples/rules/data/rules.yml new file mode 100644 index 000000000000..8258e869cc00 --- /dev/null +++ b/examples/rules/data/rules.yml @@ -0,0 +1,124 @@ +version: "2.0" + +rules: + +- rule: Greet + # This rule only applies to the start of a session. + conversation_start: True + steps: + - intent: greet + - action: utter_greet + +- rule: Activate form 'q_form' + steps: + - intent: activate_q_form + - action: loop_q_form + - active_loop: loop_q_form + +- rule: Example of an unhappy path for the 'loop_q_form' + condition: + # Condition that form is active. + - active_loop: loop_q_form + - slot_was_set: + - requested_slot: some_slot + steps: + # This unhappy path handles the case of an intent `explain`. + - intent: explain + - action: utter_explain_some_slot + # Return to form after handling the `explain` intent + - action: loop_q_form + - active_loop: loop_q_form + +- rule: Submit form + condition: + - active_loop: loop_q_form + steps: + - action: loop_q_form + - active_loop: null + - slot_was_set: + - requested_slot: null + # The action we want to run when the form is submitted. 
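+ # (`utter_stop` is only an example; any action or response can follow form deactivation.)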
+ - action: utter_stop + +- rule: FAQ question + steps: + - intent: ask_possibilities + - action: utter_list_possibilities + +- rule: Another FAQ example + steps: + - intent: switch_faq + - action: action_switch_faq + +- rule: FAQ simple + condition: + - slot_was_set: + - detailed_faq: false + steps: + - intent: faq + - action: utter_faq + +- rule: FAQ detailed + condition: + - slot_was_set: + - detailed_faq: true + steps: + - intent: faq + - action: utter_faq + # Don't predict `action_listen` after running `utter_faq` + wait_for_user_input: False + +- rule: FAQ helped - continue + condition: + - slot_was_set: + - detailed_faq: true + steps: + - action: utter_faq + - action: utter_ask_did_help + - intent: affirm + - action: utter_continue + +- rule: FAQ did not help + condition: + - slot_was_set: + - detailed_faq: true + steps: + - action: utter_faq + - action: utter_ask_did_help + - intent: deny + - action: utter_detailed_faq + # Don't predict `action_listen` after running `utter_faq` + wait_for_user_input: False + +- rule: Detailed FAQ did not help - continue + condition: + - slot_was_set: + - detailed_faq: true + steps: + - action: utter_detailed_faq + - action: utter_ask_did_help + - intent: deny + - action: utter_ask_stop + - intent: deny + - action: utter_continue + +- rule: Detailed FAQ did not help - stop + condition: + - slot_was_set: + - detailed_faq: true + steps: + - action: utter_detailed_faq + - action: utter_ask_did_help + - intent: deny + - action: utter_ask_stop + - intent: affirm + - action: utter_stop + +- rule: Implementation of the TwoStageFallbackPolicy + steps: + # This intent is automatically triggered by the `FallbackClassifier` in the NLU + # pipeline in case the intent confidence was below the specified threshold. + - intent: nlu_fallback + # The Fallback is implemented as now implemented as form. + - action: two_stage_fallback + - active_loop: two_stage_fallback diff --git a/examples/rules/domain.yml b/examples/rules/domain.yml new file mode 100644 index 000000000000..0aa7dbb09e34 --- /dev/null +++ b/examples/rules/domain.yml @@ -0,0 +1,93 @@ +version: "2.0" + +intents: +- activate_q_form +- inform +- explain +- stopp +- ask_possibilities +- faq +- affirm +- deny +- greet +- switch_faq +- nlu_fallback + +entities: +- some_slot + +slots: + some_slot: + type: unfeaturized + detailed_faq: + type: bool + +actions: +- utter_explain_some_slot +- action_stop_q_form +- utter_list_possibilities +- utter_faq +- utter_ask_did_help +- utter_continue +- utter_detailed_faq +- utter_ask_stop +- utter_stop +- utter_greet +- action_switch_faq +- utter_did_you_mean +# You can implement a custom action to validate extracted slots of your form. +# Return a `slot` event which sets the value to `None` in order to make the form request +# the slot again. You can also return `slot` events for other slots which you can +# extract as part of your custom action. +#- validate_loop_q_form + +forms: +- loop_q_form: + some_slot: + # Slot mappings can be defined in the domain. + # You can also implement custom slot mappings in your validate function for the + # `Form` by returning the desired slot events. + # The slot mappings follow the same syntax as currently in the SDK implementation. + - type: from_entity + entity: some_slot + # Example of a slot mapping which extracts the slot value from the message text if + # the intent is `greet`. 
+ # - type: from_text + # intent: greet + # Example of a slot mapping which sets the slot to `my value` in case the message + # has an intent `greet` + # - type: from_intent + # intent: greet + # value: "my value" + +session_config: + session_expiration_time: 60 # value in minutes + carry_over_slots_to_new_session: true + +responses: + utter_ask_some_slot: + - text: "utter_ask_some_slot" + utter_explain_some_slot: + - text: "utter_explain_some_slot" + utter_list_possibilities: + - text: "utter_list_possibilities" + utter_faq: + - text: "utter_faq" + utter_ask_did_help: + - text: "utter_ask_did_help" + utter_continue: + - text: "utter_continue" + utter_detailed_faq: + - text: "utter_detailed_faq" + utter_ask_stop: + - text: "utter_ask_stop" + utter_stop: + - text: "utter_stop" + utter_greet: + - text: "utter_greet" + utter_did_you_mean: + - text: "utter_did_you_mean" + utter_revert_fallback_and_reapply_last_intent: + - text: "utter_revert_fallback_and_reapply_last_intent" + utter_default: + - text: "I give up." diff --git a/examples/rules/endpoints.yml b/examples/rules/endpoints.yml new file mode 100644 index 000000000000..e8d74ad61f3e --- /dev/null +++ b/examples/rules/endpoints.yml @@ -0,0 +1,2 @@ +action_endpoint: + url: http://localhost:5055/webhook diff --git a/examples/formbot/data/nlu.md b/examples/rules/rules_markdown/nlu.md similarity index 90% rename from examples/formbot/data/nlu.md rename to examples/rules/rules_markdown/nlu.md index 1a054956eceb..974e6c6d6f4b 100644 --- a/examples/formbot/data/nlu.md +++ b/examples/rules/rules_markdown/nlu.md @@ -35,20 +35,20 @@ - id like a restaurant - im looking for a restaurant that serves [mediterranean](cuisine) food - can i find a restaurant that serves [chinese](cuisine) -- i am looking for any place that serves [indonesian](cuisine) food for [three](num_people:3) +- i am looking for any place that serves [indonesian](cuisine) food for three - i need to find a restaurant - uh im looking for a restaurant that serves [kosher](cuisine) food - uh can i find a restaurant and it should serve [brazilian](cuisine) food - im looking for a restaurant serving [italian](cuisine) food - restaurant please -- i'd like to book a table for [two](num_people:2) with [spanish](cuisine) cuisine -- i need a table for [4](num_people) -- book me a table for [three](num_people:3) at the [italian](cuisine) restaurant -- can you please book a table for [5](num_people)? -- I would like to book a table for [2](num_people) -- looking for a table at the [mexican](cuisine) restaurant for [five](num_people:5) -- find me a table for [7](num_people) people -- Can I get a table for [four](num_people:4) at the place which server [greek](cuisine) food? +- i'd like to book a table for two with [spanish](cuisine) cuisine +- i need a table for 4 +- book me a table for three at the [italian](cuisine) restaurant +- can you please book a table for 5? +- I would like to book a table for 2 +- looking for a table at the [mexican](cuisine) restaurant for five +- find me a table for 7 people +- Can I get a table for four at the place which server [greek](cuisine) food? 
## intent:affirm - yeah a cheap restaurant serving international food @@ -131,7 +131,7 @@ - how about [indian](cuisine) type of food - [polynesian](cuisine) food - [mexican](cuisine) -- instead could it be for [four](num_people:4) people +- instead could it be for four people - any [japanese](cuisine) food - what about [thai](cuisine) food - how about [asian oriental](cuisine) food @@ -202,13 +202,13 @@ - it was [terrible](feedback) - i consider it [success](feedback) - you are [awful](feedback) -- for [ten](num_people:10) people -- [2](num_people) people -- for [three](num_people:3) people -- just [one](num_people:1) person -- book for [seven](num_people:7) people -- 2[num_people] please -- [nine](num_people:9) people +- for ten people +- 2 people +- for three people +- just one person +- book for seven people +- 2 please +- nine people ## intent:thankyou - um thank you good bye diff --git a/examples/rules/rules_markdown/rules.md b/examples/rules/rules_markdown/rules.md new file mode 100644 index 000000000000..6d05215eab47 --- /dev/null +++ b/examples/rules/rules_markdown/rules.md @@ -0,0 +1,109 @@ +<!-- each story starting with `>>` will be perceived as independent rule --> + +>> Activate form 'q_form' +<!-- required slots for q_form are listed in the domain. --> + - ... <!-- `...` indicates that this rule applies at any point within a conversation --> +* activate_q_form <!-- like request_restaurant --> + - loop_q_form <!-- Activate and run form --> + - form{"name": "loop_q_form"} + + +>> Example of an unhappy path for the 'q_form' + - form{"name": "loop_q_form"} <!-- condition that form is active--> + - slot{"requested_slot": "some_slot"} <!-- some condition --> + - ... +* explain <!-- can be anything --> + - utter_explain_some_slot + - loop_q_form + - form{"name": "loop_q_form"} + + +>> submit form + - form{"name": "loop_q_form"} <!-- condition that form is active--> + - ... + - loop_q_form <!-- condition that form is active --> + - form{"name": null} + - slot{"requested_slot": null} + - utter_stop <!-- can be any action --> + + +>> FAQ question + - ... +* ask_possibilities + - utter_list_possibilities + + +>> Another FAQ example + - ... +* switch_faq + - action_switch_faq + + +>> FAQ simple + - slot{"detailed_faq": false} + - ... <!-- indicator that there might be a story before hand --> +* faq + - utter_faq +<!-- no ... means predict action_listen here --> + + +>> FAQ detailed + - slot{"detailed_faq": true} + - ... +* faq + - utter_faq + - ... <!-- don't predict action_listen by the rule --> + + +>> FAQ helped - continue + - slot{"detailed_faq": true} + - ... <!-- putting actions before ... shouldn't be allowed --> + - utter_faq + - utter_ask_did_help <!--problem: it will learn that after utter_faq goes utter_ask_did_help --> +* affirm + - utter_continue + + +>> FAQ not helped + - slot{"detailed_faq": true} + - ... + - utter_faq + - utter_ask_did_help +* deny + - utter_detailed_faq + - ... <!-- indicator that the story is continued, no action_listen --> + + +>> detailed FAQ not helped - continue + - slot{"detailed_faq": true} + - ... + - utter_detailed_faq + - utter_ask_did_help +* deny + - utter_ask_stop +* deny + - utter_continue + + +>> detailed FAQ not helped - stop + - slot{"detailed_faq": true} + - ... + - utter_detailed_faq + - utter_ask_did_help +* deny + - utter_ask_stop +* affirm + - utter_stop + + +>> Greet +<!-- lack of ... is story start indicator condition --> +* greet + - utter_greet + + +>> Implementation of the TwoStageFallbackPolicy + - ... 
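+<!-- `nlu_fallback` is triggered automatically by the `FallbackClassifier` when the NLU confidence is below the configured threshold. -->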
+* nlu_fallback <!-- like request_restaurant --> + - two_stage_fallback <!-- Activate and run form --> + - form{"name": "two_stage_fallback"} diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 000000000000..3191a241c3e6 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,4808 @@ +[[package]] +category = "main" +description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py." +name = "absl-py" +optional = false +python-versions = "*" +version = "0.9.0" + +[package.dependencies] +six = "*" + +[[package]] +category = "main" +description = "File support for asyncio." +name = "aiofiles" +optional = false +python-versions = "*" +version = "0.5.0" + +[[package]] +category = "main" +description = "Async http client/server framework (asyncio)" +name = "aiohttp" +optional = false +python-versions = ">=3.5.3" +version = "3.6.2" + +[package.dependencies] +async-timeout = ">=3.0,<4.0" +attrs = ">=17.3.0" +chardet = ">=2.0,<4.0" +multidict = ">=4.5,<5.0" +yarl = ">=1.0,<2.0" + +[package.dependencies.idna-ssl] +python = "<3.7" +version = ">=1.0" + +[package.dependencies.typing-extensions] +python = "<3.7" +version = ">=3.6.5" + +[package.extras] +speedups = ["aiodns", "brotlipy", "cchardet"] + +[[package]] +category = "dev" +description = "Mock out requests made by ClientSession from aiohttp package" +name = "aioresponses" +optional = false +python-versions = "*" +version = "0.6.4" + +[package.dependencies] +aiohttp = ">=2.0.0,<4.0.0" + +[[package]] +category = "dev" +description = "apipkg: namespace control and lazy-import mechanism" +name = "apipkg" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "1.5" + +[[package]] +category = "dev" +description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +name = "appdirs" +optional = false +python-versions = "*" +version = "1.4.4" + +[[package]] +category = "main" +description = "In-process task scheduler with Cron-like capabilities" +name = "apscheduler" +optional = false +python-versions = "*" +version = "3.6.3" + +[package.dependencies] +pytz = "*" +setuptools = ">=0.7" +six = ">=1.4.0" +tzlocal = ">=1.2" + +[package.extras] +asyncio = ["trollius"] +doc = ["sphinx", "sphinx-rtd-theme"] +gevent = ["gevent"] +mongodb = ["pymongo (>=2.8)"] +redis = ["redis (>=3.0)"] +rethinkdb = ["rethinkdb (>=2.4.0)"] +sqlalchemy = ["sqlalchemy (>=0.8)"] +testing = ["pytest", "pytest-cov", "pytest-tornado5", "mock", "pytest-asyncio (<0.6)", "pytest-asyncio"] +tornado = ["tornado (>=4.3)"] +twisted = ["twisted"] +zookeeper = ["kazoo"] + +[[package]] +category = "main" +description = "An AST unparser for Python" +name = "astunparse" +optional = false +python-versions = "*" +version = "1.6.3" + +[package.dependencies] +six = ">=1.6.1,<2.0" +wheel = ">=0.23.0,<1.0" + +[[package]] +category = "main" +description = "Async generators and context managers for Python 3.5+" +name = "async-generator" +optional = false +python-versions = ">=3.5" +version = "1.10" + +[[package]] +category = "main" +description = "Timeout context manager for asyncio programs" +name = "async-timeout" +optional = false +python-versions = ">=3.5.3" +version = "3.0.1" + +[[package]] +category = "dev" +description = "Atomic file writes." 
+marker = "sys_platform == \"win32\"" +name = "atomicwrites" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "1.4.0" + +[[package]] +category = "main" +description = "Classes Without Boilerplate" +name = "attrs" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "19.3.0" + +[package.extras] +azure-pipelines = ["coverage", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface", "pytest-azurepipelines"] +dev = ["coverage", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface", "sphinx", "pre-commit"] +docs = ["sphinx", "zope.interface"] +tests = ["coverage", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface"] + +[[package]] +category = "dev" +description = "AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates" +name = "aws-sam-translator" +optional = false +python-versions = "*" +version = "1.26.0" + +[package.dependencies] +boto3 = ">=1.5,<2.0" +jsonschema = ">=3.0,<4.0" +six = ">=1.11,<2.0" + +[package.extras] +dev = ["coverage (>=4.4.0)", "flake8 (>=3.3.0)", "tox (>=2.2.1)", "pytest-cov (>=2.4.0)", "pylint (>=1.7.2,<2.0)", "pyyaml (>=5.1)", "pytest (>=3.0.7)", "mock (>=2.0.0)", "parameterized (>=0.6.1)", "requests (>=2.20.0)", "docopt (>=0.6.2)"] + +[[package]] +category = "dev" +description = "The AWS X-Ray SDK for Python (the SDK) enables Python developers to record and emit information from within their applications to the AWS X-Ray service." +name = "aws-xray-sdk" +optional = false +python-versions = "*" +version = "2.6.0" + +[package.dependencies] +botocore = ">=1.11.3" +future = "*" +jsonpickle = "*" +wrapt = "*" + +[[package]] +category = "dev" +description = "Microsoft Azure Core Library for Python" +name = "azure-core" +optional = false +python-versions = "*" +version = "1.8.0" + +[package.dependencies] +requests = ">=2.18.4" +six = ">=1.6" + +[[package]] +category = "dev" +description = "Microsoft Azure Blob Storage Client Library for Python" +name = "azure-storage-blob" +optional = false +python-versions = "*" +version = "12.3.2" + +[package.dependencies] +azure-core = ">=1.6.0,<2.0.0" +cryptography = ">=2.1.4" +msrest = ">=0.6.10" + +[[package]] +category = "dev" +description = "The uncompromising code formatter." +name = "black" +optional = false +python-versions = ">=3.6" +version = "19.10b0" + +[package.dependencies] +appdirs = "*" +attrs = ">=18.1.0" +click = ">=6.5" +pathspec = ">=0.6,<1" +regex = "*" +toml = ">=0.9.4" +typed-ast = ">=1.4.0" + +[package.extras] +d = ["aiohttp (>=3.3.2)", "aiohttp-cors"] + +[[package]] +category = "main" +description = "The Blis BLAS-like linear algebra library, as a self-contained C-extension." +name = "blis" +optional = true +python-versions = "*" +version = "0.4.1" + +[package.dependencies] +numpy = ">=1.15.0" + +[[package]] +category = "dev" +description = "Amazon Web Services Library" +name = "boto" +optional = false +python-versions = "*" +version = "2.49.0" + +[[package]] +category = "main" +description = "The AWS SDK for Python" +name = "boto3" +optional = false +python-versions = "*" +version = "1.14.43" + +[package.dependencies] +botocore = ">=1.17.43,<1.18.0" +jmespath = ">=0.7.1,<1.0.0" +s3transfer = ">=0.3.0,<0.4.0" + +[[package]] +category = "main" +description = "Low-level, data-driven core of boto 3." 
+name = "botocore" +optional = false +python-versions = "*" +version = "1.17.43" + +[package.dependencies] +docutils = ">=0.10,<0.16" +jmespath = ">=0.7.1,<1.0.0" +python-dateutil = ">=2.1,<3.0.0" + +[package.dependencies.urllib3] +python = "<3.4.0 || >=3.5.0" +version = ">=1.20,<1.26" + +[[package]] +category = "main" +description = "Extensible memoizing collections and decorators" +name = "cachetools" +optional = false +python-versions = "~=3.5" +version = "4.1.1" + +[[package]] +category = "main" +description = "Super lightweight function registries for your library" +name = "catalogue" +optional = true +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +version = "1.0.0" + +[package.dependencies] +[package.dependencies.importlib-metadata] +python = "<3.8" +version = ">=0.20" + +[[package]] +category = "main" +description = "Python package for providing Mozilla's CA Bundle." +name = "certifi" +optional = false +python-versions = "*" +version = "2020.6.20" + +[[package]] +category = "main" +description = "Foreign Function Interface for Python calling C code." +name = "cffi" +optional = false +python-versions = "*" +version = "1.14.2" + +[package.dependencies] +pycparser = "*" + +[[package]] +category = "dev" +description = "Checks CloudFormation templates for practices and behaviour that could potentially be improved" +name = "cfn-lint" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "0.35.0" + +[package.dependencies] +aws-sam-translator = ">=1.25.0" +jsonschema = ">=3.0,<4.0" +junit-xml = ">=1.9,<2.0" +six = ">=1.11,<2.0" + +[package.dependencies.importlib-resources] +python = "<3.4.0 || >=3.5.0,<3.7" +version = ">=1.4,<2.0" + +[package.dependencies.jsonpatch] +python = "<3.4.0 || >=3.5.0" +version = "*" + +[package.dependencies.networkx] +python = ">=3.5" +version = ">=2.4,<3.0" + +[package.dependencies.pyyaml] +python = "<3.4.0 || >=3.5.0" +version = "*" + +[[package]] +category = "main" +description = "Universal encoding detector for Python 2 and 3" +name = "chardet" +optional = false +python-versions = "*" +version = "3.0.4" + +[[package]] +category = "main" +description = "Composable command line interface toolkit" +name = "click" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "7.1.2" + +[[package]] +category = "main" +description = "Extended pickling support for Python objects" +name = "cloudpickle" +optional = false +python-versions = "*" +version = "1.3.0" + +[[package]] +category = "dev" +description = "Cross-platform colored terminal text." +marker = "sys_platform == \"win32\"" +name = "colorama" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "0.4.3" + +[[package]] +category = "main" +description = "Colorful worry-free console applications for Linux, Mac OS X, and Windows." 
+name = "colorclass" +optional = false +python-versions = "*" +version = "2.2.0" + +[[package]] +category = "main" +description = "Colored terminal output for Python's logging module" +name = "coloredlogs" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "14.0" + +[package.dependencies] +humanfriendly = ">=7.1" + +[package.extras] +cron = ["capturer (>=2.4)"] + +[[package]] +category = "main" +description = "Generate a color based on a value" +name = "colorhash" +optional = false +python-versions = "*" +version = "1.0.2" + +[[package]] +category = "main" +description = "PEP 567 Backport" +marker = "python_version < \"3.7\"" +name = "contextvars" +optional = false +python-versions = "*" +version = "2.4" + +[package.dependencies] +immutables = ">=0.9" + +[[package]] +category = "dev" +description = "Code coverage measurement for Python" +name = "coverage" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" +version = "5.2.1" + +[package.extras] +toml = ["toml"] + +[[package]] +category = "dev" +description = "Show coverage stats online via coveralls.io" +name = "coveralls" +optional = false +python-versions = ">= 3.5" +version = "2.1.2" + +[package.dependencies] +coverage = ">=4.1,<6.0" +docopt = ">=0.6.1" +requests = ">=1.0.0" + +[package.extras] +yaml = ["PyYAML (>=3.10)"] + +[[package]] +category = "main" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +name = "cryptography" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" +version = "3.0" + +[package.dependencies] +cffi = ">=1.8,<1.11.3 || >1.11.3" +six = ">=1.4.1" + +[package.extras] +docs = ["sphinx (>=1.6.5,<1.8.0 || >1.8.0,<3.1.0 || >3.1.0,<3.1.1 || >3.1.1)", "sphinx-rtd-theme"] +docstest = ["doc8", "pyenchant (>=1.6.11)", "twine (>=1.12.0)", "sphinxcontrib-spelling (>=4.0.1)"] +idna = ["idna (>=2.1)"] +pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["pytest (>=3.6.0,<3.9.0 || >3.9.0,<3.9.1 || >3.9.1,<3.9.2 || >3.9.2)", "pretend", "iso8601", "pytz", "hypothesis (>=1.11.4,<3.79.2 || >3.79.2)"] + +[[package]] +category = "main" +description = "Composable style cycles" +name = "cycler" +optional = false +python-versions = "*" +version = "0.10.0" + +[package.dependencies] +six = "*" + +[[package]] +category = "main" +description = "Manage calls to calloc/free through Cython" +name = "cymem" +optional = true +python-versions = "*" +version = "2.0.3" + +[[package]] +category = "main" +description = "A backport of the dataclasses module for Python 3.6" +marker = "python_version < \"3.7\"" +name = "dataclasses" +optional = true +python-versions = "*" +version = "0.6" + +[[package]] +category = "main" +description = "Decorators for Humans" +name = "decorator" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*" +version = "4.4.2" + +[[package]] +category = "main" +description = "DNS toolkit" +name = "dnspython" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "1.16.0" + +[package.extras] +DNSSEC = ["pycryptodome", "ecdsa (>=0.13)"] +IDNA = ["idna (>=2.1)"] + +[[package]] +category = "dev" +description = "A Python library for the Docker Engine API." 
+name = "docker" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "4.3.0" + +[package.dependencies] +pywin32 = "227" +requests = ">=2.14.2,<2.18.0 || >2.18.0" +six = ">=1.4.0" +websocket-client = ">=0.32.0" + +[package.extras] +ssh = ["paramiko (>=2.4.2)"] +tls = ["pyOpenSSL (>=17.5.0)", "cryptography (>=1.3.4)", "idna (>=2.0.0)"] + +[[package]] +category = "main" +description = "Pythonic argument parser, that will make you smile" +name = "docopt" +optional = false +python-versions = "*" +version = "0.6.2" + +[[package]] +category = "dev" +description = "Docspec is a JSON object specification for representing API documentation of programming languages." +name = "docspec" +optional = false +python-versions = "*" +version = "0.2.0" + +[package.dependencies] +"nr.databind.core" = ">=0.0.19,<0.1.0" +"nr.databind.json" = ">=0.0.9,<0.1.0" + +[[package]] +category = "dev" +description = "A parser based on lib2to3 producing docspec data from Python source code." +name = "docspec-python" +optional = false +python-versions = "*" +version = "0.0.7" + +[package.dependencies] +docspec = ">=0.2.0,<0.3.0" +"nr.sumtype" = ">=0.0.3,<0.1.0" + +[[package]] +category = "main" +description = "Docutils -- Python Documentation Utilities" +name = "docutils" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +version = "0.15.2" + +[[package]] +category = "dev" +description = "ECDSA cryptographic signature library (pure python)" +name = "ecdsa" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +version = "0.15" + +[package.dependencies] +six = ">=1.9.0" + +[package.extras] +gmpy = ["gmpy"] +gmpy2 = ["gmpy2"] + +[[package]] +category = "dev" +description = "execnet: rapid multi-Python deployment" +name = "execnet" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "1.7.1" + +[package.dependencies] +apipkg = ">=1.4" + +[package.extras] +testing = ["pre-commit"] + +[[package]] +category = "dev" +description = "Fake implementation of redis API for testing purposes." +name = "fakeredis" +optional = false +python-versions = ">=3.5" +version = "1.4.2" + +[package.dependencies] +redis = "<3.6.0" +six = ">=1.12" +sortedcontainers = "*" + +[package.extras] +aioredis = ["aioredis"] +lua = ["lupa"] + +[[package]] +category = "main" +description = "A python library to communicate with the Facebook Messenger API's" +name = "fbmessenger" +optional = false +python-versions = "*" +version = "6.0.0" + +[package.dependencies] +requests = ">=2.0" + +[[package]] +category = "main" +description = "A platform independent file lock." 
+name = "filelock" +optional = true +python-versions = "*" +version = "3.0.12" + +[[package]] +category = "dev" +description = "the modular source code checker: pep8 pyflakes and co" +name = "flake8" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +version = "3.8.3" + +[package.dependencies] +mccabe = ">=0.6.0,<0.7.0" +pycodestyle = ">=2.6.0a1,<2.7.0" +pyflakes = ">=2.2.0,<2.3.0" + +[package.dependencies.importlib-metadata] +python = "<3.8" +version = "*" + +[[package]] +category = "dev" +description = "Let your Python tests travel through time" +name = "freezegun" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "0.3.15" + +[package.dependencies] +python-dateutil = ">=1.0,<2.0 || >2.0" +six = "*" + +[[package]] +category = "main" +description = "Clean single-source support for Python 3 and 2" +name = "future" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +version = "0.18.2" + +[[package]] +category = "main" +description = "Python AST that abstracts the underlying Python version" +name = "gast" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "0.3.3" + +[[package]] +category = "main" +description = "Python wrapper for the GitHub API(http://developer.github.com/v3)" +name = "github3.py" +optional = true +python-versions = "*" +version = "1.3.0" + +[package.dependencies] +jwcrypto = ">=0.5.0" +python-dateutil = ">=2.6.0" +requests = ">=2.18" +uritemplate = ">=3.0.0" + +[package.extras] +sni = ["pyopenssl", "ndg-httpsclient", "pyasn1"] +test = ["betamax (>=0.8.0)", "pytest (>2.3.5)", "betamax-matchers (>=0.1.0)", "unittest2 (0.5.1)", "mock"] + +[[package]] +category = "dev" +description = "Google API client core library" +name = "google-api-core" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" +version = "1.22.1" + +[package.dependencies] +google-auth = ">=1.19.1,<2.0dev" +googleapis-common-protos = ">=1.6.0,<2.0dev" +protobuf = ">=3.12.0" +pytz = "*" +requests = ">=2.18.0,<3.0.0dev" +setuptools = ">=34.0.0" +six = ">=1.10.0" + +[package.extras] +grpc = ["grpcio (>=1.29.0,<2.0dev)"] +grpcgcp = ["grpcio-gcp (>=0.2.2)"] +grpcio-gcp = ["grpcio-gcp (>=0.2.2)"] + +[[package]] +category = "main" +description = "Google Authentication Library" +name = "google-auth" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" +version = "1.20.1" + +[package.dependencies] +cachetools = ">=2.0.0,<5.0" +pyasn1-modules = ">=0.2.1" +setuptools = ">=40.3.0" +six = ">=1.9.0" + +[package.dependencies.rsa] +python = ">=3.5" +version = ">=3.1.4,<5" + +[[package]] +category = "main" +description = "Google Authentication Library" +name = "google-auth-oauthlib" +optional = false +python-versions = "*" +version = "0.4.1" + +[package.dependencies] +google-auth = "*" +requests-oauthlib = ">=0.7.0" + +[package.extras] +tool = ["click"] + +[[package]] +category = "dev" +description = "Google Cloud API client core library" +name = "google-cloud-core" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" +version = "1.4.1" + +[package.dependencies] +google-api-core = ">=1.19.0,<2.0.0dev" + +[package.extras] +grpc = ["grpcio (>=1.8.2,<2.0dev)"] + +[[package]] +category = "dev" +description = "Google Cloud Storage API client library" +name = "google-cloud-storage" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" +version = "1.30.0" + +[package.dependencies] +google-auth = 
">=1.11.0,<2.0dev" +google-cloud-core = ">=1.2.0,<2.0dev" +google-resumable-media = ">=0.6.0,<2.0dev" + +[[package]] +category = "dev" +description = "A python wrapper of the C library 'Google CRC32C'" +marker = "python_version >= \"3.5\"" +name = "google-crc32c" +optional = false +python-versions = ">=3.5" +version = "0.1.0" + +[package.dependencies] +cffi = ">=1.0.0" + +[package.extras] +testing = ["pytest"] + +[[package]] +category = "main" +description = "pasta is an AST-based Python refactoring library" +name = "google-pasta" +optional = false +python-versions = "*" +version = "0.2.0" + +[package.dependencies] +six = "*" + +[[package]] +category = "dev" +description = "Utilities for Google Media Downloads and Resumable Uploads" +name = "google-resumable-media" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" +version = "0.7.1" + +[package.dependencies] +six = "*" + +[package.dependencies.google-crc32c] +python = ">=3.5" +version = ">=0.1.0,<0.2dev" + +[package.extras] +requests = ["requests (>=2.18.0,<3.0.0dev)"] + +[[package]] +category = "dev" +description = "Common protobufs used in Google APIs" +name = "googleapis-common-protos" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" +version = "1.52.0" + +[package.dependencies] +protobuf = ">=3.6.0" + +[package.extras] +grpc = ["grpcio (>=1.0.0)"] + +[[package]] +category = "main" +description = "HTTP/2-based RPC framework" +name = "grpcio" +optional = false +python-versions = "*" +version = "1.31.0" + +[package.dependencies] +six = ">=1.5.2" + +[package.extras] +protobuf = ["grpcio-tools (>=1.31.0)"] + +[[package]] +category = "main" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +name = "h11" +optional = false +python-versions = "*" +version = "0.8.1" + +[[package]] +category = "main" +description = "HTTP/2 State-Machine based protocol implementation" +name = "h2" +optional = false +python-versions = "*" +version = "3.2.0" + +[package.dependencies] +hpack = ">=3.0,<4" +hyperframe = ">=5.2.0,<6" + +[[package]] +category = "main" +description = "Read and write HDF5 files from Python" +name = "h5py" +optional = false +python-versions = "*" +version = "2.10.0" + +[package.dependencies] +numpy = ">=1.7" +six = "*" + +[[package]] +category = "main" +description = "Pure-Python HPACK header compression" +name = "hpack" +optional = false +python-versions = "*" +version = "3.0.0" + +[[package]] +category = "main" +description = "Chromium HSTS Preload list as a Python package and updated daily" +name = "hstspreload" +optional = false +python-versions = ">=3.6" +version = "2020.8.12" + +[[package]] +category = "main" +description = "A comprehensive HTTP client library." +name = "httplib2" +optional = false +python-versions = "*" +version = "0.18.1" + +[[package]] +category = "main" +description = "A collection of framework independent HTTP protocol utils." +name = "httptools" +optional = false +python-versions = "*" +version = "0.1.1" + +[package.extras] +test = ["Cython (0.29.14)"] + +[[package]] +category = "main" +description = "The next generation HTTP client." 
+name = "httpx" +optional = false +python-versions = ">=3.6" +version = "0.9.3" + +[package.dependencies] +certifi = "*" +chardet = ">=3.0.0,<4.0.0" +h11 = ">=0.8.0,<0.9.0" +h2 = ">=3.0.0,<4.0.0" +hstspreload = "*" +idna = ">=2.0.0,<3.0.0" +rfc3986 = ">=1.3,<2" +sniffio = ">=1.0.0,<2.0.0" + +[[package]] +category = "main" +description = "Human friendly output for text interfaces using Python" +name = "humanfriendly" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "8.2" + +[package.dependencies] +pyreadline = "*" + +[[package]] +category = "main" +description = "HTTP/2 framing layer for Python" +name = "hyperframe" +optional = false +python-versions = "*" +version = "5.2.0" + +[[package]] +category = "main" +description = "Internationalized Domain Names in Applications (IDNA)" +name = "idna" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "2.8" + +[[package]] +category = "main" +description = "Patch ssl.match_hostname for Unicode(idna) domains support" +marker = "python_version < \"3.7\"" +name = "idna-ssl" +optional = false +python-versions = "*" +version = "1.1.0" + +[package.dependencies] +idna = ">=2.0" + +[[package]] +category = "main" +description = "Immutable Collections" +marker = "python_version < \"3.7\"" +name = "immutables" +optional = false +python-versions = ">=3.5" +version = "0.14" + +[[package]] +category = "dev" +description = "A library to calculate python dependency graphs." +name = "importlab" +optional = false +python-versions = ">=2.7.0" +version = "0.5.1" + +[package.dependencies] +networkx = "*" +six = "*" + +[[package]] +category = "main" +description = "Read metadata from Python packages" +name = "importlib-metadata" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +version = "1.7.0" + +[package.dependencies] +zipp = ">=0.5" + +[package.extras] +docs = ["sphinx", "rst.linker"] +testing = ["packaging", "pep517", "importlib-resources (>=1.3)"] + +[[package]] +category = "dev" +description = "Read resources from Python packages" +marker = "python_version < \"3.7\" and python_version != \"3.4\"" +name = "importlib-resources" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +version = "1.5.0" + +[package.dependencies] +[package.dependencies.importlib-metadata] +python = "<3.8" +version = "*" + +[package.dependencies.zipp] +python = "<3.8" +version = ">=0.4" + +[package.extras] +docs = ["sphinx", "rst.linker", "jaraco.packaging"] + +[[package]] +category = "dev" +description = "" +name = "incremental" +optional = false +python-versions = "*" +version = "17.5.0" + +[package.extras] +scripts = ["click (>=6.0)", "twisted (>=16.4.0)"] + +[[package]] +category = "main" +description = "IPv4/IPv6 manipulation library" +name = "ipaddress" +optional = false +python-versions = "*" +version = "1.0.23" + +[[package]] +category = "dev" +description = "An ISO 8601 date/time/duration parser and formatter" +name = "isodate" +optional = false +python-versions = "*" +version = "0.6.0" + +[package.dependencies] +six = "*" + +[[package]] +category = "main" +description = "Chinese Words Segmentation Utilities" +name = "jieba" +optional = true +python-versions = "*" +version = "0.42.1" + +[[package]] +category = "dev" +description = "A very fast and expressive template engine." 
+name = "jinja2" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "2.11.2" + +[package.dependencies] +MarkupSafe = ">=0.23" + +[package.extras] +i18n = ["Babel (>=0.8)"] + +[[package]] +category = "main" +description = "JSON Matching Expressions" +name = "jmespath" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +version = "0.10.0" + +[[package]] +category = "main" +description = "Lightweight pipelining: using Python functions as pipeline jobs." +name = "joblib" +optional = false +python-versions = ">=3.6" +version = "0.15.1" + +[[package]] +category = "dev" +description = "Diff JSON and JSON-like structures in Python" +name = "jsondiff" +optional = false +python-versions = "*" +version = "1.1.2" + +[[package]] +category = "dev" +description = "Apply JSON-Patches (RFC 6902)" +marker = "python_version != \"3.4\"" +name = "jsonpatch" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "1.26" + +[package.dependencies] +jsonpointer = ">=1.9" + +[[package]] +category = "main" +description = "Python library for serializing any arbitrary object graph into JSON" +name = "jsonpickle" +optional = false +python-versions = ">=2.7" +version = "1.4.1" + +[package.dependencies] +importlib-metadata = "*" + +[package.extras] +docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] +testing = ["coverage (<5)", "pytest (>=3.5,<3.7.3 || >3.7.3)", "pytest-checkdocs (>=1.2.3)", "pytest-flake8", "pytest-black-multipy", "pytest-cov", "ecdsa", "feedparser", "numpy", "pandas", "pymongo", "sqlalchemy", "enum34", "jsonlib"] +"testing.libs" = ["demjson", "simplejson", "ujson", "yajl"] + +[[package]] +category = "dev" +description = "Identify specific nodes in a JSON document (RFC 6901)" +marker = "python_version != \"3.4\"" +name = "jsonpointer" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "2.0" + +[[package]] +category = "main" +description = "An implementation of JSON Schema validation for Python" +name = "jsonschema" +optional = false +python-versions = "*" +version = "3.2.0" + +[package.dependencies] +attrs = ">=17.4.0" +pyrsistent = ">=0.14.0" +setuptools = "*" +six = ">=1.11.0" + +[package.dependencies.importlib-metadata] +python = "<3.8" +version = "*" + +[package.extras] +format = ["idna", "jsonpointer (>1.13)", "rfc3987", "strict-rfc3339", "webcolors"] +format_nongpl = ["idna", "jsonpointer (>1.13)", "webcolors", "rfc3986-validator (>0.1.0)", "rfc3339-validator"] + +[[package]] +category = "dev" +description = "Creates JUnit XML test result documents that can be read by tools such as Jenkins" +name = "junit-xml" +optional = false +python-versions = "*" +version = "1.9" + +[package.dependencies] +six = "*" + +[[package]] +category = "main" +description = "Implementation of JOSE Web standards" +name = "jwcrypto" +optional = true +python-versions = "*" +version = "0.7" + +[package.dependencies] +cryptography = ">=1.5" + +[[package]] +category = "main" +description = "Pure Python client for Apache Kafka" +name = "kafka-python" +optional = false +python-versions = "*" +version = "2.0.1" + +[[package]] +category = "main" +description = "Easy data preprocessing and data augmentation for deep learning models" +name = "keras-preprocessing" +optional = false +python-versions = "*" +version = "1.1.2" + +[package.dependencies] +numpy = ">=1.9.1" +six = ">=1.9.0" + +[package.extras] +image = ["scipy (>=0.14)", "Pillow (>=5.2.0)"] +pep8 = 
["flake8"] +tests = ["pandas", "pillow", "tensorflow", "keras", "pytest", "pytest-xdist", "pytest-cov"] + +[[package]] +category = "main" +description = "A fast implementation of the Cassowary constraint solver" +name = "kiwisolver" +optional = false +python-versions = ">=3.6" +version = "1.2.0" + +[[package]] +category = "main" +description = "Python implementation of Markdown." +name = "markdown" +optional = false +python-versions = ">=3.5" +version = "3.2.2" + +[package.dependencies] +[package.dependencies.importlib-metadata] +python = "<3.8" +version = "*" + +[package.extras] +testing = ["coverage", "pyyaml"] + +[[package]] +category = "dev" +description = "Safely add untrusted strings to HTML/XML markup." +name = "markupsafe" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" +version = "1.1.1" + +[[package]] +category = "main" +description = "Python plotting package" +name = "matplotlib" +optional = false +python-versions = ">=3.6" +version = "3.2.2" + +[package.dependencies] +cycler = ">=0.10" +kiwisolver = ">=1.0.1" +numpy = ">=1.11" +pyparsing = ">=2.0.1,<2.0.4 || >2.0.4,<2.1.2 || >2.1.2,<2.1.6 || >2.1.6" +python-dateutil = ">=2.1" + +[[package]] +category = "main" +description = "A mattermost api v4 wrapper to interact with api" +name = "mattermostwrapper" +optional = false +python-versions = "*" +version = "2.2" + +[package.dependencies] +requests = "*" + +[[package]] +category = "dev" +description = "McCabe checker, plugin for flake8" +name = "mccabe" +optional = false +python-versions = "*" +version = "0.6.1" + +[[package]] +category = "dev" +description = "Rolling backport of unittest.mock for all Pythons" +name = "mock" +optional = false +python-versions = ">=3.6" +version = "4.0.2" + +[package.extras] +build = ["twine", "wheel", "blurb"] +docs = ["sphinx"] +test = ["pytest", "pytest-cov"] + +[[package]] +category = "dev" +description = "Fake pymongo stub for testing simple MongoDB-dependent code" +name = "mongomock" +optional = false +python-versions = "*" +version = "3.20.0" + +[package.dependencies] +sentinels = "*" +six = "*" + +[[package]] +category = "dev" +description = "More routines for operating on iterables, beyond itertools" +name = "more-itertools" +optional = false +python-versions = ">=3.5" +version = "8.4.0" + +[[package]] +category = "dev" +description = "A library that allows your python tests to easily mock out the boto library" +name = "moto" +optional = false +python-versions = "*" +version = "1.3.14" + +[package.dependencies] +Jinja2 = ">=2.10.1" +PyYAML = ">=5.1" +aws-xray-sdk = ">=0.93,<0.96 || >0.96" +boto = ">=2.36.0" +boto3 = ">=1.9.201" +botocore = ">=1.12.201" +cfn-lint = ">=0.4.0" +cryptography = ">=2.3.0" +docker = ">=2.5.1" +idna = ">=2.5,<2.9" +jsondiff = "1.1.2" +mock = "*" +python-dateutil = ">=2.1,<3.0.0" +python-jose = "<4.0.0" +pytz = "*" +requests = ">=2.5" +responses = ">=0.9.0" +six = ">1.9" +sshpubkeys = ">=3.1.0,<4.0" +werkzeug = "*" +xmltodict = "*" + +[package.extras] +server = ["flask"] + +[[package]] +category = "dev" +description = "AutoRest swagger generator Python client runtime." 
+name = "msrest" +optional = false +python-versions = "*" +version = "0.6.18" + +[package.dependencies] +certifi = ">=2017.4.17" +isodate = ">=0.6.0" +requests = ">=2.16,<3.0" +requests-oauthlib = ">=0.5.0" + +[package.extras] +async = ["aiohttp (>=3.0)", "aiodns"] + +[[package]] +category = "main" +description = "multidict implementation" +name = "multidict" +optional = false +python-versions = ">=3.5" +version = "4.7.6" + +[[package]] +category = "main" +description = "Cython bindings for MurmurHash" +name = "murmurhash" +optional = true +python-versions = "*" +version = "1.0.2" + +[[package]] +category = "main" +description = "Python package for creating and manipulating graphs and networks" +name = "networkx" +optional = false +python-versions = ">=3.5" +version = "2.4" + +[package.dependencies] +decorator = ">=4.3.0" + +[package.extras] +all = ["numpy", "scipy", "pandas", "matplotlib", "pygraphviz", "pydot", "pyyaml", "gdal", "lxml", "pytest"] +gdal = ["gdal"] +lxml = ["lxml"] +matplotlib = ["matplotlib"] +numpy = ["numpy"] +pandas = ["pandas"] +pydot = ["pydot"] +pygraphviz = ["pygraphviz"] +pytest = ["pytest"] +pyyaml = ["pyyaml"] +scipy = ["scipy"] + +[[package]] +category = "dev" +description = "Ninja is a small build system with a focus on speed" +name = "ninja" +optional = false +python-versions = "*" +version = "1.10.0.post1" + +[[package]] +category = "dev" +description = "Useful container datatypes for Python 2 and 3." +name = "nr.collections" +optional = false +python-versions = "*" +version = "0.0.1" + +[package.dependencies] +"nr.metaclass" = ">=0.0.1,<0.1.0" +six = ">=1.11.0,<2.0.0" + +[package.extras] +test = ["nr.fs (>=1.5.0,<2.0.0)"] + +[[package]] +category = "dev" +description = "Bind structured data directly to typed objects." +name = "nr.databind.core" +optional = false +python-versions = "*" +version = "0.0.21" + +[package.dependencies] +"nr.collections" = ">=0.0.1,<1.0.0" +"nr.interface" = ">=0.0.1,<0.1.0" +"nr.pylang.utils" = ">=0.0.3,<0.1.0" +"nr.stream" = ">=0.0.1,<0.1.0" + +[package.extras] +test = ["pytest", "pyyaml"] + +[[package]] +category = "dev" +description = "Deserialize JSON into Python objects and reverse." +name = "nr.databind.json" +optional = false +python-versions = "*" +version = "0.0.13" + +[package.dependencies] +"nr.collections" = ">=0.0.1,<1.0.0" +"nr.databind.core" = ">=0.0.16,<0.1.0" +"nr.interface" = ">=0.0.1,<0.1.0" +"nr.parsing.date" = ">=0.1.0,<1.0.0" +"nr.pylang.utils" = ">=0.0.1,<0.1.0" + +[package.extras] +test = ["pytest", "pyyaml", "flask"] + +[[package]] +category = "dev" +description = "Filesystem and path manipulation tools." +name = "nr.fs" +optional = false +python-versions = "*" +version = "1.6.2" + +[package.dependencies] +six = ">=1.11.0,<2.0.0" + +[[package]] +category = "dev" +description = "Interface definitions for Python." +name = "nr.interface" +optional = false +python-versions = "*" +version = "0.0.3" + +[package.dependencies] +"nr.collections" = ">=0.0.1,<1.0.0" +"nr.metaclass" = ">=0.0.1,<0.1.0" +"nr.pylang.utils" = ">=0.0.1,<0.1.0" +six = ">=1.11.0,<2.0.0" + +[[package]] +category = "dev" +description = "Metaclass utilities." +name = "nr.metaclass" +optional = false +python-versions = "*" +version = "0.0.5" + +[[package]] +category = "dev" +description = "A simple and fast date parsing library. Uses dateutil for timezone offset support." 
+name = "nr.parsing.date" +optional = false +python-versions = "*" +version = "0.3.0" + +[package.dependencies] +"nr.utils.re" = ">=0.1.0,<0.2.0" + +[package.extras] +test = ["pytest", "python-dateutil"] + +[[package]] +category = "dev" +description = "Package description here." +name = "nr.pylang.utils" +optional = false +python-versions = "*" +version = "0.0.3" + +[package.dependencies] +"nr.collections" = ">=0.0.1,<1.0.0" + +[[package]] +category = "dev" +description = "Use iterators like Java streams." +name = "nr.stream" +optional = false +python-versions = "*" +version = "0.0.4" + +[package.dependencies] +"nr.collections" = ">=0.0.1,<1.0.0" +"nr.pylang.utils" = ">=0.0.1,<1.0.0" +six = ">=1.11.0,<2.0.0" + +[[package]] +category = "dev" +description = "Sumtypes in Python." +name = "nr.sumtype" +optional = false +python-versions = "*" +version = "0.0.3" + +[package.dependencies] +"nr.metaclass" = ">=0.0.4,<1.0.0" +"nr.stream" = ">=0.0.2,<1.0.0" + +[[package]] +category = "dev" +description = "This module provides some utility functions for applying regular expressions." +name = "nr.utils.re" +optional = false +python-versions = "*" +version = "0.1.0" + +[package.extras] +test = ["pytest"] + +[[package]] +category = "main" +description = "NumPy is the fundamental package for array computing with Python." +name = "numpy" +optional = false +python-versions = ">=3.5" +version = "1.18.5" + +[[package]] +category = "main" +description = "OAuth 2.0 client library" +name = "oauth2client" +optional = false +python-versions = "*" +version = "4.1.3" + +[package.dependencies] +httplib2 = ">=0.9.1" +pyasn1 = ">=0.1.7" +pyasn1-modules = ">=0.0.5" +rsa = ">=3.1.4" +six = ">=1.6.1" + +[[package]] +category = "main" +description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" +name = "oauthlib" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "3.1.0" + +[package.extras] +rsa = ["cryptography"] +signals = ["blinker"] +signedtoken = ["cryptography", "pyjwt (>=1.0.0)"] + +[[package]] +category = "main" +description = "Optimizing numpys einsum function" +name = "opt-einsum" +optional = false +python-versions = ">=3.5" +version = "3.3.0" + +[package.dependencies] +numpy = ">=1.7" + +[package.extras] +docs = ["sphinx (1.2.3)", "sphinxcontrib-napoleon", "sphinx-rtd-theme", "numpydoc"] +tests = ["pytest", "pytest-cov", "pytest-pep8"] + +[[package]] +category = "main" +description = "Core utilities for Python packages" +name = "packaging" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "20.4" + +[package.dependencies] +pyparsing = ">=2.0.2" +six = "*" + +[[package]] +category = "dev" +description = "Utility library for gitignore style pattern matching of file paths." 
+name = "pathspec" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "0.8.0" + +[[package]] +category = "dev" +description = "File system general utilities" +name = "pathtools" +optional = false +python-versions = "*" +version = "0.1.2" + +[[package]] +category = "dev" +description = "Utilities to deal with pep440 versioning" +name = "pep440-version-utils" +optional = false +python-versions = ">=3.6,<4.0" +version = "0.3.0" + +[package.dependencies] +packaging = ">=20.3,<21.0" + +[[package]] +category = "main" +description = "Pika Python AMQP Client Library" +name = "pika" +optional = false +python-versions = "*" +version = "1.1.0" + +[package.extras] +tornado = ["tornado"] +twisted = ["twisted"] + +[[package]] +category = "main" +description = "The smartest command line arguments parser in the world" +name = "plac" +optional = true +python-versions = "*" +version = "1.1.3" + +[[package]] +category = "dev" +description = "plugin and hook calling mechanisms for python" +name = "pluggy" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "0.13.1" + +[package.dependencies] +[package.dependencies.importlib-metadata] +python = "<3.8" +version = ">=0.12" + +[package.extras] +dev = ["pre-commit", "tox"] + +[[package]] +category = "main" +description = "Cython hash table that trusts the keys are pre-hashed" +name = "preshed" +optional = true +python-versions = "*" +version = "3.0.2" + +[package.dependencies] +cymem = ">=2.0.2,<2.1.0" +murmurhash = ">=0.28.0,<1.1.0" + +[[package]] +category = "main" +description = "Library for building powerful interactive command lines in Python" +name = "prompt-toolkit" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +version = "2.0.10" + +[package.dependencies] +six = ">=1.9.0" +wcwidth = "*" + +[[package]] +category = "main" +description = "Protocol Buffers" +name = "protobuf" +optional = false +python-versions = "*" +version = "3.13.0" + +[package.dependencies] +setuptools = "*" +six = ">=1.9" + +[[package]] +category = "main" +description = "psycopg2 - Python-PostgreSQL Database Adapter" +name = "psycopg2-binary" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" +version = "2.8.5" + +[[package]] +category = "dev" +description = "library with cross-python path, ini-parsing, io, code, log facilities" +name = "py" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "1.9.0" + +[[package]] +category = "main" +description = "ASN.1 types and codecs" +name = "pyasn1" +optional = false +python-versions = "*" +version = "0.4.8" + +[[package]] +category = "main" +description = "A collection of ASN.1-based protocols modules." +name = "pyasn1-modules" +optional = false +python-versions = "*" +version = "0.2.8" + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.5.0" + +[[package]] +category = "dev" +description = "Python style guide checker" +name = "pycodestyle" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "2.6.0" + +[[package]] +category = "main" +description = "C parser in Python" +name = "pycparser" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "2.20" + +[[package]] +category = "dev" +description = "Create Python API documentation in Markdown format." 
+name = "pydoc-markdown" +optional = false +python-versions = "*" +version = "3.3.0.post1" + +[package.dependencies] +PyYAML = ">=5.3,<6.0.0" +click = ">=7.0,<8.0.0" +docspec = ">=0.2.0,<0.3.0" +docspec-python = ">=0.0.7,<0.1.0" +"nr.collections" = ">=0.0.1,<0.1.0" +"nr.databind.core" = ">=0.0.18,<0.1.0" +"nr.databind.json" = ">=0.0.9,<0.1.0" +"nr.fs" = ">=1.6.0,<2.0.0" +"nr.interface" = ">=0.0.3,<0.1.0" +requests = ">=2.23.0,<3.0.0" +six = ">=1.11.0,<2.0.0" +toml = ">=0.10.1,<1.0.0" +watchdog = ">=0.10.2,<1.0.0" + +[package.source] +reference = "rasa-pypi" +type = "legacy" +url = "https://pypi.rasa.com/simple" + +[[package]] +category = "main" +description = "Python interface to Graphviz's Dot" +name = "pydot" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "1.4.1" + +[package.dependencies] +pyparsing = ">=2.1.4" + +[[package]] +category = "dev" +description = "passive checker of Python programs" +name = "pyflakes" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "2.2.0" + +[[package]] +category = "main" +description = "JSON Web Token implementation in Python" +name = "pyjwt" +optional = false +python-versions = "*" +version = "1.7.1" + +[package.extras] +crypto = ["cryptography (>=1.4)"] +flake8 = ["flake8", "flake8-import-order", "pep8-naming"] +test = ["pytest (>=4.0.1,<5.0.0)", "pytest-cov (>=2.6.0,<3.0.0)", "pytest-runner (>=4.2,<5.0.0)"] + +[[package]] +category = "main" +description = "Python lib/cli for JSON/YAML schema validation" +name = "pykwalify" +optional = false +python-versions = "*" +version = "1.7.0" + +[package.dependencies] +PyYAML = ">=3.11" +docopt = ">=0.6.2" +python-dateutil = ">=2.4.2" + +[package.extras] +ruamel = ["ruamel.yaml (>=0.11.0,<0.16.0)"] + +[[package]] +category = "main" +description = "Python driver for MongoDB <http://www.mongodb.org>" +name = "pymongo" +optional = false +python-versions = "*" +version = "3.10.1" + +[package.dependencies] +[package.dependencies.dnspython] +optional = true +version = ">=1.16.0,<1.17.0" + +[package.dependencies.ipaddress] +optional = true +version = "*" + +[package.extras] +encryption = ["pymongocrypt (<2.0.0)"] +gssapi = ["pykerberos"] +snappy = ["python-snappy"] +srv = ["dnspython (>=1.16.0,<1.17.0)"] +tls = ["ipaddress"] +zstd = ["zstandard"] + +[[package]] +category = "main" +description = "Python parsing module" +name = "pyparsing" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +version = "2.4.7" + +[[package]] +category = "main" +description = "A python implmementation of GNU readline." 
+marker = "sys_platform == \"win32\"" +name = "pyreadline" +optional = false +python-versions = "*" +version = "2.1" + +[[package]] +category = "main" +description = "Persistent/Functional/Immutable data structures" +name = "pyrsistent" +optional = false +python-versions = "*" +version = "0.16.0" + +[package.dependencies] +six = "*" + +[[package]] +category = "dev" +description = "pytest: simple powerful testing with Python" +name = "pytest" +optional = false +python-versions = ">=3.5" +version = "5.4.3" + +[package.dependencies] +atomicwrites = ">=1.0" +attrs = ">=17.4.0" +colorama = "*" +more-itertools = ">=4.0.0" +packaging = "*" +pluggy = ">=0.12,<1.0" +py = ">=1.5.0" +wcwidth = "*" + +[package.dependencies.importlib-metadata] +python = "<3.8" +version = ">=0.12" + +[package.extras] +checkqa-mypy = ["mypy (v0.761)"] +testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] + +[[package]] +category = "dev" +description = "Pytest support for asyncio." +name = "pytest-asyncio" +optional = false +python-versions = ">= 3.5" +version = "0.10.0" + +[package.dependencies] +pytest = ">=3.0.6" + +[package.extras] +testing = ["async-generator (>=1.3)", "coverage", "hypothesis (>=3.64)"] + +[[package]] +category = "dev" +description = "Pytest plugin for measuring coverage." +name = "pytest-cov" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "2.10.1" + +[package.dependencies] +coverage = ">=4.4" +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests (2.0.2)", "six", "pytest-xdist", "virtualenv"] + +[[package]] +category = "dev" +description = "run tests in isolated forked subprocesses" +name = "pytest-forked" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "1.3.0" + +[package.dependencies] +py = "*" +pytest = ">=3.10" + +[[package]] +category = "dev" +description = "py.test plugin to test server connections locally." 
+name = "pytest-localserver" +optional = false +python-versions = "*" +version = "0.5.0" + +[package.dependencies] +werkzeug = ">=0.10" + +[[package]] +category = "dev" +description = "a pytest plugin for Sanic" +name = "pytest-sanic" +optional = false +python-versions = ">=3.6,<4.0" +version = "1.6.1" + +[package.dependencies] +aiohttp = ">=3.6.2,<4.0.0" +async_generator = ">=1.10,<2.0" +pytest = ">=5.2,<6.0" + +[[package]] +category = "dev" +description = "pytest xdist plugin for distributed testing and loop-on-failing modes" +name = "pytest-xdist" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "1.34.0" + +[package.dependencies] +execnet = ">=1.1" +pytest = ">=4.4.0" +pytest-forked = "*" +six = "*" + +[package.extras] +testing = ["filelock"] + +[[package]] +category = "main" +description = "Python binding for CRFsuite" +name = "python-crfsuite" +optional = false +python-versions = "*" +version = "0.9.7" + +[[package]] +category = "main" +description = "Extensions to the standard Python datetime module" +name = "python-dateutil" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +version = "2.8.1" + +[package.dependencies] +six = ">=1.5" + +[[package]] +category = "main" +description = "Engine.IO server" +name = "python-engineio" +optional = false +python-versions = "*" +version = "3.13.1" + +[package.dependencies] +six = ">=1.9.0" + +[package.extras] +asyncio_client = ["aiohttp (>=3.4)"] +client = ["requests (>=2.21.0)", "websocket-client (>=0.54.0)"] + +[[package]] +category = "dev" +description = "JOSE implementation in Python" +name = "python-jose" +optional = false +python-versions = "*" +version = "3.1.0" + +[package.dependencies] +ecdsa = "<1.0" +pyasn1 = "*" +rsa = "*" +six = "<2.0" + +[package.extras] +cryptography = ["cryptography"] +pycrypto = ["pycrypto (>=2.6.0,<2.7.0)", "pyasn1"] +pycryptodome = ["pycryptodome (>=3.3.1,<4.0.0)", "pyasn1"] + +[[package]] +category = "main" +description = "Socket.IO server" +name = "python-socketio" +optional = false +python-versions = "*" +version = "4.6.0" + +[package.dependencies] +python-engineio = ">=3.13.0" +six = ">=1.9.0" + +[package.extras] +asyncio_client = ["aiohttp (>=3.4)", "websockets (>=7.0)"] +client = ["requests (>=2.21.0)", "websocket-client (>=0.54.0)"] + +[[package]] +category = "main" +description = "We have made you a wrapper you can't refuse" +name = "python-telegram-bot" +optional = false +python-versions = "*" +version = "12.8" + +[package.dependencies] +certifi = "*" +cryptography = "*" +decorator = ">=4.4.0" +tornado = ">=5.1" + +[package.extras] +json = ["ujson"] +socks = ["pysocks"] + +[[package]] +category = "dev" +description = "Python type inferencer" +name = "pytype" +optional = false +python-versions = "<3.9,>=3.5" +version = "2020.8.10" + +[package.dependencies] +attrs = "*" +importlab = ">=0.5.1" +ninja = "*" +pyyaml = ">=3.11" +six = "*" +typed_ast = "*" + +[[package]] +category = "main" +description = "World timezone definitions, modern and historical" +name = "pytz" +optional = false +python-versions = "*" +version = "2020.1" + +[[package]] +category = "dev" +description = "Python for Window Extensions" +marker = "sys_platform == \"win32\"" +name = "pywin32" +optional = false +python-versions = "*" +version = "227" + +[[package]] +category = "main" +description = "YAML parser and emitter for Python" +name = "pyyaml" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "5.3.1" + 
+[[package]] +category = "main" +description = "Python library to build pretty command line user prompts ⭐️" +name = "questionary" +optional = false +python-versions = "*" +version = "1.5.2" + +[package.dependencies] +prompt-toolkit = ">=2.0,<4.0" + +[package.extras] +test = ["pytest", "pytest-pycodestyle", "pytest-cov", "coveralls"] + +[[package]] +category = "main" +description = "Open source machine learning framework to automate text- and voice-based conversations: NLU, dialogue management, connect to Slack, Facebook, and more - Create chatbots and voice assistants" +name = "rasa-sdk" +optional = false +python-versions = ">=3.6,<4.0" +version = "2.0.0a2" + +[package.dependencies] +coloredlogs = ">=10,<15" +requests = ">=2.0,<3.0" +sanic = ">=19.12.2,<20.0.0" +sanic-cors = ">=0.10.0b1,<0.11.0" + +[[package]] +category = "main" +description = "Python client for Redis key-value store" +name = "redis" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "3.5.3" + +[package.extras] +hiredis = ["hiredis (>=0.1.3)"] + +[[package]] +category = "main" +description = "Alternative regular expression module, to replace re." +name = "regex" +optional = false +python-versions = "*" +version = "2020.6.8" + +[[package]] +category = "main" +description = "Python HTTP for Humans." +name = "requests" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "2.24.0" + +[package.dependencies] +certifi = ">=2017.4.17" +chardet = ">=3.0.2,<4" +idna = ">=2.5,<3" +urllib3 = ">=1.21.1,<1.25.0 || >1.25.0,<1.25.1 || >1.25.1,<1.26" + +[package.extras] +security = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)"] +socks = ["PySocks (>=1.5.6,<1.5.7 || >1.5.7)", "win-inet-pton"] + +[[package]] +category = "main" +description = "OAuthlib authentication support for Requests." +name = "requests-oauthlib" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "1.3.0" + +[package.dependencies] +oauthlib = ">=3.0.0" +requests = ">=2.0.0" + +[package.extras] +rsa = ["oauthlib (>=3.0.0)"] + +[[package]] +category = "main" +description = "A utility belt for advanced users of python-requests" +name = "requests-toolbelt" +optional = false +python-versions = "*" +version = "0.9.1" + +[package.dependencies] +requests = ">=2.0.1,<3.0.0" + +[[package]] +category = "dev" +description = "A utility library for mocking out the `requests` Python library." 
+name = "responses" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "0.10.16" + +[package.dependencies] +requests = ">=2.0" +six = "*" +urllib3 = ">=1.25.10" + +[package.extras] +tests = ["coverage (>=3.7.1,<5.0.0)", "pytest-cov", "pytest-localserver", "flake8", "pytest (>=4.6,<5.0)", "pytest (>=4.6)"] + +[[package]] +category = "main" +description = "Validating URI References per RFC 3986" +name = "rfc3986" +optional = false +python-versions = "*" +version = "1.4.0" + +[package.extras] +idna2008 = ["idna"] + +[[package]] +category = "main" +description = "Python API wrapper for Rocket.Chat" +name = "rocketchat-api" +optional = false +python-versions = "*" +version = "1.4" + +[package.dependencies] +requests = "*" + +[[package]] +category = "main" +description = "Pure-Python RSA implementation" +name = "rsa" +optional = false +python-versions = ">=3.5, <4" +version = "4.6" + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +category = "main" +description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" +name = "ruamel.yaml" +optional = false +python-versions = "*" +version = "0.16.10" + +[package.dependencies] +[package.dependencies."ruamel.yaml.clib"] +python = "<3.9" +version = ">=0.1.2" + +[package.extras] +docs = ["ryd"] +jinja2 = ["ruamel.yaml.jinja2 (>=0.2)"] + +[[package]] +category = "main" +description = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml" +marker = "platform_python_implementation == \"CPython\" and python_version < \"3.9\"" +name = "ruamel.yaml.clib" +optional = false +python-versions = "*" +version = "0.2.0" + +[[package]] +category = "main" +description = "An Amazon S3 Transfer Manager" +name = "s3transfer" +optional = false +python-versions = "*" +version = "0.3.3" + +[package.dependencies] +botocore = ">=1.12.36,<2.0a.0" + +[[package]] +category = "main" +description = "SacreMoses" +name = "sacremoses" +optional = true +python-versions = "*" +version = "0.0.43" + +[package.dependencies] +click = "*" +joblib = "*" +regex = "*" +six = "*" +tqdm = "*" + +[[package]] +category = "main" +description = "A web server and web framework that's written to go fast. Build fast. Run fast." 
+name = "sanic" +optional = false +python-versions = ">=3.6" +version = "19.12.2" + +[package.dependencies] +aiofiles = ">=0.3.0" +httptools = ">=0.0.10" +httpx = "0.9.3" +multidict = ">=4.0,<5.0" +ujson = ">=1.35" +uvloop = ">=0.5.3" +websockets = ">=7.0,<9.0" + +[package.extras] +all = ["pytest (5.2.1)", "multidict (>=4.0,<5.0)", "gunicorn", "pytest-cov", "httpcore (0.3.0)", "beautifulsoup4", "pytest-sanic", "pytest-sugar", "pytest-benchmark", "aiofiles", "tox", "black", "flake8", "bandit", "towncrier", "sphinx (>=2.1.2)", "sphinx-rtd-theme", "recommonmark (>=0.5.0)", "docutils", "pygments", "uvloop (>=0.5.3)", "ujson (>=1.35)"] +dev = ["pytest (5.2.1)", "multidict (>=4.0,<5.0)", "gunicorn", "pytest-cov", "httpcore (0.3.0)", "beautifulsoup4", "pytest-sanic", "pytest-sugar", "pytest-benchmark", "aiofiles", "tox", "black", "flake8", "bandit", "towncrier", "uvloop (>=0.5.3)", "ujson (>=1.35)"] +docs = ["sphinx (>=2.1.2)", "sphinx-rtd-theme", "recommonmark (>=0.5.0)", "docutils", "pygments"] +test = ["pytest (5.2.1)", "multidict (>=4.0,<5.0)", "gunicorn", "pytest-cov", "httpcore (0.3.0)", "beautifulsoup4", "pytest-sanic", "pytest-sugar", "pytest-benchmark", "uvloop (>=0.5.3)", "ujson (>=1.35)"] + +[[package]] +category = "main" +description = "A Sanic extension adding a decorator for CORS support. Based on flask-cors by Cory Dolphin." +name = "sanic-cors" +optional = false +python-versions = "*" +version = "0.10.0.post3" + +[package.dependencies] +sanic = ">=18.12.0" +sanic-plugins-framework = ">=0.9.0" + +[[package]] +category = "main" +description = "JWT oauth flow for Sanic" +name = "sanic-jwt" +optional = false +python-versions = "*" +version = "1.4.1" + +[package.dependencies] +pyjwt = "*" + +[package.extras] +all = ["sphinx", "sphinx"] +docs = ["sphinx"] + +[[package]] +category = "main" +description = "Doing all of the boilerplate to create a Sanic Plugin, so you don't have to." 
+name = "sanic-plugins-framework" +optional = false +python-versions = "*" +version = "0.9.3" + +[package.dependencies] +sanic = ">=18.12.0" + +[[package]] +category = "main" +description = "A set of python modules for machine learning and data mining" +name = "scikit-learn" +optional = false +python-versions = ">=3.6" +version = "0.23.2" + +[package.dependencies] +joblib = ">=0.11" +numpy = ">=1.13.3" +scipy = ">=0.19.1" +threadpoolctl = ">=2.0.0" + +[package.extras] +alldeps = ["numpy (>=1.13.3)", "scipy (>=0.19.1)"] + +[[package]] +category = "main" +description = "SciPy: Scientific Library for Python" +name = "scipy" +optional = false +python-versions = ">=3.5" +version = "1.4.1" + +[package.dependencies] +numpy = ">=1.13.3" + +[[package]] +category = "main" +description = "SentencePiece python wrapper" +name = "sentencepiece" +optional = true +python-versions = "*" +version = "0.1.92" + +[[package]] +category = "dev" +description = "Various objects to denote special meanings in python" +name = "sentinels" +optional = false +python-versions = "*" +version = "1.0.0" + +[[package]] +category = "main" +description = "Python 2 and 3 compatibility utilities" +name = "six" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +version = "1.15.0" + +[[package]] +category = "main" +description = "CRFsuite (python-crfsuite) wrapper which provides interface simlar to scikit-learn" +name = "sklearn-crfsuite" +optional = false +python-versions = "*" +version = "0.3.6" + +[package.dependencies] +python-crfsuite = ">=0.8.3" +six = "*" +tabulate = "*" +tqdm = ">=2.0" + +[[package]] +category = "main" +description = "Slack API clients for Web API and RTM API" +name = "slackclient" +optional = false +python-versions = ">=3.6.0" +version = "2.8.0" + +[package.dependencies] +aiohttp = ">3.5.2,<4.0.0" + +[package.extras] +optional = ["aiodns (>1.0)"] + +[[package]] +category = "main" +description = "Sniff out which async library your code is running under" +name = "sniffio" +optional = false +python-versions = ">=3.5" +version = "1.1.0" + +[package.dependencies] +[package.dependencies.contextvars] +python = "<3.7" +version = ">=2.1" + +[[package]] +category = "dev" +description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set" +name = "sortedcontainers" +optional = false +python-versions = "*" +version = "2.2.2" + +[[package]] +category = "main" +description = "Industrial-strength Natural Language Processing (NLP) in Python" +name = "spacy" +optional = true +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +version = "2.2.4" + +[package.dependencies] +blis = ">=0.4.0,<0.5.0" +catalogue = ">=0.0.7,<1.1.0" +cymem = ">=2.0.2,<2.1.0" +murmurhash = ">=0.28.0,<1.1.0" +numpy = ">=1.15.0" +plac = ">=0.9.6,<1.2.0" +preshed = ">=3.0.2,<3.1.0" +requests = ">=2.13.0,<3.0.0" +setuptools = "*" +srsly = ">=1.0.2,<1.1.0" +thinc = "7.4.0" +tqdm = ">=4.38.0,<5.0.0" +wasabi = ">=0.4.0,<1.1.0" + +[package.extras] +cuda = ["cupy (>=5.0.0b4)"] +cuda100 = ["cupy-cuda100 (>=5.0.0b4)"] +cuda80 = ["cupy-cuda80 (>=5.0.0b4)"] +cuda90 = ["cupy-cuda90 (>=5.0.0b4)"] +cuda91 = ["cupy-cuda91 (>=5.0.0b4)"] +cuda92 = ["cupy-cuda92 (>=5.0.0b4)"] +ja = ["fugashi (>=0.1.3)"] +ko = ["natto-py (0.9.0)"] +lookups = ["spacy-lookups-data (>=0.0.5,<0.2.0)"] +th = ["pythainlp (>=2.0)"] + +[[package]] +category = "main" +description = "Database Abstraction Library" +name = "sqlalchemy" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "1.3.18" + +[package.extras] +mssql 
= ["pyodbc"] +mssql_pymssql = ["pymssql"] +mssql_pyodbc = ["pyodbc"] +mysql = ["mysqlclient"] +oracle = ["cx-oracle"] +postgresql = ["psycopg2"] +postgresql_pg8000 = ["pg8000"] +postgresql_psycopg2binary = ["psycopg2-binary"] +postgresql_psycopg2cffi = ["psycopg2cffi"] +pymysql = ["pymysql"] + +[[package]] +category = "main" +description = "Modern high-performance serialization utilities for Python" +name = "srsly" +optional = true +python-versions = "*" +version = "1.0.2" + +[[package]] +category = "dev" +description = "SSH public key parser" +name = "sshpubkeys" +optional = false +python-versions = "*" +version = "3.1.0" + +[package.dependencies] +cryptography = ">=2.1.4" +ecdsa = ">=0.13" + +[package.extras] +dev = ["twine", "wheel"] + +[[package]] +category = "main" +description = "Pretty-print tabular data" +name = "tabulate" +optional = false +python-versions = "*" +version = "0.8.7" + +[package.extras] +widechars = ["wcwidth"] + +[[package]] +category = "main" +description = "TensorBoard lets you watch Tensors Flow" +name = "tensorboard" +optional = false +python-versions = ">= 2.7, != 3.0.*, != 3.1.*" +version = "2.3.0" + +[package.dependencies] +absl-py = ">=0.4" +google-auth = ">=1.6.3,<2" +google-auth-oauthlib = ">=0.4.1,<0.5" +grpcio = ">=1.24.3" +markdown = ">=2.6.8" +numpy = ">=1.12.0" +protobuf = ">=3.6.0" +requests = ">=2.21.0,<3" +setuptools = ">=41.0.0" +six = ">=1.10.0" +tensorboard-plugin-wit = ">=1.6.0" +werkzeug = ">=0.11.15" + +[package.dependencies.wheel] +python = ">=3" +version = ">=0.26" + +[[package]] +category = "main" +description = "What-If Tool TensorBoard plugin." +name = "tensorboard-plugin-wit" +optional = false +python-versions = "*" +version = "1.7.0" + +[[package]] +category = "main" +description = "TensorFlow is an open source machine learning framework for everyone." +name = "tensorflow" +optional = false +python-versions = "*" +version = "2.3.0" + +[package.dependencies] +absl-py = ">=0.7.0" +astunparse = "1.6.3" +gast = "0.3.3" +google-pasta = ">=0.1.8" +grpcio = ">=1.8.6" +h5py = ">=2.10.0,<2.11.0" +keras-preprocessing = ">=1.1.1,<1.2" +numpy = ">=1.16.0,<1.19.0" +opt-einsum = ">=2.3.2" +protobuf = ">=3.9.2" +scipy = "1.4.1" +six = ">=1.12.0" +tensorboard = ">=2.3.0,<3" +tensorflow-estimator = ">=2.3.0,<2.4.0" +termcolor = ">=1.1.0" +wheel = ">=0.26" +wrapt = ">=1.11.1" + +[[package]] +category = "main" +description = "TensorFlow Addons." +name = "tensorflow-addons" +optional = false +python-versions = "*" +version = "0.10.0" + +[package.dependencies] +typeguard = ">=2.7" + +[[package]] +category = "main" +description = "TensorFlow Estimator." +name = "tensorflow-estimator" +optional = false +python-versions = "*" +version = "2.3.0" + +[[package]] +category = "main" +description = "TensorFlow Hub is a library to foster the publication, discovery, and consumption of reusable parts of machine learning models." 
+name = "tensorflow-hub" +optional = false +python-versions = "*" +version = "0.8.0" + +[package.dependencies] +numpy = ">=1.12.0" +protobuf = ">=3.8.0" +six = ">=1.12.0" + +[package.extras] +make_image_classifier = ["keras-preprocessing"] +make_nearest_neighbour_index = ["apache-beam", "annoy"] + +[[package]] +category = "main" +description = "Probabilistic modeling and statistical inference in TensorFlow" +name = "tensorflow-probability" +optional = false +python-versions = "*" +version = "0.10.1" + +[package.dependencies] +cloudpickle = "1.3" +decorator = "*" +gast = ">=0.3.2" +numpy = ">=1.13.3" +six = ">=1.10.0" + +[package.extras] +jax = ["jax", "jaxlib"] + +[[package]] +category = "main" +description = "TF.Text is a TensorFlow library of text related ops, modules, and subgraphs." +name = "tensorflow-text" +optional = true +python-versions = "*" +version = "2.3.0" + +[package.dependencies] +tensorflow = ">=2.3.0,<2.4" + +[package.extras] +tensorflow_gpu = ["tensorflow-gpu (>=2.1.0,<2.2)"] +tests = ["absl-py", "pytest"] + +[[package]] +category = "main" +description = "ANSII Color formatting for output in terminal." +name = "termcolor" +optional = false +python-versions = "*" +version = "1.1.0" + +[[package]] +category = "main" +description = "Generate simple tables in terminals from a nested list of strings." +name = "terminaltables" +optional = false +python-versions = "*" +version = "3.1.0" + +[[package]] +category = "main" +description = "Practical Machine Learning for NLP" +name = "thinc" +optional = true +python-versions = "*" +version = "7.4.0" + +[package.dependencies] +blis = ">=0.4.0,<0.5.0" +catalogue = ">=0.0.7,<1.1.0" +cymem = ">=2.0.2,<2.1.0" +murmurhash = ">=0.28.0,<1.1.0" +numpy = ">=1.7.0" +plac = ">=0.9.6,<1.2.0" +preshed = ">=1.0.1,<3.1.0" +srsly = ">=0.0.6,<1.1.0" +tqdm = ">=4.10.0,<5.0.0" +wasabi = ">=0.0.9,<1.1.0" + +[package.extras] +cuda = ["cupy (>=5.0.0b4)"] +cuda100 = ["cupy-cuda100 (>=5.0.0b4)"] +cuda101 = ["cupy-cuda101 (>=5.0.0b4)"] +cuda80 = ["cupy-cuda80 (>=5.0.0b4)"] +cuda90 = ["cupy-cuda90 (>=5.0.0b4)"] +cuda91 = ["cupy-cuda91 (>=5.0.0b4)"] +cuda92 = ["cupy-cuda92 (>=5.0.0b4)"] + +[[package]] +category = "main" +description = "threadpoolctl" +name = "threadpoolctl" +optional = false +python-versions = ">=3.5" +version = "2.1.0" + +[[package]] +category = "main" +description = "Fast and Customizable Tokenizers" +name = "tokenizers" +optional = true +python-versions = "*" +version = "0.7.0" + +[package.extras] +testing = ["pytest"] + +[[package]] +category = "dev" +description = "Python Library for Tom's Obvious, Minimal Language" +name = "toml" +optional = false +python-versions = "*" +version = "0.10.1" + +[[package]] +category = "main" +description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." +name = "tornado" +optional = false +python-versions = ">= 3.5" +version = "6.0.4" + +[[package]] +category = "dev" +description = "Building newsfiles for your project." 
+name = "towncrier" +optional = false +python-versions = "*" +version = "19.2.0" + +[package.dependencies] +Click = "*" +incremental = "*" +jinja2 = "*" +toml = "*" + +[[package]] +category = "main" +description = "Fast, Extensible Progress Meter" +name = "tqdm" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*" +version = "4.47.0" + +[package.extras] +dev = ["py-make (>=0.1.0)", "twine", "argopt", "pydoc-markdown"] + +[[package]] +category = "main" +description = "State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch" +name = "transformers" +optional = true +python-versions = ">=3.6.0" +version = "2.11.0" + +[package.dependencies] +filelock = "*" +numpy = "*" +packaging = "*" +regex = "!=2019.12.17" +requests = "*" +sacremoses = "*" +sentencepiece = "*" +tokenizers = "0.7.0" +tqdm = ">=4.27" + +[package.dependencies.dataclasses] +python = "<3.7" +version = "*" + +[package.extras] +all = ["pydantic", "uvicorn", "fastapi", "starlette", "tensorflow", "torch"] +dev = ["pytest", "pytest-xdist", "timeout-decorator", "black", "isort", "flake8", "mecab-python3", "scikit-learn", "tensorflow", "torch"] +docs = ["recommonmark", "sphinx", "sphinx-markdown-tables", "sphinx-rtd-theme"] +mecab = ["mecab-python3"] +quality = ["black", "isort", "flake8"] +serving = ["pydantic", "uvicorn", "fastapi", "starlette"] +sklearn = ["scikit-learn"] +testing = ["pytest", "pytest-xdist", "timeout-decorator"] +tf = ["tensorflow", "onnxconverter-common", "keras2onnx"] +tf-cpu = ["tensorflow-cpu", "onnxconverter-common", "keras2onnx"] +torch = ["torch"] + +[[package]] +category = "main" +description = "Twilio API client and TwiML generator" +name = "twilio" +optional = false +python-versions = "*" +version = "6.42.0" + +[package.dependencies] +PyJWT = ">=1.4.2" +pytz = "*" +six = "*" + +[package.dependencies.requests] +python = ">=3.0" +version = ">=2.0.0" + +[[package]] +category = "dev" +description = "a fork of Python 2 and 3 ast modules with type comment support" +name = "typed-ast" +optional = false +python-versions = "*" +version = "1.4.1" + +[[package]] +category = "main" +description = "Run-time type checker for Python" +name = "typeguard" +optional = false +python-versions = ">=3.5.3" +version = "2.9.1" + +[package.extras] +doc = ["sphinx-rtd-theme", "sphinx-autodoc-typehints (>=1.2.0)"] +test = ["pytest", "typing-extensions"] + +[[package]] +category = "main" +description = "Backported and Experimental Type Hints for Python 3.5+" +marker = "python_version < \"3.8\"" +name = "typing-extensions" +optional = false +python-versions = "*" +version = "3.7.4.2" + +[[package]] +category = "main" +description = "tzinfo object for the local timezone" +name = "tzlocal" +optional = false +python-versions = "*" +version = "2.1" + +[package.dependencies] +pytz = "*" + +[[package]] +category = "main" +description = "Ultra fast JSON encoder and decoder for Python" +name = "ujson" +optional = false +python-versions = ">=3.5" +version = "3.1.0" + +[[package]] +category = "main" +description = "URI templates" +name = "uritemplate" +optional = true +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "3.0.1" + +[[package]] +category = "main" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+name = "urllib3" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" +version = "1.25.10" + +[package.extras] +brotli = ["brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "pyOpenSSL (>=0.14)", "ipaddress"] +socks = ["PySocks (>=1.5.6,<1.5.7 || >1.5.7,<2.0)"] + +[[package]] +category = "main" +description = "Fast implementation of asyncio event loop on top of libuv" +marker = "sys_platform != \"win32\" and implementation_name == \"cpython\"" +name = "uvloop" +optional = false +python-versions = "*" +version = "0.14.0" + +[[package]] +category = "main" +description = "A lightweight console printing and formatting toolkit" +name = "wasabi" +optional = true +python-versions = "*" +version = "0.7.1" + +[[package]] +category = "dev" +description = "Filesystem events monitoring" +name = "watchdog" +optional = false +python-versions = "*" +version = "0.10.3" + +[package.dependencies] +pathtools = ">=0.1.1" + +[package.extras] +watchmedo = ["PyYAML (>=3.10)", "argh (>=0.24.1)"] + +[[package]] +category = "main" +description = "Measures the displayed width of unicode strings in a terminal" +name = "wcwidth" +optional = false +python-versions = "*" +version = "0.2.5" + +[[package]] +category = "main" +description = "Community-developed Python SDK for the Webex Teams APIs" +name = "webexteamssdk" +optional = false +python-versions = "*" +version = "1.3" + +[package.dependencies] +PyJWT = "*" +future = "*" +requests = ">=2.4.2" +requests-toolbelt = "*" + +[[package]] +category = "dev" +description = "WebSocket client for Python. hybi13 is supported." +name = "websocket-client" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "0.57.0" + +[package.dependencies] +six = "*" + +[[package]] +category = "main" +description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" +name = "websockets" +optional = false +python-versions = ">=3.6" +version = "8.0.2" + +[[package]] +category = "main" +description = "The comprehensive WSGI web application library." +name = "werkzeug" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "1.0.1" + +[package.extras] +dev = ["pytest", "pytest-timeout", "coverage", "tox", "sphinx", "pallets-sphinx-themes", "sphinx-issues"] +watchdog = ["watchdog"] + +[[package]] +category = "main" +description = "A built-package format for Python" +name = "wheel" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +version = "0.35.1" + +[package.extras] +test = ["pytest (>=3.0.0)", "pytest-cov"] + +[[package]] +category = "main" +description = "Module for decorators, wrappers and monkey patching." 
+name = "wrapt" +optional = false +python-versions = "*" +version = "1.12.1" + +[[package]] +category = "dev" +description = "Makes working with XML feel like you are working with JSON" +name = "xmltodict" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "0.12.0" + +[[package]] +category = "main" +description = "Yet another URL library" +name = "yarl" +optional = false +python-versions = ">=3.5" +version = "1.5.1" + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" + +[package.dependencies.typing-extensions] +python = "<3.8" +version = ">=3.7.4" + +[[package]] +category = "main" +description = "Backport of pathlib-compatible object wrapper for zip files" +marker = "python_version < \"3.7\" and python_version != \"3.4\" or python_version < \"3.8\"" +name = "zipp" +optional = false +python-versions = ">=3.6" +version = "3.1.0" + +[package.extras] +docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] +testing = ["jaraco.itertools", "func-timeout"] + +[extras] +convert = ["tensorflow-text"] +full = ["spacy", "tensorflow-text", "transformers", "jieba"] +gh-release-notes = ["github3.py"] +jieba = ["jieba"] +spacy = ["spacy"] +transformers = ["transformers"] + +[metadata] +content-hash = "ea58e7e93693e2027a6c2bd7b6f0131472a8367df4962b9ca676a5388c95350e" +lock-version = "1.0" +python-versions = ">=3.6,<3.9" + +[metadata.files] +absl-py = [ + {file = "absl-py-0.9.0.tar.gz", hash = "sha256:75e737d6ce7723d9ff9b7aa1ba3233c34be62ef18d5859e706b8fdc828989830"}, +] +aiofiles = [ + {file = "aiofiles-0.5.0-py3-none-any.whl", hash = "sha256:377fdf7815cc611870c59cbd07b68b180841d2a2b79812d8c218be02448c2acb"}, + {file = "aiofiles-0.5.0.tar.gz", hash = "sha256:98e6bcfd1b50f97db4980e182ddd509b7cc35909e903a8fe50d8849e02d815af"}, +] +aiohttp = [ + {file = "aiohttp-3.6.2-cp35-cp35m-macosx_10_13_x86_64.whl", hash = "sha256:1e984191d1ec186881ffaed4581092ba04f7c61582a177b187d3a2f07ed9719e"}, + {file = "aiohttp-3.6.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:50aaad128e6ac62e7bf7bd1f0c0a24bc968a0c0590a726d5a955af193544bcec"}, + {file = "aiohttp-3.6.2-cp36-cp36m-macosx_10_13_x86_64.whl", hash = "sha256:65f31b622af739a802ca6fd1a3076fd0ae523f8485c52924a89561ba10c49b48"}, + {file = "aiohttp-3.6.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:ae55bac364c405caa23a4f2d6cfecc6a0daada500274ffca4a9230e7129eac59"}, + {file = "aiohttp-3.6.2-cp36-cp36m-win32.whl", hash = "sha256:344c780466b73095a72c616fac5ea9c4665add7fc129f285fbdbca3cccf4612a"}, + {file = "aiohttp-3.6.2-cp36-cp36m-win_amd64.whl", hash = "sha256:4c6efd824d44ae697814a2a85604d8e992b875462c6655da161ff18fd4f29f17"}, + {file = "aiohttp-3.6.2-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:2f4d1a4fdce595c947162333353d4a44952a724fba9ca3205a3df99a33d1307a"}, + {file = "aiohttp-3.6.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:6206a135d072f88da3e71cc501c59d5abffa9d0bb43269a6dcd28d66bfafdbdd"}, + {file = "aiohttp-3.6.2-cp37-cp37m-win32.whl", hash = "sha256:b778ce0c909a2653741cb4b1ac7015b5c130ab9c897611df43ae6a58523cb965"}, + {file = "aiohttp-3.6.2-cp37-cp37m-win_amd64.whl", hash = "sha256:32e5f3b7e511aa850829fbe5aa32eb455e5534eaa4b1ce93231d00e2f76e5654"}, + {file = "aiohttp-3.6.2-py3-none-any.whl", hash = "sha256:460bd4237d2dbecc3b5ed57e122992f60188afe46e7319116da5eb8a9dfedba4"}, + {file = "aiohttp-3.6.2.tar.gz", hash = "sha256:259ab809ff0727d0e834ac5e8a283dc5e3e0ecc30c4d80b3cd17a4139ce1f326"}, +] +aioresponses = [ + {file = "aioresponses-0.6.4-py2.py3-none-any.whl", hash = 
"sha256:8e8b430aeddbacd25f4d94bfe11a46bc88a47be689df12c423e62cb86652ba3b"}, + {file = "aioresponses-0.6.4.tar.gz", hash = "sha256:4397ca736238a1ada8c7f47e557dda05e9ecfdd467b9f6b83871efd365af7e9f"}, +] +apipkg = [ + {file = "apipkg-1.5-py2.py3-none-any.whl", hash = "sha256:58587dd4dc3daefad0487f6d9ae32b4542b185e1c36db6993290e7c41ca2b47c"}, + {file = "apipkg-1.5.tar.gz", hash = "sha256:37228cda29411948b422fae072f57e31d3396d2ee1c9783775980ee9c9990af6"}, +] +appdirs = [ + {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, + {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, +] +apscheduler = [ + {file = "APScheduler-3.6.3-py2.py3-none-any.whl", hash = "sha256:e8b1ecdb4c7cb2818913f766d5898183c7cb8936680710a4d3a966e02262e526"}, + {file = "APScheduler-3.6.3.tar.gz", hash = "sha256:3bb5229eed6fbbdafc13ce962712ae66e175aa214c69bed35a06bffcf0c5e244"}, +] +astunparse = [ + {file = "astunparse-1.6.3-py2.py3-none-any.whl", hash = "sha256:c2652417f2c8b5bb325c885ae329bdf3f86424075c4fd1a128674bc6fba4b8e8"}, + {file = "astunparse-1.6.3.tar.gz", hash = "sha256:5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872"}, +] +async-generator = [ + {file = "async_generator-1.10-py3-none-any.whl", hash = "sha256:01c7bf666359b4967d2cda0000cc2e4af16a0ae098cbffcb8472fb9e8ad6585b"}, + {file = "async_generator-1.10.tar.gz", hash = "sha256:6ebb3d106c12920aaae42ccb6f787ef5eefdcdd166ea3d628fa8476abe712144"}, +] +async-timeout = [ + {file = "async-timeout-3.0.1.tar.gz", hash = "sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f"}, + {file = "async_timeout-3.0.1-py3-none-any.whl", hash = "sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3"}, +] +atomicwrites = [ + {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, + {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, +] +attrs = [ + {file = "attrs-19.3.0-py2.py3-none-any.whl", hash = "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c"}, + {file = "attrs-19.3.0.tar.gz", hash = "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72"}, +] +aws-sam-translator = [ + {file = "aws-sam-translator-1.26.0.tar.gz", hash = "sha256:1a3fd8e48a745967e8457b9cefdc3ad0f139ac4a25af4db9c13a9e1c19ea6910"}, + {file = "aws_sam_translator-1.26.0-py2-none-any.whl", hash = "sha256:de2f1b4efd83347639eb19fea37989e9da9a3c59da277320cf1e58a2f0ff6dd0"}, + {file = "aws_sam_translator-1.26.0-py3-none-any.whl", hash = "sha256:3a200e6475f11726732b9b9c070ca4d58d2fe5ecc40e8fb629b09a053fba5640"}, +] +aws-xray-sdk = [ + {file = "aws-xray-sdk-2.6.0.tar.gz", hash = "sha256:abf5b90f740e1f402e23414c9670e59cb9772e235e271fef2bce62b9100cbc77"}, + {file = "aws_xray_sdk-2.6.0-py2.py3-none-any.whl", hash = "sha256:076f7c610cd3564bbba3507d43e328fb6ff4a2e841d3590f39b2c3ce99d41e1d"}, +] +azure-core = [ + {file = "azure-core-1.8.0.zip", hash = "sha256:c89bbdcdc13ad45fe57d775ed87b15baf6d0b039a1ecd0a1bc91d2f713cb1f08"}, + {file = "azure_core-1.8.0-py2.py3-none-any.whl", hash = "sha256:84bff2b05ce989942e7ca3a13237441fbd8ff6855aaf2979b2bc94b74a02be5f"}, +] +azure-storage-blob = [ + {file = "azure-storage-blob-12.3.2.zip", hash = "sha256:b99ce18c5063b22a988e6e997a491aab6c7c4dd62d1424b4e2b934e6ef104356"}, + {file = 
"azure_storage_blob-12.3.2-py2.py3-none-any.whl", hash = "sha256:8a02a33cd28a16963274dc928960642e99ec19cad27166fb386ebcc0f1216706"}, +] +black = [ + {file = "black-19.10b0-py36-none-any.whl", hash = "sha256:1b30e59be925fafc1ee4565e5e08abef6b03fe455102883820fe5ee2e4734e0b"}, + {file = "black-19.10b0.tar.gz", hash = "sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539"}, +] +blis = [ + {file = "blis-0.4.1-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:135450caabc8aea9bb9250329ebdf7189982d9b57d5c92789b2ba2fe52c247a7"}, + {file = "blis-0.4.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:26b16d6005bb2671699831b5cc699905215d1abde1ec5c1d04de7dcd9eb29f75"}, + {file = "blis-0.4.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:d1d59faebc1c94f8f4f77154ef4b9d6d40364b111cf8fde48ee3b524c85f1075"}, + {file = "blis-0.4.1-cp35-cp35m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:38fe877a4b52e762f5e137a412e3c256545a696a12ae8c40d67b8815d2bb5097"}, + {file = "blis-0.4.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:1402d9cbb0fbc21b749dd5b87d7ee14249e74a0ca38be6ecc56b3b356fca2f21"}, + {file = "blis-0.4.1-cp35-cp35m-win_amd64.whl", hash = "sha256:8aeaf6954351593a1e412f80e398aa51df588d3c0de74b9f3323b694c603381b"}, + {file = "blis-0.4.1-cp36-cp36m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:3347a4b1b7d3ae14476aac9a6f7bf8ebf464863f4ebf4aea228874a7694ea240"}, + {file = "blis-0.4.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:77a6486b9794af01bcdfd1bc6e067c93add4b93292e6f95bf6e5ce7f98bf0163"}, + {file = "blis-0.4.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f0b0dad4d6268d9dba0a65a9db12dd7a2d8686b648399e4aa1aec7550697e99e"}, + {file = "blis-0.4.1-cp37-cp37m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:4fb89c47ee06b58a4410a16fd5794847517262c9d2a342643475b477dfeff0a4"}, + {file = "blis-0.4.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:03c368c9716ca814c436550a5f1e02ccf74850e613602519e3941d212e5aa177"}, + {file = "blis-0.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ddd732c5274d1082fa92e2c42317587d5ebabce7741ca98120f69bd45d004b99"}, + {file = "blis-0.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9ede123065f3cacb109967755b3d83d4ca0de90643a9058129a6ab2d4051954f"}, + {file = "blis-0.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:00473602629ba69fe6565108e21957e918cb48b59f5bf2f6bfb6e04de42500cb"}, + {file = "blis-0.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:856142a11e37fd2c47c5006a3197e157bb8469a491a73d2d442223dd3279df84"}, + {file = "blis-0.4.1.tar.gz", hash = "sha256:d69257d317e86f34a7f230a2fd1f021fd2a1b944137f40d8cdbb23bd334cd0c4"}, +] +boto = [ + {file = "boto-2.49.0-py2.py3-none-any.whl", hash = "sha256:147758d41ae7240dc989f0039f27da8ca0d53734be0eb869ef16e3adcfa462e8"}, + {file = "boto-2.49.0.tar.gz", hash = "sha256:ea0d3b40a2d852767be77ca343b58a9e3a4b00d9db440efb8da74b4e58025e5a"}, +] +boto3 = [ + {file = "boto3-1.14.43-py2.py3-none-any.whl", hash = "sha256:640a8372ce0edfbb84a8f63584a0b64c78d61a751a27c2a47f92d2ebaf021ce4"}, + {file = "boto3-1.14.43.tar.gz", hash = "sha256:a6c9a3d3abbad2ff2e5751af599492a9271633a7c9fef343482524464c53e451"}, +] +botocore = [ + {file = "botocore-1.17.43-py2.py3-none-any.whl", hash = 
"sha256:f8801ce7f7603922ccab1c86c448e802f94183e31d99457e85fb9985a20d3abc"}, + {file = "botocore-1.17.43.tar.gz", hash = "sha256:3fb144d2b5d705127f394f7483737ece6fa79577ca7c493e4f42047ac8636200"}, +] +cachetools = [ + {file = "cachetools-4.1.1-py3-none-any.whl", hash = "sha256:513d4ff98dd27f85743a8dc0e92f55ddb1b49e060c2d5961512855cda2c01a98"}, + {file = "cachetools-4.1.1.tar.gz", hash = "sha256:bbaa39c3dede00175df2dc2b03d0cf18dd2d32a7de7beb68072d13043c9edb20"}, +] +catalogue = [ + {file = "catalogue-1.0.0-py2.py3-none-any.whl", hash = "sha256:584d78e7f4c3c6e2fd498eb56dfc8ef1f4ff738480237de2ccd26cbe2cf47172"}, + {file = "catalogue-1.0.0.tar.gz", hash = "sha256:d74d1d856c6b36a37bf14aa6dbbc27d0582667b7ab979a6108e61a575e8723f5"}, +] +certifi = [ + {file = "certifi-2020.6.20-py2.py3-none-any.whl", hash = "sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41"}, + {file = "certifi-2020.6.20.tar.gz", hash = "sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3"}, +] +cffi = [ + {file = "cffi-1.14.2-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:da9d3c506f43e220336433dffe643fbfa40096d408cb9b7f2477892f369d5f82"}, + {file = "cffi-1.14.2-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:23e44937d7695c27c66a54d793dd4b45889a81b35c0751ba91040fe825ec59c4"}, + {file = "cffi-1.14.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:0da50dcbccd7cb7e6c741ab7912b2eff48e85af217d72b57f80ebc616257125e"}, + {file = "cffi-1.14.2-cp27-cp27m-win32.whl", hash = "sha256:76ada88d62eb24de7051c5157a1a78fd853cca9b91c0713c2e973e4196271d0c"}, + {file = "cffi-1.14.2-cp27-cp27m-win_amd64.whl", hash = "sha256:15a5f59a4808f82d8ec7364cbace851df591c2d43bc76bcbe5c4543a7ddd1bf1"}, + {file = "cffi-1.14.2-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:e4082d832e36e7f9b2278bc774886ca8207346b99f278e54c9de4834f17232f7"}, + {file = "cffi-1.14.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:57214fa5430399dffd54f4be37b56fe22cedb2b98862550d43cc085fb698dc2c"}, + {file = "cffi-1.14.2-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:6843db0343e12e3f52cc58430ad559d850a53684f5b352540ca3f1bc56df0731"}, + {file = "cffi-1.14.2-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:577791f948d34d569acb2d1add5831731c59d5a0c50a6d9f629ae1cefd9ca4a0"}, + {file = "cffi-1.14.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:8662aabfeab00cea149a3d1c2999b0731e70c6b5bac596d95d13f643e76d3d4e"}, + {file = "cffi-1.14.2-cp35-cp35m-win32.whl", hash = "sha256:837398c2ec00228679513802e3744d1e8e3cb1204aa6ad408b6aff081e99a487"}, + {file = "cffi-1.14.2-cp35-cp35m-win_amd64.whl", hash = "sha256:bf44a9a0141a082e89c90e8d785b212a872db793a0080c20f6ae6e2a0ebf82ad"}, + {file = "cffi-1.14.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:29c4688ace466a365b85a51dcc5e3c853c1d283f293dfcc12f7a77e498f160d2"}, + {file = "cffi-1.14.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:99cc66b33c418cd579c0f03b77b94263c305c389cb0c6972dac420f24b3bf123"}, + {file = "cffi-1.14.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:65867d63f0fd1b500fa343d7798fa64e9e681b594e0a07dc934c13e76ee28fb1"}, + {file = "cffi-1.14.2-cp36-cp36m-win32.whl", hash = "sha256:f5033952def24172e60493b68717792e3aebb387a8d186c43c020d9363ee7281"}, + {file = "cffi-1.14.2-cp36-cp36m-win_amd64.whl", hash = "sha256:7057613efefd36cacabbdbcef010e0a9c20a88fc07eb3e616019ea1692fa5df4"}, + {file = "cffi-1.14.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6539314d84c4d36f28d73adc1b45e9f4ee2a89cdc7e5d2b0a6dbacba31906798"}, + {file = "cffi-1.14.2-cp37-cp37m-manylinux1_i686.whl", 
hash = "sha256:672b539db20fef6b03d6f7a14b5825d57c98e4026401fce838849f8de73fe4d4"}, + {file = "cffi-1.14.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:95e9094162fa712f18b4f60896e34b621df99147c2cee216cfa8f022294e8e9f"}, + {file = "cffi-1.14.2-cp37-cp37m-win32.whl", hash = "sha256:b9aa9d8818c2e917fa2c105ad538e222a5bce59777133840b93134022a7ce650"}, + {file = "cffi-1.14.2-cp37-cp37m-win_amd64.whl", hash = "sha256:e4b9b7af398c32e408c00eb4e0d33ced2f9121fd9fb978e6c1b57edd014a7d15"}, + {file = "cffi-1.14.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e613514a82539fc48291d01933951a13ae93b6b444a88782480be32245ed4afa"}, + {file = "cffi-1.14.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:9b219511d8b64d3fa14261963933be34028ea0e57455baf6781fe399c2c3206c"}, + {file = "cffi-1.14.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:c0b48b98d79cf795b0916c57bebbc6d16bb43b9fc9b8c9f57f4cf05881904c75"}, + {file = "cffi-1.14.2-cp38-cp38-win32.whl", hash = "sha256:15419020b0e812b40d96ec9d369b2bc8109cc3295eac6e013d3261343580cc7e"}, + {file = "cffi-1.14.2-cp38-cp38-win_amd64.whl", hash = "sha256:12a453e03124069b6896107ee133ae3ab04c624bb10683e1ed1c1663df17c13c"}, + {file = "cffi-1.14.2.tar.gz", hash = "sha256:ae8f34d50af2c2154035984b8b5fc5d9ed63f32fe615646ab435b05b132ca91b"}, +] +cfn-lint = [ + {file = "cfn-lint-0.35.0.tar.gz", hash = "sha256:42023d89520e3a29891ec2eb4c326eef9d1f7516fe9abee8b6c97ce064187b45"}, + {file = "cfn_lint-0.35.0-py3-none-any.whl", hash = "sha256:8439925531fdd4c94e5b50974d067857b3af50b04b61254d3eae9b1e0ce20007"}, +] +chardet = [ + {file = "chardet-3.0.4-py2.py3-none-any.whl", hash = "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"}, + {file = "chardet-3.0.4.tar.gz", hash = "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae"}, +] +click = [ + {file = "click-7.1.2-py2.py3-none-any.whl", hash = "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc"}, + {file = "click-7.1.2.tar.gz", hash = "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"}, +] +cloudpickle = [ + {file = "cloudpickle-1.3.0-py2.py3-none-any.whl", hash = "sha256:8664761f810efc07dbb301459e413c99b68fcc6d8703912bd39d86618ac631e3"}, + {file = "cloudpickle-1.3.0.tar.gz", hash = "sha256:38af54d0e7705d87a287bdefe1df00f936aadb1f629dca383e825cca927fa753"}, +] +colorama = [ + {file = "colorama-0.4.3-py2.py3-none-any.whl", hash = "sha256:7d73d2a99753107a36ac6b455ee49046802e59d9d076ef8e47b61499fa29afff"}, + {file = "colorama-0.4.3.tar.gz", hash = "sha256:e96da0d330793e2cb9485e9ddfd918d456036c7149416295932478192f4436a1"}, +] +colorclass = [ + {file = "colorclass-2.2.0.tar.gz", hash = "sha256:b05c2a348dfc1aff2d502527d78a5b7b7e2f85da94a96c5081210d8e9ee8e18b"}, +] +coloredlogs = [ + {file = "coloredlogs-14.0-py2.py3-none-any.whl", hash = "sha256:346f58aad6afd48444c2468618623638dadab76e4e70d5e10822676f2d32226a"}, + {file = "coloredlogs-14.0.tar.gz", hash = "sha256:a1fab193d2053aa6c0a97608c4342d031f1f93a3d1218432c59322441d31a505"}, +] +colorhash = [ + {file = "colorhash-1.0.2-py2.py3-none-any.whl", hash = "sha256:f5a0019b7364fb12b0ed52e3fe05f69fe8c2d77622e6abf2f869e711308e14e1"}, + {file = "colorhash-1.0.2.tar.bz2", hash = "sha256:e0f925c3d82d5ed6e5fe8039492325263b5b75490bddf38e24a221467c3ee764"}, + {file = "colorhash-1.0.2.zip", hash = "sha256:5eeb6b490cbbf3545f50d39cf46993a1335b392b339cec6053f857e2785f83d8"}, +] +contextvars = [ + {file = "contextvars-2.4.tar.gz", hash = "sha256:f38c908aaa59c14335eeea12abea5f443646216c4e29380d7bf34d2018e2c39e"}, +] 
+coverage = [ + {file = "coverage-5.2.1-cp27-cp27m-macosx_10_13_intel.whl", hash = "sha256:40f70f81be4d34f8d491e55936904db5c527b0711b2a46513641a5729783c2e4"}, + {file = "coverage-5.2.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:675192fca634f0df69af3493a48224f211f8db4e84452b08d5fcebb9167adb01"}, + {file = "coverage-5.2.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:2fcc8b58953d74d199a1a4d633df8146f0ac36c4e720b4a1997e9b6327af43a8"}, + {file = "coverage-5.2.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:64c4f340338c68c463f1b56e3f2f0423f7b17ba6c3febae80b81f0e093077f59"}, + {file = "coverage-5.2.1-cp27-cp27m-win32.whl", hash = "sha256:52f185ffd3291196dc1aae506b42e178a592b0b60a8610b108e6ad892cfc1bb3"}, + {file = "coverage-5.2.1-cp27-cp27m-win_amd64.whl", hash = "sha256:30bc103587e0d3df9e52cd9da1dd915265a22fad0b72afe54daf840c984b564f"}, + {file = "coverage-5.2.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:9ea749fd447ce7fb1ac71f7616371f04054d969d412d37611716721931e36efd"}, + {file = "coverage-5.2.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ce7866f29d3025b5b34c2e944e66ebef0d92e4a4f2463f7266daa03a1332a651"}, + {file = "coverage-5.2.1-cp35-cp35m-macosx_10_13_x86_64.whl", hash = "sha256:4869ab1c1ed33953bb2433ce7b894a28d724b7aa76c19b11e2878034a4e4680b"}, + {file = "coverage-5.2.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:a3ee9c793ffefe2944d3a2bd928a0e436cd0ac2d9e3723152d6fd5398838ce7d"}, + {file = "coverage-5.2.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:28f42dc5172ebdc32622a2c3f7ead1b836cdbf253569ae5673f499e35db0bac3"}, + {file = "coverage-5.2.1-cp35-cp35m-win32.whl", hash = "sha256:e26c993bd4b220429d4ec8c1468eca445a4064a61c74ca08da7429af9bc53bb0"}, + {file = "coverage-5.2.1-cp35-cp35m-win_amd64.whl", hash = "sha256:4186fc95c9febeab5681bc3248553d5ec8c2999b8424d4fc3a39c9cba5796962"}, + {file = "coverage-5.2.1-cp36-cp36m-macosx_10_13_x86_64.whl", hash = "sha256:b360d8fd88d2bad01cb953d81fd2edd4be539df7bfec41e8753fe9f4456a5082"}, + {file = "coverage-5.2.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:1adb6be0dcef0cf9434619d3b892772fdb48e793300f9d762e480e043bd8e716"}, + {file = "coverage-5.2.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:098a703d913be6fbd146a8c50cc76513d726b022d170e5e98dc56d958fd592fb"}, + {file = "coverage-5.2.1-cp36-cp36m-win32.whl", hash = "sha256:962c44070c281d86398aeb8f64e1bf37816a4dfc6f4c0f114756b14fc575621d"}, + {file = "coverage-5.2.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b1ed2bdb27b4c9fc87058a1cb751c4df8752002143ed393899edb82b131e0546"}, + {file = "coverage-5.2.1-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:c890728a93fffd0407d7d37c1e6083ff3f9f211c83b4316fae3778417eab9811"}, + {file = "coverage-5.2.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:538f2fd5eb64366f37c97fdb3077d665fa946d2b6d95447622292f38407f9258"}, + {file = "coverage-5.2.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:27ca5a2bc04d68f0776f2cdcb8bbd508bbe430a7bf9c02315cd05fb1d86d0034"}, + {file = "coverage-5.2.1-cp37-cp37m-win32.whl", hash = "sha256:aab75d99f3f2874733946a7648ce87a50019eb90baef931698f96b76b6769a46"}, + {file = "coverage-5.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:c2ff24df02a125b7b346c4c9078c8936da06964cc2d276292c357d64378158f8"}, + {file = "coverage-5.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:304fbe451698373dc6653772c72c5d5e883a4aadaf20343592a7abb2e643dae0"}, + {file = "coverage-5.2.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:c96472b8ca5dc135fb0aa62f79b033f02aa434fb03a8b190600a5ae4102df1fd"}, + {file = 
"coverage-5.2.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:8505e614c983834239f865da2dd336dcf9d72776b951d5dfa5ac36b987726e1b"}, + {file = "coverage-5.2.1-cp38-cp38-win32.whl", hash = "sha256:700997b77cfab016533b3e7dbc03b71d33ee4df1d79f2463a318ca0263fc29dd"}, + {file = "coverage-5.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:46794c815e56f1431c66d81943fa90721bb858375fb36e5903697d5eef88627d"}, + {file = "coverage-5.2.1-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:16042dc7f8e632e0dcd5206a5095ebd18cb1d005f4c89694f7f8aafd96dd43a3"}, + {file = "coverage-5.2.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:c1bbb628ed5192124889b51204de27c575b3ffc05a5a91307e7640eff1d48da4"}, + {file = "coverage-5.2.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:4f6428b55d2916a69f8d6453e48a505c07b2245653b0aa9f0dee38785939f5e4"}, + {file = "coverage-5.2.1-cp39-cp39-win32.whl", hash = "sha256:9e536783a5acee79a9b308be97d3952b662748c4037b6a24cbb339dc7ed8eb89"}, + {file = "coverage-5.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:b8f58c7db64d8f27078cbf2a4391af6aa4e4767cc08b37555c4ae064b8558d9b"}, + {file = "coverage-5.2.1.tar.gz", hash = "sha256:a34cb28e0747ea15e82d13e14de606747e9e484fb28d63c999483f5d5188e89b"}, +] +coveralls = [ + {file = "coveralls-2.1.2-py2.py3-none-any.whl", hash = "sha256:b3b60c17b03a0dee61952a91aed6f131e0b2ac8bd5da909389c53137811409e1"}, + {file = "coveralls-2.1.2.tar.gz", hash = "sha256:4430b862baabb3cf090d36d84d331966615e4288d8a8c5957e0fd456d0dd8bd6"}, +] +cryptography = [ + {file = "cryptography-3.0-cp27-cp27m-macosx_10_10_x86_64.whl", hash = "sha256:ab49edd5bea8d8b39a44b3db618e4783ef84c19c8b47286bf05dfdb3efb01c83"}, + {file = "cryptography-3.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:124af7255ffc8e964d9ff26971b3a6153e1a8a220b9a685dc407976ecb27a06a"}, + {file = "cryptography-3.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:51e40123083d2f946794f9fe4adeeee2922b581fa3602128ce85ff813d85b81f"}, + {file = "cryptography-3.0-cp27-cp27m-win32.whl", hash = "sha256:dea0ba7fe6f9461d244679efa968d215ea1f989b9c1957d7f10c21e5c7c09ad6"}, + {file = "cryptography-3.0-cp27-cp27m-win_amd64.whl", hash = "sha256:8ecf9400d0893836ff41b6f977a33972145a855b6efeb605b49ee273c5e6469f"}, + {file = "cryptography-3.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:0c608ff4d4adad9e39b5057de43657515c7da1ccb1807c3a27d4cf31fc923b4b"}, + {file = "cryptography-3.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:bec7568c6970b865f2bcebbe84d547c52bb2abadf74cefce396ba07571109c67"}, + {file = "cryptography-3.0-cp35-abi3-macosx_10_10_x86_64.whl", hash = "sha256:0cbfed8ea74631fe4de00630f4bb592dad564d57f73150d6f6796a24e76c76cd"}, + {file = "cryptography-3.0-cp35-abi3-manylinux1_x86_64.whl", hash = "sha256:a09fd9c1cca9a46b6ad4bea0a1f86ab1de3c0c932364dbcf9a6c2a5eeb44fa77"}, + {file = "cryptography-3.0-cp35-abi3-manylinux2010_x86_64.whl", hash = "sha256:ce82cc06588e5cbc2a7df3c8a9c778f2cb722f56835a23a68b5a7264726bb00c"}, + {file = "cryptography-3.0-cp35-cp35m-win32.whl", hash = "sha256:9367d00e14dee8d02134c6c9524bb4bd39d4c162456343d07191e2a0b5ec8b3b"}, + {file = "cryptography-3.0-cp35-cp35m-win_amd64.whl", hash = "sha256:384d7c681b1ab904fff3400a6909261cae1d0939cc483a68bdedab282fb89a07"}, + {file = "cryptography-3.0-cp36-cp36m-win32.whl", hash = "sha256:4d355f2aee4a29063c10164b032d9fa8a82e2c30768737a2fd56d256146ad559"}, + {file = "cryptography-3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:45741f5499150593178fc98d2c1a9c6722df88b99c821ad6ae298eff0ba1ae71"}, + {file = "cryptography-3.0-cp37-cp37m-win32.whl", hash 
= "sha256:8ecef21ac982aa78309bb6f092d1677812927e8b5ef204a10c326fc29f1367e2"}, + {file = "cryptography-3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:4b9303507254ccb1181d1803a2080a798910ba89b1a3c9f53639885c90f7a756"}, + {file = "cryptography-3.0-cp38-cp38-win32.whl", hash = "sha256:8713ddb888119b0d2a1462357d5946b8911be01ddbf31451e1d07eaa5077a261"}, + {file = "cryptography-3.0-cp38-cp38-win_amd64.whl", hash = "sha256:bea0b0468f89cdea625bb3f692cd7a4222d80a6bdafd6fb923963f2b9da0e15f"}, + {file = "cryptography-3.0.tar.gz", hash = "sha256:8e924dbc025206e97756e8903039662aa58aa9ba357d8e1d8fc29e3092322053"}, +] +cycler = [ + {file = "cycler-0.10.0-py2.py3-none-any.whl", hash = "sha256:1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d"}, + {file = "cycler-0.10.0.tar.gz", hash = "sha256:cd7b2d1018258d7247a71425e9f26463dfb444d411c39569972f4ce586b0c9d8"}, +] +cymem = [ + {file = "cymem-2.0.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:f4f19af4bca81f11922508a9dcf30ce1d2aee4972af9f81ce8e5331a6f46f5e1"}, + {file = "cymem-2.0.3-cp35-cp35m-win_amd64.whl", hash = "sha256:cd21ec48ee70878d46c486e2f7ae94b32bfc6b37c4d27876c5a5a00c4eb75c3c"}, + {file = "cymem-2.0.3-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:6f4cb689a9552e9e13dccc89203c8ab09f210a7ffb92ce27c384a4a0be27b527"}, + {file = "cymem-2.0.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:7236252bed70f37b898933dcf8aa875d0829664a245a272516f27b30439df71c"}, + {file = "cymem-2.0.3-cp36-cp36m-win_amd64.whl", hash = "sha256:719f04a11ca709fc2b47868070d79fccff77e5d502ff32de2f4baa73cb16166f"}, + {file = "cymem-2.0.3-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:d7505c500d994f11662e5595f5002251f572acc189f18944619352e2636f5181"}, + {file = "cymem-2.0.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:c288a1bbdf58c360457443e5297e74844e1961e5e7001dbcb3a5297a41911a11"}, + {file = "cymem-2.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:7f5ddceb12b73f7fd2e4398266401b6f887003740ccd18c989a2af04500b5f2b"}, + {file = "cymem-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:622c20a57701d02f01a47e856dea248e112638f28c8249dbe3ed95a9702e3d74"}, + {file = "cymem-2.0.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:85b9364e099426bd7f445a7705aad87bf6dbb71d79e3802dd8ca14e181d38a33"}, + {file = "cymem-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:dd24848fbd75b17bab06408da6c029ba7cc615bd9e4a1f755fb3a090025fb922"}, + {file = "cymem-2.0.3.tar.gz", hash = "sha256:5083b2ab5fe13ced094a82e0df465e2dbbd9b1c013288888035e24fd6eb4ed01"}, +] +dataclasses = [ + {file = "dataclasses-0.6-py3-none-any.whl", hash = "sha256:454a69d788c7fda44efd71e259be79577822f5e3f53f029a22d08004e951dc9f"}, + {file = "dataclasses-0.6.tar.gz", hash = "sha256:6988bd2b895eef432d562370bb707d540f32f7360ab13da45340101bc2307d84"}, +] +decorator = [ + {file = "decorator-4.4.2-py2.py3-none-any.whl", hash = "sha256:41fa54c2a0cc4ba648be4fd43cff00aedf5b9465c9bf18d64325bc225f08f760"}, + {file = "decorator-4.4.2.tar.gz", hash = "sha256:e3a62f0520172440ca0dcc823749319382e377f37f140a0b99ef45fecb84bfe7"}, +] +dnspython = [ + {file = "dnspython-1.16.0-py2.py3-none-any.whl", hash = "sha256:f69c21288a962f4da86e56c4905b49d11aba7938d3d740e80d9e366ee4f1632d"}, + {file = "dnspython-1.16.0.zip", hash = "sha256:36c5e8e38d4369a08b6780b7f27d790a292b2b08eea01607865bf0936c558e01"}, +] +docker = [ + {file = "docker-4.3.0-py2.py3-none-any.whl", hash = "sha256:ba118607b0ba6bfc1b236ec32019a355c47b5d012d01d976467d4692ef443929"}, + {file = "docker-4.3.0.tar.gz", hash = 
"sha256:431a268f2caf85aa30613f9642da274c62f6ee8bae7d70d968e01529f7d6af93"}, +] +docopt = [ + {file = "docopt-0.6.2.tar.gz", hash = "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491"}, +] +docspec = [ + {file = "docspec-0.2.0-py3-none-any.whl", hash = "sha256:a5bf4bfe5e940de080b282045db4f2a6aea3e745f639d34262f59791fb18107c"}, + {file = "docspec-0.2.0.tar.gz", hash = "sha256:058894861bff860f7053188c94cbd55d1777d5d0f39f3f8487960965ab098191"}, +] +docspec-python = [ + {file = "docspec-python-0.0.7.tar.gz", hash = "sha256:13af366a2dc0efdc7488f7560ad169ee80992f7f792a6b8d7ad0097bba7a27ab"}, + {file = "docspec_python-0.0.7-py3-none-any.whl", hash = "sha256:9949eda3f964aacdf39d5c84e24a72464e7e190d982cbb2714e661124fa03524"}, +] +docutils = [ + {file = "docutils-0.15.2-py2-none-any.whl", hash = "sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827"}, + {file = "docutils-0.15.2-py3-none-any.whl", hash = "sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0"}, + {file = "docutils-0.15.2.tar.gz", hash = "sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99"}, +] +ecdsa = [ + {file = "ecdsa-0.15-py2.py3-none-any.whl", hash = "sha256:867ec9cf6df0b03addc8ef66b56359643cb5d0c1dc329df76ba7ecfe256c8061"}, + {file = "ecdsa-0.15.tar.gz", hash = "sha256:8f12ac317f8a1318efa75757ef0a651abe12e51fc1af8838fb91079445227277"}, +] +execnet = [ + {file = "execnet-1.7.1-py2.py3-none-any.whl", hash = "sha256:d4efd397930c46415f62f8a31388d6be4f27a91d7550eb79bc64a756e0056547"}, + {file = "execnet-1.7.1.tar.gz", hash = "sha256:cacb9df31c9680ec5f95553976c4da484d407e85e41c83cb812aa014f0eddc50"}, +] +fakeredis = [ + {file = "fakeredis-1.4.2-py3-none-any.whl", hash = "sha256:fdfe06f277092d022c271fcaefdc1f0c8d9bfa8cb15374cae41d66a20bd96d2b"}, + {file = "fakeredis-1.4.2.tar.gz", hash = "sha256:790c85ad0f3b2967aba1f51767021bc59760fcb612159584be018ea7384f7fd2"}, +] +fbmessenger = [ + {file = "fbmessenger-6.0.0-py2.py3-none-any.whl", hash = "sha256:82cffd6e2fe02bfcf8ed083c59bdddcfdaa594dd0040f0c49eabbaf0e58d974c"}, + {file = "fbmessenger-6.0.0.tar.gz", hash = "sha256:6e42c4588a4c942547be228886278bbc7a084e0b34799c7e6ebd786129f021e6"}, +] +filelock = [ + {file = "filelock-3.0.12-py3-none-any.whl", hash = "sha256:929b7d63ec5b7d6b71b0fa5ac14e030b3f70b75747cef1b10da9b879fef15836"}, + {file = "filelock-3.0.12.tar.gz", hash = "sha256:18d82244ee114f543149c66a6e0c14e9c4f8a1044b5cdaadd0f82159d6a6ff59"}, +] +flake8 = [ + {file = "flake8-3.8.3-py2.py3-none-any.whl", hash = "sha256:15e351d19611c887e482fb960eae4d44845013cc142d42896e9862f775d8cf5c"}, + {file = "flake8-3.8.3.tar.gz", hash = "sha256:f04b9fcbac03b0a3e58c0ab3a0ecc462e023a9faf046d57794184028123aa208"}, +] +freezegun = [ + {file = "freezegun-0.3.15-py2.py3-none-any.whl", hash = "sha256:82c757a05b7c7ca3e176bfebd7d6779fd9139c7cb4ef969c38a28d74deef89b2"}, + {file = "freezegun-0.3.15.tar.gz", hash = "sha256:e2062f2c7f95cc276a834c22f1a17179467176b624cc6f936e8bc3be5535ad1b"}, +] +future = [ + {file = "future-0.18.2.tar.gz", hash = "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"}, +] +gast = [ + {file = "gast-0.3.3-py2.py3-none-any.whl", hash = "sha256:8f46f5be57ae6889a4e16e2ca113b1703ef17f2b0abceb83793eaba9e1351a45"}, + {file = "gast-0.3.3.tar.gz", hash = "sha256:b881ef288a49aa81440d2c5eb8aeefd4c2bb8993d5f50edae7413a85bfdb3b57"}, +] +"github3.py" = [ + {file = "github3.py-1.3.0-py2.py3-none-any.whl", hash = "sha256:50833b5da35546b8cced0e8d7ff4c50a9afc2c8e46cc4d07dc4b66d26467c708"}, + {file = 
"github3.py-1.3.0.tar.gz", hash = "sha256:15a115c18f7bfcf934dfef7ab103844eb9f620c586bad65967708926da47cbda"}, +] +google-api-core = [ + {file = "google-api-core-1.22.1.tar.gz", hash = "sha256:35cba563034d668ae90ffe1f03193a84e745b38f09592f60258358b5e5ee6238"}, + {file = "google_api_core-1.22.1-py2.py3-none-any.whl", hash = "sha256:431839101b7edc7b0e6cccca0441cb9015f728fc5f098e146e123bf523e8cf71"}, +] +google-auth = [ + {file = "google-auth-1.20.1.tar.gz", hash = "sha256:2f34dd810090d0d4c9d5787c4ad7b4413d1fbfb941e13682c7a2298d3b6cdcc8"}, + {file = "google_auth-1.20.1-py2.py3-none-any.whl", hash = "sha256:ce1fb80b5c6d3dd038babcc43e221edeafefc72d983b3dc28b67b996f76f00b9"}, +] +google-auth-oauthlib = [ + {file = "google-auth-oauthlib-0.4.1.tar.gz", hash = "sha256:88d2cd115e3391eb85e1243ac6902e76e77c5fe438b7276b297fbe68015458dd"}, + {file = "google_auth_oauthlib-0.4.1-py2.py3-none-any.whl", hash = "sha256:a92a0f6f41a0fb6138454fbc02674e64f89d82a244ea32f98471733c8ef0e0e1"}, +] +google-cloud-core = [ + {file = "google-cloud-core-1.4.1.tar.gz", hash = "sha256:613e56f164b6bee487dd34f606083a0130f66f42f7b10f99730afdf1630df507"}, + {file = "google_cloud_core-1.4.1-py2.py3-none-any.whl", hash = "sha256:4c9e457fcfc026fdde2e492228f04417d4c717fb0f29f070122fb0ab89e34ebd"}, +] +google-cloud-storage = [ + {file = "google-cloud-storage-1.30.0.tar.gz", hash = "sha256:0634addb7576d48861d9963312fc82a0436042b8f282414ed58ca76d73edee54"}, + {file = "google_cloud_storage-1.30.0-py2.py3-none-any.whl", hash = "sha256:02ac63059c798d4b8ba9057921be745707dc2d3316f5f366de91c24cc23cd77e"}, +] +google-crc32c = [ + {file = "google-crc32c-0.1.0.tar.gz", hash = "sha256:ad3d9b4402d4a16673aba7e74feacd621678aef3a9e6c0a5fb4c7e133c39ac45"}, + {file = "google_crc32c-0.1.0-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:dab8d637d1467e8dd8e01f8d909c2b92102d9bf4a0e5bc4898c9c1aaccf52572"}, + {file = "google_crc32c-0.1.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:bce91c6fd8d32ea76c6162cbb7ed493939c85b8c0da41f194f9a7784e978dd91"}, + {file = "google_crc32c-0.1.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:8dec850f4a4afdc8721b675c549f127c7809d6e76afebb14b1acc58f456d1e10"}, + {file = "google_crc32c-0.1.0-cp35-cp35m-win32.whl", hash = "sha256:7232f2b5305f44fa5bfe01b094305cfab1ab1895091aebcc840f262ef8013271"}, + {file = "google_crc32c-0.1.0-cp35-cp35m-win_amd64.whl", hash = "sha256:3b0f8b73a97be981a5b727526eb8087a8a33a103031e2ec799df66f7535a152e"}, + {file = "google_crc32c-0.1.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:7496f57ba73f63ea0b36bdab961799d03f1e5d3b972ec00b93a3c13f94bf703a"}, + {file = "google_crc32c-0.1.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:b5806e9f3602e9ab237306700ace5121c8fc7f5cc5a59054255d874123144914"}, + {file = "google_crc32c-0.1.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:f42f3df1ac90326c1229ffb71471f1d1504f8c68fad6b627c996df732e800c6c"}, + {file = "google_crc32c-0.1.0-cp36-cp36m-win32.whl", hash = "sha256:4c8b6ea0fa71913b0e773b311001b390110d466f0c6536bf6bad2b712d11acf5"}, + {file = "google_crc32c-0.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:af1f4ef7c649ad637e7fdd1e6e8e5a1ef28b45325064f9c8b563fe7ef8444e4c"}, + {file = "google_crc32c-0.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cb34587503cd052495df010474cf7ff408a43efc56360b1cc7563d1a849d4798"}, + {file = "google_crc32c-0.1.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:79bf4d11867b3adcb8110b1fafc7d8ca7cb8ee1cd1d65ceaffef5c945188b5b8"}, + {file = "google_crc32c-0.1.0-cp37-cp37m-manylinux2010_x86_64.whl", hash 
= "sha256:053abd3fed09a766a56129b2df1439cf691ac40443659e252cc2cf4ba440c0aa"}, + {file = "google_crc32c-0.1.0-cp37-cp37m-win32.whl", hash = "sha256:2e666e8cdd067ece9e7e2618634caa3aa33266da5c3e9666dd46e5d3e65b3538"}, + {file = "google_crc32c-0.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c9e222263c9028dca294611af0e51371afcfc9bc4781484909d50c6ca9862807"}, + {file = "google_crc32c-0.1.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:dcc75bc9ef5a0ba3989408a227f4e6b609e989427727f4bca3aaad1f2ba4c98d"}, + {file = "google_crc32c-0.1.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:57893cf04cfa924c4e75e9ba29c9878304687bd776f15fb02b6ecdb867d181a3"}, +] +google-pasta = [ + {file = "google-pasta-0.2.0.tar.gz", hash = "sha256:c9f2c8dfc8f96d0d5808299920721be30c9eec37f2389f28904f454565c8a16e"}, + {file = "google_pasta-0.2.0-py2-none-any.whl", hash = "sha256:4612951da876b1a10fe3960d7226f0c7682cf901e16ac06e473b267a5afa8954"}, + {file = "google_pasta-0.2.0-py3-none-any.whl", hash = "sha256:b32482794a366b5366a32c92a9a9201b107821889935a02b3e51f6b432ea84ed"}, +] +google-resumable-media = [ + {file = "google-resumable-media-0.7.1.tar.gz", hash = "sha256:57841f5e65fb285c01071f439724745b2549a72eb75e5fd979198eb518608ed0"}, + {file = "google_resumable_media-0.7.1-py2.py3-none-any.whl", hash = "sha256:0572998cc2c7ba9ca996337896a2f93dbe8bc88866ebd81c8b7f4d7b07222957"}, +] +googleapis-common-protos = [ + {file = "googleapis-common-protos-1.52.0.tar.gz", hash = "sha256:560716c807117394da12cecb0a54da5a451b5cf9866f1d37e9a5e2329a665351"}, + {file = "googleapis_common_protos-1.52.0-py2.py3-none-any.whl", hash = "sha256:c8961760f5aad9a711d37b675be103e0cc4e9a39327e0d6d857872f698403e24"}, +] +grpcio = [ + {file = "grpcio-1.31.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:e8c3264b0fd728aadf3f0324471843f65bd3b38872bdab2a477e31ffb685dd5b"}, + {file = "grpcio-1.31.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:5fb0923b16590bac338e92d98c7d8effb3cfad1d2e18c71bf86bde32c49cd6dd"}, + {file = "grpcio-1.31.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:58d7121f48cb94535a4cedcce32921d0d0a78563c7372a143dedeec196d1c637"}, + {file = "grpcio-1.31.0-cp27-cp27m-win32.whl", hash = "sha256:ea849210e7362559f326cbe603d5b8d8bb1e556e86a7393b5a8847057de5b084"}, + {file = "grpcio-1.31.0-cp27-cp27m-win_amd64.whl", hash = "sha256:ba3e43cb984399064ffaa3c0997576e46a1e268f9da05f97cd9b272f0b59ee71"}, + {file = "grpcio-1.31.0-cp27-cp27mu-linux_armv7l.whl", hash = "sha256:ebb2ca09fa17537e35508a29dcb05575d4d9401138a68e83d1c605d65e8a1770"}, + {file = "grpcio-1.31.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:292635f05b6ce33f87116951d0b3d8d330bdfc5cac74f739370d60981e8c256c"}, + {file = "grpcio-1.31.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:92e54ab65e782f227e751c7555918afaba8d1229601687e89b80c2b65d2f6642"}, + {file = "grpcio-1.31.0-cp35-cp35m-linux_armv7l.whl", hash = "sha256:013287f99c99b201aa8a5f6bc7918f616739b9be031db132d9e3b8453e95e151"}, + {file = "grpcio-1.31.0-cp35-cp35m-macosx_10_7_intel.whl", hash = "sha256:d2c5e05c257859febd03f5d81b5015e1946d6bcf475c7bf63ee99cea8ab0d590"}, + {file = "grpcio-1.31.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:c9016ab1eaf4e054099303287195f3746bd4e69f2631d040f9dca43e910a5408"}, + {file = "grpcio-1.31.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:baaa036540d7ace433bdf38a3fe5e41cf9f84cdf10a88bac805f678a7ca8ddcc"}, + {file = "grpcio-1.31.0-cp35-cp35m-manylinux2014_i686.whl", hash = "sha256:75e383053dccb610590aa53eed5278db5c09bf498d3b5105ce6c776478f59352"}, + 
{file = "grpcio-1.31.0-cp35-cp35m-manylinux2014_x86_64.whl", hash = "sha256:739a72abffbd36083ff7adbb862cf1afc1e311c35834bed9c0361d8e68b063e1"}, + {file = "grpcio-1.31.0-cp35-cp35m-win32.whl", hash = "sha256:f04c59d186af3157dc8811114130aaeae92e90a65283733f41de94eed484e1f7"}, + {file = "grpcio-1.31.0-cp35-cp35m-win_amd64.whl", hash = "sha256:ef9fce98b6fe03874c2a6576b02aec1a0df25742cd67d1d7b75a49e30aa74225"}, + {file = "grpcio-1.31.0-cp36-cp36m-linux_armv7l.whl", hash = "sha256:08a9b648dbe8852ff94b73a1c96da126834c3057ba2301d13e8c4adff334c482"}, + {file = "grpcio-1.31.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c22b19abba63562a5a200e586b5bde39d26c8ec30c92e26d209d81182371693b"}, + {file = "grpcio-1.31.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:0397616355760cd8282ed5ea34d51830ae4cb6613b7e5f66bed3be5d041b8b9a"}, + {file = "grpcio-1.31.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:259240aab2603891553e17ad5b2655693df79e02a9b887ff605bdeb2fcd3dcc9"}, + {file = "grpcio-1.31.0-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:8ca26b489b5dc1e3d31807d329c23d6cb06fe40fbae25b0649b718947936e26a"}, + {file = "grpcio-1.31.0-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:bf39977282a79dc1b2765cc3402c0ada571c29a491caec6ed12c0993c1ec115e"}, + {file = "grpcio-1.31.0-cp36-cp36m-win32.whl", hash = "sha256:f5b0870b733bcb7b6bf05a02035e7aaf20f599d3802b390282d4c2309f825f1d"}, + {file = "grpcio-1.31.0-cp36-cp36m-win_amd64.whl", hash = "sha256:074871a184483d5cd0746fd01e7d214d3ee9d36e67e32a5786b0a21f29fb8304"}, + {file = "grpcio-1.31.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:220c46b1fc9c9a6fcca4caac398f08f0ed43cdd63c45b7458983c4a1575ef6df"}, + {file = "grpcio-1.31.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:7a11b1ebb3210f34913b8be6995936bf9ebc541a65ab69e75db5ce1fe5047e8f"}, + {file = "grpcio-1.31.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:3c2aa6d7a5e5bf73fdb1715eee777efe06dd39df03383f1cc095b2fdb34883e6"}, + {file = "grpcio-1.31.0-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:e64bddd09842ef508d72ca354319b0eb126205d951e8ac3128fe9869bd563552"}, + {file = "grpcio-1.31.0-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:5d7faa89992e015d245750ca9ac916c161bbf72777b2c60abc61da3fae41339e"}, + {file = "grpcio-1.31.0-cp37-cp37m-win32.whl", hash = "sha256:43d44548ad6ee738b941abd9f09e3b83a5c13f3e1410321023c3c148ba50e796"}, + {file = "grpcio-1.31.0-cp37-cp37m-win_amd64.whl", hash = "sha256:bf00ab06ea4f89976288f4d6224d4aa120780e30c955d4f85c3214ada29b3ddf"}, + {file = "grpcio-1.31.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:344b50865914cc8e6d023457bffee9a640abb18f75d0f2bb519041961c748da9"}, + {file = "grpcio-1.31.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:63ee8e02d04272c3d103f44b4bce5d43ea757dd288673cea212d2f7da27967d2"}, + {file = "grpcio-1.31.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:a9a7ae74cb3108e6457cf15532d4c300324b48fbcf3ef290bcd2835745f20510"}, + {file = "grpcio-1.31.0-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:64077e3a9a7cf2f59e6c76d503c8de1f18a76428f41a5b000dc53c48a0b772ff"}, + {file = "grpcio-1.31.0-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:8b42f0ac76be07a5fa31117a3388d754ad35ef05e2e34be185ca9ccbcfac2069"}, + {file = "grpcio-1.31.0-cp38-cp38-win32.whl", hash = "sha256:8002a89ea91c0078c15d3c0daf423fd4968946be78f08545e807ea9a5ff8054a"}, + {file = "grpcio-1.31.0-cp38-cp38-win_amd64.whl", hash = "sha256:0fa86ac4452602c79774783aa68979a1a7625ebb7eaabee2b6550b975b9d61e6"}, + {file = "grpcio-1.31.0.tar.gz", hash = 
"sha256:5043440c45c0a031f387e7f48527541c65d672005fb24cf18ef6857483557d39"}, +] +h11 = [ + {file = "h11-0.8.1-py2.py3-none-any.whl", hash = "sha256:f2b1ca39bfed357d1f19ac732913d5f9faa54a5062eca7d2ec3a916cfb7ae4c7"}, + {file = "h11-0.8.1.tar.gz", hash = "sha256:acca6a44cb52a32ab442b1779adf0875c443c689e9e028f8d831a3769f9c5208"}, +] +h2 = [ + {file = "h2-3.2.0-py2.py3-none-any.whl", hash = "sha256:61e0f6601fa709f35cdb730863b4e5ec7ad449792add80d1410d4174ed139af5"}, + {file = "h2-3.2.0.tar.gz", hash = "sha256:875f41ebd6f2c44781259005b157faed1a5031df3ae5aa7bcb4628a6c0782f14"}, +] +h5py = [ + {file = "h5py-2.10.0-cp27-cp27m-macosx_10_6_intel.whl", hash = "sha256:ecf4d0b56ee394a0984de15bceeb97cbe1fe485f1ac205121293fc44dcf3f31f"}, + {file = "h5py-2.10.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:86868dc07b9cc8cb7627372a2e6636cdc7a53b7e2854ad020c9e9d8a4d3fd0f5"}, + {file = "h5py-2.10.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:aac4b57097ac29089f179bbc2a6e14102dd210618e94d77ee4831c65f82f17c0"}, + {file = "h5py-2.10.0-cp27-cp27m-win32.whl", hash = "sha256:7be5754a159236e95bd196419485343e2b5875e806fe68919e087b6351f40a70"}, + {file = "h5py-2.10.0-cp27-cp27m-win_amd64.whl", hash = "sha256:13c87efa24768a5e24e360a40e0bc4c49bcb7ce1bb13a3a7f9902cec302ccd36"}, + {file = "h5py-2.10.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:79b23f47c6524d61f899254f5cd5e486e19868f1823298bc0c29d345c2447172"}, + {file = "h5py-2.10.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:cbf28ae4b5af0f05aa6e7551cee304f1d317dbed1eb7ac1d827cee2f1ef97a99"}, + {file = "h5py-2.10.0-cp34-cp34m-manylinux1_i686.whl", hash = "sha256:c0d4b04bbf96c47b6d360cd06939e72def512b20a18a8547fa4af810258355d5"}, + {file = "h5py-2.10.0-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:549ad124df27c056b2e255ea1c44d30fb7a17d17676d03096ad5cd85edb32dc1"}, + {file = "h5py-2.10.0-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:a5f82cd4938ff8761d9760af3274acf55afc3c91c649c50ab18fcff5510a14a5"}, + {file = "h5py-2.10.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:3dad1730b6470fad853ef56d755d06bb916ee68a3d8272b3bab0c1ddf83bb99e"}, + {file = "h5py-2.10.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:063947eaed5f271679ed4ffa36bb96f57bc14f44dd4336a827d9a02702e6ce6b"}, + {file = "h5py-2.10.0-cp35-cp35m-win32.whl", hash = "sha256:c54a2c0dd4957776ace7f95879d81582298c5daf89e77fb8bee7378f132951de"}, + {file = "h5py-2.10.0-cp35-cp35m-win_amd64.whl", hash = "sha256:6998be619c695910cb0effe5eb15d3a511d3d1a5d217d4bd0bebad1151ec2262"}, + {file = "h5py-2.10.0-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:ff7d241f866b718e4584fa95f520cb19405220c501bd3a53ee11871ba5166ea2"}, + {file = "h5py-2.10.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:54817b696e87eb9e403e42643305f142cd8b940fe9b3b490bbf98c3b8a894cf4"}, + {file = "h5py-2.10.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:d3c59549f90a891691991c17f8e58c8544060fdf3ccdea267100fa5f561ff62f"}, + {file = "h5py-2.10.0-cp36-cp36m-win32.whl", hash = "sha256:d7ae7a0576b06cb8e8a1c265a8bc4b73d05fdee6429bffc9a26a6eb531e79d72"}, + {file = "h5py-2.10.0-cp36-cp36m-win_amd64.whl", hash = "sha256:bffbc48331b4a801d2f4b7dac8a72609f0b10e6e516e5c480a3e3241e091c878"}, + {file = "h5py-2.10.0-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:51ae56894c6c93159086ffa2c94b5b3388c0400548ab26555c143e7cfa05b8e5"}, + {file = "h5py-2.10.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:16ead3c57141101e3296ebeed79c9c143c32bdd0e82a61a2fc67e8e6d493e9d1"}, + {file = "h5py-2.10.0-cp37-cp37m-manylinux1_x86_64.whl", hash = 
"sha256:f0e25bb91e7a02efccb50aba6591d3fe2c725479e34769802fcdd4076abfa917"}, + {file = "h5py-2.10.0-cp37-cp37m-win32.whl", hash = "sha256:f23951a53d18398ef1344c186fb04b26163ca6ce449ebd23404b153fd111ded9"}, + {file = "h5py-2.10.0-cp37-cp37m-win_amd64.whl", hash = "sha256:8bb1d2de101f39743f91512a9750fb6c351c032e5cd3204b4487383e34da7f75"}, + {file = "h5py-2.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:64f74da4a1dd0d2042e7d04cf8294e04ddad686f8eba9bb79e517ae582f6668d"}, + {file = "h5py-2.10.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:d35f7a3a6cefec82bfdad2785e78359a0e6a5fbb3f605dd5623ce88082ccd681"}, + {file = "h5py-2.10.0-cp38-cp38-win32.whl", hash = "sha256:6ef7ab1089e3ef53ca099038f3c0a94d03e3560e6aff0e9d6c64c55fb13fc681"}, + {file = "h5py-2.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:769e141512b54dee14ec76ed354fcacfc7d97fea5a7646b709f7400cf1838630"}, + {file = "h5py-2.10.0.tar.gz", hash = "sha256:84412798925dc870ffd7107f045d7659e60f5d46d1c70c700375248bf6bf512d"}, +] +hpack = [ + {file = "hpack-3.0.0-py2.py3-none-any.whl", hash = "sha256:0edd79eda27a53ba5be2dfabf3b15780928a0dff6eb0c60a3d6767720e970c89"}, + {file = "hpack-3.0.0.tar.gz", hash = "sha256:8eec9c1f4bfae3408a3f30500261f7e6a65912dc138526ea054f9ad98892e9d2"}, +] +hstspreload = [ + {file = "hstspreload-2020.8.12-py3-none-any.whl", hash = "sha256:64f4441066d5544873faccf2e0b5757c6670217d34dc31d362ca2977f44604ff"}, + {file = "hstspreload-2020.8.12.tar.gz", hash = "sha256:3f5c324b1eb9d924e32ffeb5fe265b879806b6e346b765f57566410344f4b41e"}, +] +httplib2 = [ + {file = "httplib2-0.18.1-py3-none-any.whl", hash = "sha256:ca2914b015b6247791c4866782fa6042f495b94401a0f0bd3e1d6e0ba2236782"}, + {file = "httplib2-0.18.1.tar.gz", hash = "sha256:8af66c1c52c7ffe1aa5dc4bcd7c769885254b0756e6e69f953c7f0ab49a70ba3"}, +] +httptools = [ + {file = "httptools-0.1.1-cp35-cp35m-macosx_10_13_x86_64.whl", hash = "sha256:a2719e1d7a84bb131c4f1e0cb79705034b48de6ae486eb5297a139d6a3296dce"}, + {file = "httptools-0.1.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:fa3cd71e31436911a44620473e873a256851e1f53dee56669dae403ba41756a4"}, + {file = "httptools-0.1.1-cp36-cp36m-macosx_10_13_x86_64.whl", hash = "sha256:86c6acd66765a934e8730bf0e9dfaac6fdcf2a4334212bd4a0a1c78f16475ca6"}, + {file = "httptools-0.1.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:bc3114b9edbca5a1eb7ae7db698c669eb53eb8afbbebdde116c174925260849c"}, + {file = "httptools-0.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:ac0aa11e99454b6a66989aa2d44bca41d4e0f968e395a0a8f164b401fefe359a"}, + {file = "httptools-0.1.1-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:96da81e1992be8ac2fd5597bf0283d832287e20cb3cfde8996d2b00356d4e17f"}, + {file = "httptools-0.1.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:56b6393c6ac7abe632f2294da53f30d279130a92e8ae39d8d14ee2e1b05ad1f2"}, + {file = "httptools-0.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:96eb359252aeed57ea5c7b3d79839aaa0382c9d3149f7d24dd7172b1bcecb009"}, + {file = "httptools-0.1.1-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:fea04e126014169384dee76a153d4573d90d0cbd1d12185da089f73c78390437"}, + {file = "httptools-0.1.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:3592e854424ec94bd17dc3e0c96a64e459ec4147e6d53c0a42d0ebcef9cb9c5d"}, + {file = "httptools-0.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:0a4b1b2012b28e68306575ad14ad5e9120b34fccd02a81eb08838d7e3bbb48be"}, + {file = "httptools-0.1.1.tar.gz", hash = "sha256:41b573cf33f64a8f8f3400d0a7faf48e1888582b6f6e02b82b9bd4f0bf7497ce"}, +] +httpx = [ + {file = 
"httpx-0.9.3-py2.py3-none-any.whl", hash = "sha256:b06753331906495b76f3feee654d1f430e61eb1c72d2fff9604c37af83878fc9"}, + {file = "httpx-0.9.3.tar.gz", hash = "sha256:1291c5ad8c872668549abb99bf8d25d25bbcac4bac13863dee54252d66e90e6f"}, +] +humanfriendly = [ + {file = "humanfriendly-8.2-py2.py3-none-any.whl", hash = "sha256:e78960b31198511f45fd455534ae7645a6207d33e512d2e842c766d15d9c8080"}, + {file = "humanfriendly-8.2.tar.gz", hash = "sha256:bf52ec91244819c780341a3438d5d7b09f431d3f113a475147ac9b7b167a3d12"}, +] +hyperframe = [ + {file = "hyperframe-5.2.0-py2.py3-none-any.whl", hash = "sha256:5187962cb16dcc078f23cb5a4b110098d546c3f41ff2d4038a9896893bbd0b40"}, + {file = "hyperframe-5.2.0.tar.gz", hash = "sha256:a9f5c17f2cc3c719b917c4f33ed1c61bd1f8dfac4b1bd23b7c80b3400971b41f"}, +] +idna = [ + {file = "idna-2.8-py2.py3-none-any.whl", hash = "sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c"}, + {file = "idna-2.8.tar.gz", hash = "sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407"}, +] +idna-ssl = [ + {file = "idna-ssl-1.1.0.tar.gz", hash = "sha256:a933e3bb13da54383f9e8f35dc4f9cb9eb9b3b78c6b36f311254d6d0d92c6c7c"}, +] +immutables = [ + {file = "immutables-0.14-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:860666fab142401a5535bf65cbd607b46bc5ed25b9d1eb053ca8ed9a1a1a80d6"}, + {file = "immutables-0.14-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:ce01788878827c3f0331c254a4ad8d9721489a5e65cc43e19c80040b46e0d297"}, + {file = "immutables-0.14-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:8797eed4042f4626b0bc04d9cf134208918eb0c937a8193a2c66df5041e62d2e"}, + {file = "immutables-0.14-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:33ce2f977da7b5e0dddd93744862404bdb316ffe5853ec853e53141508fa2e6a"}, + {file = "immutables-0.14-cp36-cp36m-win_amd64.whl", hash = "sha256:6c8eace4d98988c72bcb37c05e79aae756832738305ae9497670482a82db08bc"}, + {file = "immutables-0.14-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:ab6c18b7b2b2abc83e0edc57b0a38bf0915b271582a1eb8c7bed1c20398f8040"}, + {file = "immutables-0.14-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:c099212fd6504513a50e7369fe281007c820cf9d7bb22a336486c63d77d6f0b2"}, + {file = "immutables-0.14-cp37-cp37m-win_amd64.whl", hash = "sha256:714aedbdeba4439d91cb5e5735cb10631fc47a7a69ea9cc8ecbac90322d50a4a"}, + {file = "immutables-0.14-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:1c11050c49e193a1ec9dda1747285333f6ba6a30bbeb2929000b9b1192097ec0"}, + {file = "immutables-0.14-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:c453e12b95e1d6bb4909e8743f88b7f5c0c97b86a8bc0d73507091cb644e3c1e"}, + {file = "immutables-0.14-cp38-cp38-win_amd64.whl", hash = "sha256:ef9da20ec0f1c5853b5c8f8e3d9e1e15b8d98c259de4b7515d789a606af8745e"}, + {file = "immutables-0.14.tar.gz", hash = "sha256:a0a1cc238b678455145bae291d8426f732f5255537ed6a5b7645949704c70a78"}, +] +importlab = [ + {file = "importlab-0.5.1.tar.gz", hash = "sha256:d855350d19dc10a17aabd2fe6f4b428ff1a936071f692fbf686a73694d26a51c"}, +] +importlib-metadata = [ + {file = "importlib_metadata-1.7.0-py2.py3-none-any.whl", hash = "sha256:dc15b2969b4ce36305c51eebe62d418ac7791e9a157911d58bfb1f9ccd8e2070"}, + {file = "importlib_metadata-1.7.0.tar.gz", hash = "sha256:90bb658cdbbf6d1735b6341ce708fc7024a3e14e99ffdc5783edea9f9b077f83"}, +] +importlib-resources = [ + {file = "importlib_resources-1.5.0-py2.py3-none-any.whl", hash = "sha256:85dc0b9b325ff78c8bef2e4ff42616094e16b98ebd5e3b50fe7e2f0bbcdcde49"}, + {file = "importlib_resources-1.5.0.tar.gz", hash = 
"sha256:6f87df66833e1942667108628ec48900e02a4ab4ad850e25fbf07cb17cf734ca"}, +] +incremental = [ + {file = "incremental-17.5.0-py2.py3-none-any.whl", hash = "sha256:717e12246dddf231a349175f48d74d93e2897244939173b01974ab6661406b9f"}, + {file = "incremental-17.5.0.tar.gz", hash = "sha256:7b751696aaf36eebfab537e458929e194460051ccad279c72b755a167eebd4b3"}, +] +ipaddress = [ + {file = "ipaddress-1.0.23-py2.py3-none-any.whl", hash = "sha256:6e0f4a39e66cb5bb9a137b00276a2eff74f93b71dcbdad6f10ff7df9d3557fcc"}, + {file = "ipaddress-1.0.23.tar.gz", hash = "sha256:b7f8e0369580bb4a24d5ba1d7cc29660a4a6987763faf1d8a8046830e020e7e2"}, +] +isodate = [ + {file = "isodate-0.6.0-py2.py3-none-any.whl", hash = "sha256:aa4d33c06640f5352aca96e4b81afd8ab3b47337cc12089822d6f322ac772c81"}, + {file = "isodate-0.6.0.tar.gz", hash = "sha256:2e364a3d5759479cdb2d37cce6b9376ea504db2ff90252a2e5b7cc89cc9ff2d8"}, +] +jieba = [ + {file = "jieba-0.42.1.tar.gz", hash = "sha256:055ca12f62674fafed09427f176506079bc135638a14e23e25be909131928db2"}, +] +jinja2 = [ + {file = "Jinja2-2.11.2-py2.py3-none-any.whl", hash = "sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035"}, + {file = "Jinja2-2.11.2.tar.gz", hash = "sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0"}, +] +jmespath = [ + {file = "jmespath-0.10.0-py2.py3-none-any.whl", hash = "sha256:cdf6525904cc597730141d61b36f2e4b8ecc257c420fa2f4549bac2c2d0cb72f"}, + {file = "jmespath-0.10.0.tar.gz", hash = "sha256:b85d0567b8666149a93172712e68920734333c0ce7e89b78b3e987f71e5ed4f9"}, +] +joblib = [ + {file = "joblib-0.15.1-py3-none-any.whl", hash = "sha256:6825784ffda353cc8a1be573118085789e5b5d29401856b35b756645ab5aecb5"}, + {file = "joblib-0.15.1.tar.gz", hash = "sha256:61e49189c84b3c5d99a969d314853f4d1d263316cc694bec17548ebaa9c47b6e"}, +] +jsondiff = [ + {file = "jsondiff-1.1.2.tar.gz", hash = "sha256:7e18138aecaa4a8f3b7ac7525b8466234e6378dd6cae702b982c9ed851d2ae21"}, +] +jsonpatch = [ + {file = "jsonpatch-1.26-py2.py3-none-any.whl", hash = "sha256:83ff23119b336ea2feffa682307eb7269b58097b4e88c089a4950d946442db16"}, + {file = "jsonpatch-1.26.tar.gz", hash = "sha256:e45df18b0ab7df1925f20671bbc3f6bd0b4b556fb4b9c5d97684b0a7eac01744"}, +] +jsonpickle = [ + {file = "jsonpickle-1.4.1-py2.py3-none-any.whl", hash = "sha256:8919c166bac0574e3d74425c7559434062002d9dfc0ac2afa6dc746ba4a19439"}, + {file = "jsonpickle-1.4.1.tar.gz", hash = "sha256:e8d4b7cd0bd6826001a74377df1079a76ad8bae0f909282de2554164c837c8ba"}, +] +jsonpointer = [ + {file = "jsonpointer-2.0-py2.py3-none-any.whl", hash = "sha256:ff379fa021d1b81ab539f5ec467c7745beb1a5671463f9dcc2b2d458bd361c1e"}, + {file = "jsonpointer-2.0.tar.gz", hash = "sha256:c192ba86648e05fdae4f08a17ec25180a9aef5008d973407b581798a83975362"}, +] +jsonschema = [ + {file = "jsonschema-3.2.0-py2.py3-none-any.whl", hash = "sha256:4e5b3cf8216f577bee9ce139cbe72eca3ea4f292ec60928ff24758ce626cd163"}, + {file = "jsonschema-3.2.0.tar.gz", hash = "sha256:c8a85b28d377cc7737e46e2d9f2b4f44ee3c0e1deac6bf46ddefc7187d30797a"}, +] +junit-xml = [ + {file = "junit_xml-1.9-py2.py3-none-any.whl", hash = "sha256:ec5ca1a55aefdd76d28fcc0b135251d156c7106fa979686a4b48d62b761b4732"}, +] +jwcrypto = [ + {file = "jwcrypto-0.7-py2.py3-none-any.whl", hash = "sha256:618ded1d25d3f806a1ab05cee42633a5a2787af33fca8d8f539b0aa1478b3728"}, + {file = "jwcrypto-0.7.tar.gz", hash = "sha256:adbe1f6266cde35d40d5de6d1419612b3bd4c869b9332c88c9d7a9163d305100"}, +] +kafka-python = [ + {file = "kafka-python-2.0.1.tar.gz", hash = 
"sha256:e59ad42dec8c7d54e3fbba0c1f8b54c44d92a3392d88242962d0c29803f2f6f8"}, + {file = "kafka_python-2.0.1-py2.py3-none-any.whl", hash = "sha256:513431184ecd08e706ca53421ff23e269fc052374084b45b49640419564dd704"}, +] +keras-preprocessing = [ + {file = "Keras_Preprocessing-1.1.2-py2.py3-none-any.whl", hash = "sha256:7b82029b130ff61cc99b55f3bd27427df4838576838c5b2f65940e4fcec99a7b"}, + {file = "Keras_Preprocessing-1.1.2.tar.gz", hash = "sha256:add82567c50c8bc648c14195bf544a5ce7c1f76761536956c3d2978970179ef3"}, +] +kiwisolver = [ + {file = "kiwisolver-1.2.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:443c2320520eda0a5b930b2725b26f6175ca4453c61f739fef7a5847bd262f74"}, + {file = "kiwisolver-1.2.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:efcf3397ae1e3c3a4a0a0636542bcad5adad3b1dd3e8e629d0b6e201347176c8"}, + {file = "kiwisolver-1.2.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:fccefc0d36a38c57b7bd233a9b485e2f1eb71903ca7ad7adacad6c28a56d62d2"}, + {file = "kiwisolver-1.2.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:be046da49fbc3aa9491cc7296db7e8d27bcf0c3d5d1a40259c10471b014e4e0c"}, + {file = "kiwisolver-1.2.0-cp36-none-win32.whl", hash = "sha256:60a78858580761fe611d22127868f3dc9f98871e6fdf0a15cc4203ed9ba6179b"}, + {file = "kiwisolver-1.2.0-cp36-none-win_amd64.whl", hash = "sha256:556da0a5f60f6486ec4969abbc1dd83cf9b5c2deadc8288508e55c0f5f87d29c"}, + {file = "kiwisolver-1.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7cc095a4661bdd8a5742aaf7c10ea9fac142d76ff1770a0f84394038126d8fc7"}, + {file = "kiwisolver-1.2.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:c955791d80e464da3b471ab41eb65cf5a40c15ce9b001fdc5bbc241170de58ec"}, + {file = "kiwisolver-1.2.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:603162139684ee56bcd57acc74035fceed7dd8d732f38c0959c8bd157f913fec"}, + {file = "kiwisolver-1.2.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:63f55f490b958b6299e4e5bdac66ac988c3d11b7fafa522800359075d4fa56d1"}, + {file = "kiwisolver-1.2.0-cp37-none-win32.whl", hash = "sha256:03662cbd3e6729f341a97dd2690b271e51a67a68322affab12a5b011344b973c"}, + {file = "kiwisolver-1.2.0-cp37-none-win_amd64.whl", hash = "sha256:4eadb361baf3069f278b055e3bb53fa189cea2fd02cb2c353b7a99ebb4477ef1"}, + {file = "kiwisolver-1.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c31bc3c8e903d60a1ea31a754c72559398d91b5929fcb329b1c3a3d3f6e72113"}, + {file = "kiwisolver-1.2.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:d52b989dc23cdaa92582ceb4af8d5bcc94d74b2c3e64cd6785558ec6a879793e"}, + {file = "kiwisolver-1.2.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:e586b28354d7b6584d8973656a7954b1c69c93f708c0c07b77884f91640b7657"}, + {file = "kiwisolver-1.2.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:38d05c9ecb24eee1246391820ed7137ac42a50209c203c908154782fced90e44"}, + {file = "kiwisolver-1.2.0-cp38-none-win32.whl", hash = "sha256:d069ef4b20b1e6b19f790d00097a5d5d2c50871b66d10075dab78938dc2ee2cf"}, + {file = "kiwisolver-1.2.0-cp38-none-win_amd64.whl", hash = "sha256:18d749f3e56c0480dccd1714230da0f328e6e4accf188dd4e6884bdd06bf02dd"}, + {file = "kiwisolver-1.2.0.tar.gz", hash = "sha256:247800260cd38160c362d211dcaf4ed0f7816afb5efe56544748b21d6ad6d17f"}, +] +markdown = [ + {file = "Markdown-3.2.2-py3-none-any.whl", hash = "sha256:c467cd6233885534bf0fe96e62e3cf46cfc1605112356c4f9981512b8174de59"}, + {file = "Markdown-3.2.2.tar.gz", hash = "sha256:1fafe3f1ecabfb514a5285fca634a53c1b32a81cb0feb154264d55bf2ff22c17"}, +] +markupsafe = [ + {file = 
"MarkupSafe-1.1.1-cp27-cp27m-macosx_10_6_intel.whl", hash = "sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161"}, + {file = "MarkupSafe-1.1.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7"}, + {file = "MarkupSafe-1.1.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183"}, + {file = "MarkupSafe-1.1.1-cp27-cp27m-win32.whl", hash = "sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b"}, + {file = "MarkupSafe-1.1.1-cp27-cp27m-win_amd64.whl", hash = "sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e"}, + {file = "MarkupSafe-1.1.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f"}, + {file = "MarkupSafe-1.1.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1"}, + {file = "MarkupSafe-1.1.1-cp34-cp34m-macosx_10_6_intel.whl", hash = "sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5"}, + {file = "MarkupSafe-1.1.1-cp34-cp34m-manylinux1_i686.whl", hash = "sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1"}, + {file = "MarkupSafe-1.1.1-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735"}, + {file = "MarkupSafe-1.1.1-cp34-cp34m-win32.whl", hash = "sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21"}, + {file = "MarkupSafe-1.1.1-cp34-cp34m-win_amd64.whl", hash = "sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235"}, + {file = "MarkupSafe-1.1.1-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b"}, + {file = "MarkupSafe-1.1.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f"}, + {file = "MarkupSafe-1.1.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905"}, + {file = "MarkupSafe-1.1.1-cp35-cp35m-win32.whl", hash = "sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1"}, + {file = "MarkupSafe-1.1.1-cp35-cp35m-win_amd64.whl", hash = "sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d"}, + {file = "MarkupSafe-1.1.1-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff"}, + {file = "MarkupSafe-1.1.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473"}, + {file = "MarkupSafe-1.1.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e"}, + {file = "MarkupSafe-1.1.1-cp36-cp36m-win32.whl", hash = "sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66"}, + {file = "MarkupSafe-1.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5"}, + {file = "MarkupSafe-1.1.1-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d"}, + {file = "MarkupSafe-1.1.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e"}, + {file = "MarkupSafe-1.1.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6"}, + 
{file = "MarkupSafe-1.1.1-cp37-cp37m-win32.whl", hash = "sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2"}, + {file = "MarkupSafe-1.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c"}, + {file = "MarkupSafe-1.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15"}, + {file = "MarkupSafe-1.1.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2"}, + {file = "MarkupSafe-1.1.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42"}, + {file = "MarkupSafe-1.1.1-cp38-cp38-win32.whl", hash = "sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b"}, + {file = "MarkupSafe-1.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be"}, + {file = "MarkupSafe-1.1.1.tar.gz", hash = "sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b"}, +] +matplotlib = [ + {file = "matplotlib-3.2.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a47abc48c7b81fe6e636dde8a58e49b13d87d140e0f448213a4879f4a3f73345"}, + {file = "matplotlib-3.2.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:20bcd11efe194cd302bd0653cb025b8d16bcd80442359bfca8d49dc805f35ec8"}, + {file = "matplotlib-3.2.2-cp36-cp36m-win32.whl", hash = "sha256:2a6d64336b547e25730b6221e7aadfb01a391a065d43b5f51f0b9d7f673d2dd2"}, + {file = "matplotlib-3.2.2-cp36-cp36m-win_amd64.whl", hash = "sha256:4416825ebc9c1f135027a30e8d8aea0edcf45078ce767c7f7386737413cfb98f"}, + {file = "matplotlib-3.2.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:465c752278d27895e23f1379d6fcfa3a2990643b803c25e3bc16a10641d2346a"}, + {file = "matplotlib-3.2.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:81de040403a33bf3c68e9d4a40e26c8d24da00f7e3fadd845003b7e106785da7"}, + {file = "matplotlib-3.2.2-cp37-cp37m-win32.whl", hash = "sha256:006413f08ba5db1f5b1e0d6fbdc2ac9058b062ccf552f57182563a78579c34b4"}, + {file = "matplotlib-3.2.2-cp37-cp37m-win_amd64.whl", hash = "sha256:da06fa530591a141ffbe1712bbeec784734c3436b40c942d21652f305199b5d9"}, + {file = "matplotlib-3.2.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:894dd47c0a6ce38dc19bc87d1f7e2b0608310b2a18d1572291157450b05ce874"}, + {file = "matplotlib-3.2.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:1ab264770e7cf2cf4feb99f22c737066aef21ddf1ec402dc255450ac15eacb7b"}, + {file = "matplotlib-3.2.2-cp38-cp38-win32.whl", hash = "sha256:91c153f4318e3c67c035fd1185f5ea2613f15008b73b66985033033f6fe54bbd"}, + {file = "matplotlib-3.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:a68e42e22f7fd190a532e4215e142276970c2d54040a0c46842fcb3db8b6ec5b"}, + {file = "matplotlib-3.2.2-cp39-cp39-win32.whl", hash = "sha256:647cf232ccf6265d2ba1ac4103e8c8b6ac7b03a40da3421234ffb03dda217f59"}, + {file = "matplotlib-3.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:31d32c83bb2b617377c6156f75e88b9ec2ded289e47ad4ff0f263dc1019d88b1"}, + {file = "matplotlib-3.2.2-pp36-pypy36_pp73-win32.whl", hash = "sha256:67065d938df34478451af62fbd0670d2b51c4d859fb66673064eb5de8660dd7c"}, + {file = "matplotlib-3.2.2.tar.gz", hash = "sha256:3d77a6630d093d74cbbfebaa0571d00790966be1ed204e4a8239f5cbd6835c5d"}, +] +mattermostwrapper = [ + {file = "mattermostwrapper-2.2.tar.gz", hash = "sha256:df17c4224b15c54d959addb12e83e3f1ada34bdb1fbed1048b7b9900d9cff53e"}, +] +mccabe = [ + {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = 
"sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, + {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, +] +mock = [ + {file = "mock-4.0.2-py3-none-any.whl", hash = "sha256:3f9b2c0196c60d21838f307f5825a7b86b678cedc58ab9e50a8988187b4d81e0"}, + {file = "mock-4.0.2.tar.gz", hash = "sha256:dd33eb70232b6118298d516bbcecd26704689c386594f0f3c4f13867b2c56f72"}, +] +mongomock = [ + {file = "mongomock-3.20.0-py2.py3-none-any.whl", hash = "sha256:f9c60ba13cea86ce532cf39091191237eaaf5a6360371d43d3b1dc624cb559f1"}, + {file = "mongomock-3.20.0.tar.gz", hash = "sha256:02f660a839a4623669fefc7168056a0a6e0c0f9d93b2b5eeaa52d17f3a642486"}, +] +more-itertools = [ + {file = "more-itertools-8.4.0.tar.gz", hash = "sha256:68c70cc7167bdf5c7c9d8f6954a7837089c6a36bf565383919bb595efb8a17e5"}, + {file = "more_itertools-8.4.0-py3-none-any.whl", hash = "sha256:b78134b2063dd214000685165d81c154522c3ee0a1c0d4d113c80361c234c5a2"}, +] +moto = [ + {file = "moto-1.3.14-py2.py3-none-any.whl", hash = "sha256:2b3fa22778504b45715868cad95ad458fdea7227f9005b12e522fc9c2ae0cabc"}, + {file = "moto-1.3.14.tar.gz", hash = "sha256:79aeaeed1592a24d3c488840065a3fcb3f4fa7ba40259e112482454c0e48a03a"}, +] +msrest = [ + {file = "msrest-0.6.18-py2.py3-none-any.whl", hash = "sha256:4993023011663b4273f15432fab75cc747dfa0bca1816d8122a7d1f9fdd9288d"}, + {file = "msrest-0.6.18.tar.gz", hash = "sha256:5f4ef9b8cc207d93978b1a58f055179686b9f30a5e28041872db97a4a1c49b96"}, +] +multidict = [ + {file = "multidict-4.7.6-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:275ca32383bc5d1894b6975bb4ca6a7ff16ab76fa622967625baeebcf8079000"}, + {file = "multidict-4.7.6-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:1ece5a3369835c20ed57adadc663400b5525904e53bae59ec854a5d36b39b21a"}, + {file = "multidict-4.7.6-cp35-cp35m-win32.whl", hash = "sha256:5141c13374e6b25fe6bf092052ab55c0c03d21bd66c94a0e3ae371d3e4d865a5"}, + {file = "multidict-4.7.6-cp35-cp35m-win_amd64.whl", hash = "sha256:9456e90649005ad40558f4cf51dbb842e32807df75146c6d940b6f5abb4a78f3"}, + {file = "multidict-4.7.6-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:e0d072ae0f2a179c375f67e3da300b47e1a83293c554450b29c900e50afaae87"}, + {file = "multidict-4.7.6-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:3750f2205b800aac4bb03b5ae48025a64e474d2c6cc79547988ba1d4122a09e2"}, + {file = "multidict-4.7.6-cp36-cp36m-win32.whl", hash = "sha256:f07acae137b71af3bb548bd8da720956a3bc9f9a0b87733e0899226a2317aeb7"}, + {file = "multidict-4.7.6-cp36-cp36m-win_amd64.whl", hash = "sha256:6513728873f4326999429a8b00fc7ceddb2509b01d5fd3f3be7881a257b8d463"}, + {file = "multidict-4.7.6-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:feed85993dbdb1dbc29102f50bca65bdc68f2c0c8d352468c25b54874f23c39d"}, + {file = "multidict-4.7.6-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:fcfbb44c59af3f8ea984de67ec7c306f618a3ec771c2843804069917a8f2e255"}, + {file = "multidict-4.7.6-cp37-cp37m-win32.whl", hash = "sha256:4538273208e7294b2659b1602490f4ed3ab1c8cf9dbdd817e0e9db8e64be2507"}, + {file = "multidict-4.7.6-cp37-cp37m-win_amd64.whl", hash = "sha256:d14842362ed4cf63751648e7672f7174c9818459d169231d03c56e84daf90b7c"}, + {file = "multidict-4.7.6-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:c026fe9a05130e44157b98fea3ab12969e5b60691a276150db9eda71710cd10b"}, + {file = "multidict-4.7.6-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:51a4d210404ac61d32dada00a50ea7ba412e6ea945bbe992e4d7a595276d2ec7"}, + {file = 
"multidict-4.7.6-cp38-cp38-win32.whl", hash = "sha256:5cf311a0f5ef80fe73e4f4c0f0998ec08f954a6ec72b746f3c179e37de1d210d"}, + {file = "multidict-4.7.6-cp38-cp38-win_amd64.whl", hash = "sha256:7388d2ef3c55a8ba80da62ecfafa06a1c097c18032a501ffd4cabbc52d7f2b19"}, + {file = "multidict-4.7.6.tar.gz", hash = "sha256:fbb77a75e529021e7c4a8d4e823d88ef4d23674a202be4f5addffc72cbb91430"}, +] +murmurhash = [ + {file = "murmurhash-1.0.2-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:717196a04cdc80cc3103a3da17b2415a8a5e1d0d578b7079259386bf153b3258"}, + {file = "murmurhash-1.0.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:a6c071b4b498bcea16a8dc8590cad81fa8d43821f34c74bc00f96499e2527073"}, + {file = "murmurhash-1.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:d696c394ebd164ca80b5871e2e9ad2f9fdbb81bd3c552c1d5f1e8ee694e6204a"}, + {file = "murmurhash-1.0.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:27b908fe4bdb426f4e4e4a8821acbe0302915b2945e035ec9d8ca513e2a74b1f"}, + {file = "murmurhash-1.0.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:33405103fa8cde15d72ee525a03d5cfe2c7e4901133819754810986e29627d68"}, + {file = "murmurhash-1.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:3af36a0dc9f13f6892d9b8b39a6a3ccf216cae5bce38adc7c2d145677987772f"}, + {file = "murmurhash-1.0.2-cp36-cp36m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:fe344face8d30a5a6aa26e5acf288aa2a8f0f32e05efdda3d314b4bf289ec2af"}, + {file = "murmurhash-1.0.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:386a9eed3cb27cb2cd4394b6521275ba04552642c2d9cab5c9fb42aa5a3325c0"}, + {file = "murmurhash-1.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:b0afe329701b59d02e56bc6cee7325af83e3fee9c299c615fc1df3202b4f886f"}, + {file = "murmurhash-1.0.2-cp37-cp37m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:bf33490514d308bcc27ed240cb3eb114f1ec31af031535cd8f27659a7049bd52"}, + {file = "murmurhash-1.0.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:8a4ed95cd3456b43ea301679c7c39ade43fc18b844b37d0ba0ac0d6acbff8e0c"}, + {file = "murmurhash-1.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:ba766343bdbcb928039b8fff609e80ae7a5fd5ed7a4fc5af822224b63e0cbaff"}, + {file = "murmurhash-1.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cc97ea766ac545074bab0e5af3dbc48e0d05ba230ae5a404e284d39abe4b3baf"}, + {file = "murmurhash-1.0.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:8b045a79e8b621b4b35b29f29e33e9e0964f3a276f7da4d5736142f322ad4842"}, + {file = "murmurhash-1.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:f468e4868f78c3ac202a66abfe2866414bca4ae7666a21ef0938c423de0f7d50"}, + {file = "murmurhash-1.0.2.tar.gz", hash = "sha256:c7a646f6b07b033642b4f52ae2e45efd8b80780b3b90e8092a0cec935fbf81e2"}, +] +networkx = [ + {file = "networkx-2.4-py3-none-any.whl", hash = "sha256:cdfbf698749a5014bf2ed9db4a07a5295df1d3a53bf80bf3cbd61edf9df05fa1"}, + {file = "networkx-2.4.tar.gz", hash = "sha256:f8f4ff0b6f96e4f9b16af6b84622597b5334bf9cae8cf9b2e42e7985d5c95c64"}, +] +ninja = [ + {file = "ninja-1.10.0.post1-py2-none-macosx_10_6_x86_64.whl", hash = "sha256:5ae857e0283acbf4b3645756d9e8217fddbe1f41dfe33e2c40dc79cb69706a8c"}, + {file = "ninja-1.10.0.post1-py2-none-manylinux1_i686.whl", hash = "sha256:760de263a261919fc97cf1fd30d2dd8902dd89d5165d6cbf80ce3d66a39fff11"}, + {file = "ninja-1.10.0.post1-py2-none-manylinux1_x86_64.whl", hash = 
"sha256:3c206a4b8b896f396aeabfc0dbd99d84bc01306a3e07568d28d5536c24cbeaa3"}, + {file = "ninja-1.10.0.post1-py2-none-win32.whl", hash = "sha256:18bd4ebc6cef30981e966609362090a0d99aeca29a63ca83a3688305f1c35222"}, + {file = "ninja-1.10.0.post1-py2-none-win_amd64.whl", hash = "sha256:6ba8b42193600bfbde76dc32d7f6fd5675e253a9e5d7caad4a2735a84a72d491"}, + {file = "ninja-1.10.0.post1-py3-none-macosx_10_6_x86_64.whl", hash = "sha256:607211b652a32006cda8a72a1496c348ddadcbe30986ff264e7354972fa3194e"}, + {file = "ninja-1.10.0.post1-py3-none-manylinux1_i686.whl", hash = "sha256:3d4b1a3fa4d68c9dc74f50875c9bfe4eaaf495b5205d12526aea95043488c8b6"}, + {file = "ninja-1.10.0.post1-py3-none-manylinux1_x86_64.whl", hash = "sha256:fa6d68b4f65aca57594d3cccfcf8fa7c8a311e93c55eed8043cabc439617d7b7"}, + {file = "ninja-1.10.0.post1-py3-none-win32.whl", hash = "sha256:9897b92c626caabe51fce04a9be851f635ed828a55c558a9cf1a75571b4c4fce"}, + {file = "ninja-1.10.0.post1-py3-none-win_amd64.whl", hash = "sha256:39f9ab35f52b540777b77cc889ffed37182c7d55bec00f658f6f74bd5b1a4377"}, + {file = "ninja-1.10.0.post1.tar.gz", hash = "sha256:ddfac074ae408e42c617cd44f90a95bf6db94f0c846c95ef2a3a9a03438027a1"}, +] +"nr.collections" = [ + {file = "nr.collections-0.0.1.tar.gz", hash = "sha256:ddf38cd6379cac546ce7abdadf024fc01cca75540e11b1d5f1aa701a33817f1c"}, +] +"nr.databind.core" = [ + {file = "nr.databind.core-0.0.21-py2.py3-none-any.whl", hash = "sha256:fcee082fbf24be6db90e4c5a852a8c4d305f3ab226e09d9938dbee777d8e803c"}, + {file = "nr.databind.core-0.0.21.tar.gz", hash = "sha256:5f61f63294dea0fca8b71be6aec6df4b960bac25a7a74fbec666fadfd2b79968"}, +] +"nr.databind.json" = [ + {file = "nr.databind.json-0.0.13-py2.py3-none-any.whl", hash = "sha256:82af968a0eeb2c2ba2bc6f840c34b64eb4278035b9e3dfb9904e4e4ba519821d"}, + {file = "nr.databind.json-0.0.13.tar.gz", hash = "sha256:264c489a1c99228b47f290cd9523e44b348c63ef78515e2b9ca90b3deff5658b"}, +] +"nr.fs" = [ + {file = "nr.fs-1.6.2.tar.gz", hash = "sha256:9a9ae664d5ac1bd2b740d458a39378dd312d8054cfb8ec5c3aed09ea036fbaa8"}, +] +"nr.interface" = [ + {file = "nr.interface-0.0.3-py2.py3-none-any.whl", hash = "sha256:c2b806b82cfc23e871e3364e262da015c6bd99755a649d0da01ce31f9836b4a9"}, + {file = "nr.interface-0.0.3.tar.gz", hash = "sha256:58a99b5ec4d29edd32569a674c1a5cf929ebc34faec1bbb47edfc021821763e3"}, +] +"nr.metaclass" = [ + {file = "nr.metaclass-0.0.5.tar.gz", hash = "sha256:2a172e552fd1e799cdb0ecb8fd26eac8f6c22164d8dd4386f9315bfcefeb988b"}, +] +"nr.parsing.date" = [ + {file = "nr.parsing.date-0.3.0-py2.py3-none-any.whl", hash = "sha256:753a7b8890391aad7d7016621bcd7e5859e6f9cbf5e45fe2884d5de72d8f0b7c"}, + {file = "nr.parsing.date-0.3.0.tar.gz", hash = "sha256:02d91e12df4482109e1538c190ac9a11cbf9c680d0760813909747bc85654c9d"}, +] +"nr.pylang.utils" = [ + {file = "nr.pylang.utils-0.0.3-py3-none-any.whl", hash = "sha256:5346086992162981349ca73911d90bc17540f66e71584bdab25f926f3fb843a8"}, + {file = "nr.pylang.utils-0.0.3.tar.gz", hash = "sha256:b20b731b28dab81c8d498d7955a45afa3aa02e18c7b7398408985d53b5bb2a28"}, +] +"nr.stream" = [ + {file = "nr.stream-0.0.4-py2.py3-none-any.whl", hash = "sha256:3692a373e28890884657d21b4527318d98c47ba77de5bd248a7c227ef16e5045"}, + {file = "nr.stream-0.0.4.tar.gz", hash = "sha256:6a90c6bcb3a96ee1d11c0ababfe2fbacb0b5587a5b6c54dffb8c2c29ab5d7582"}, +] +"nr.sumtype" = [ + {file = "nr.sumtype-0.0.3.tar.gz", hash = "sha256:9c9e943e4ca10d158bd34a6a86d27799c01464651161f9e905de2f93f588eb64"}, +] +"nr.utils.re" = [ + {file = "nr.utils.re-0.1.0-py2.py3-none-any.whl", hash = 
"sha256:f9345351462e973a0991f17470f73e911cb226fd659d01d5178b166767a495fd"}, + {file = "nr.utils.re-0.1.0.tar.gz", hash = "sha256:7aad941dd92609a227c774ae21518fc804d613e1e6a787225a56b70075753388"}, +] +numpy = [ + {file = "numpy-1.18.5-cp35-cp35m-macosx_10_9_intel.whl", hash = "sha256:e91d31b34fc7c2c8f756b4e902f901f856ae53a93399368d9a0dc7be17ed2ca0"}, + {file = "numpy-1.18.5-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:7d42ab8cedd175b5ebcb39b5208b25ba104842489ed59fbb29356f671ac93583"}, + {file = "numpy-1.18.5-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:a78e438db8ec26d5d9d0e584b27ef25c7afa5a182d1bf4d05e313d2d6d515271"}, + {file = "numpy-1.18.5-cp35-cp35m-win32.whl", hash = "sha256:a87f59508c2b7ceb8631c20630118cc546f1f815e034193dc72390db038a5cb3"}, + {file = "numpy-1.18.5-cp35-cp35m-win_amd64.whl", hash = "sha256:965df25449305092b23d5145b9bdaeb0149b6e41a77a7d728b1644b3c99277c1"}, + {file = "numpy-1.18.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:ac792b385d81151bae2a5a8adb2b88261ceb4976dbfaaad9ce3a200e036753dc"}, + {file = "numpy-1.18.5-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:ef627986941b5edd1ed74ba89ca43196ed197f1a206a3f18cc9faf2fb84fd675"}, + {file = "numpy-1.18.5-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:f718a7949d1c4f622ff548c572e0c03440b49b9531ff00e4ed5738b459f011e8"}, + {file = "numpy-1.18.5-cp36-cp36m-win32.whl", hash = "sha256:4064f53d4cce69e9ac613256dc2162e56f20a4e2d2086b1956dd2fcf77b7fac5"}, + {file = "numpy-1.18.5-cp36-cp36m-win_amd64.whl", hash = "sha256:b03b2c0badeb606d1232e5f78852c102c0a7989d3a534b3129e7856a52f3d161"}, + {file = "numpy-1.18.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a7acefddf994af1aeba05bbbafe4ba983a187079f125146dc5859e6d817df824"}, + {file = "numpy-1.18.5-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:cd49930af1d1e49a812d987c2620ee63965b619257bd76eaaa95870ca08837cf"}, + {file = "numpy-1.18.5-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:b39321f1a74d1f9183bf1638a745b4fd6fe80efbb1f6b32b932a588b4bc7695f"}, + {file = "numpy-1.18.5-cp37-cp37m-win32.whl", hash = "sha256:cae14a01a159b1ed91a324722d746523ec757357260c6804d11d6147a9e53e3f"}, + {file = "numpy-1.18.5-cp37-cp37m-win_amd64.whl", hash = "sha256:0172304e7d8d40e9e49553901903dc5f5a49a703363ed756796f5808a06fc233"}, + {file = "numpy-1.18.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e15b382603c58f24265c9c931c9a45eebf44fe2e6b4eaedbb0d025ab3255228b"}, + {file = "numpy-1.18.5-cp38-cp38-manylinux1_i686.whl", hash = "sha256:3676abe3d621fc467c4c1469ee11e395c82b2d6b5463a9454e37fe9da07cd0d7"}, + {file = "numpy-1.18.5-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:4674f7d27a6c1c52a4d1aa5f0881f1eff840d2206989bae6acb1c7668c02ebfb"}, + {file = "numpy-1.18.5-cp38-cp38-win32.whl", hash = "sha256:9c9d6531bc1886454f44aa8f809268bc481295cf9740827254f53c30104f074a"}, + {file = "numpy-1.18.5-cp38-cp38-win_amd64.whl", hash = "sha256:3dd6823d3e04b5f223e3e265b4a1eae15f104f4366edd409e5a5e413a98f911f"}, + {file = "numpy-1.18.5.zip", hash = "sha256:34e96e9dae65c4839bd80012023aadd6ee2ccb73ce7fdf3074c62f301e63120b"}, +] +oauth2client = [ + {file = "oauth2client-4.1.3-py2.py3-none-any.whl", hash = "sha256:b8a81cc5d60e2d364f0b1b98f958dbd472887acaf1a5b05e21c28c31a2d6d3ac"}, + {file = "oauth2client-4.1.3.tar.gz", hash = "sha256:d486741e451287f69568a4d26d70d9acd73a2bbfa275746c535b4209891cccc6"}, +] +oauthlib = [ + {file = "oauthlib-3.1.0-py2.py3-none-any.whl", hash = "sha256:df884cd6cbe20e32633f1db1072e9356f53638e4361bef4e8b03c9127c9328ea"}, + {file = "oauthlib-3.1.0.tar.gz", hash = 
"sha256:bee41cc35fcca6e988463cacc3bcb8a96224f470ca547e697b604cc697b2f889"}, +] +opt-einsum = [ + {file = "opt_einsum-3.3.0-py3-none-any.whl", hash = "sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147"}, + {file = "opt_einsum-3.3.0.tar.gz", hash = "sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549"}, +] +packaging = [ + {file = "packaging-20.4-py2.py3-none-any.whl", hash = "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181"}, + {file = "packaging-20.4.tar.gz", hash = "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8"}, +] +pathspec = [ + {file = "pathspec-0.8.0-py2.py3-none-any.whl", hash = "sha256:7d91249d21749788d07a2d0f94147accd8f845507400749ea19c1ec9054a12b0"}, + {file = "pathspec-0.8.0.tar.gz", hash = "sha256:da45173eb3a6f2a5a487efba21f050af2b41948be6ab52b6a1e3ff22bb8b7061"}, +] +pathtools = [ + {file = "pathtools-0.1.2.tar.gz", hash = "sha256:7c35c5421a39bb82e58018febd90e3b6e5db34c5443aaaf742b3f33d4655f1c0"}, +] +pep440-version-utils = [ + {file = "pep440-version-utils-0.3.0.tar.gz", hash = "sha256:ceb8c8da63b54cc555946d91829f72fe323f8d635b22fa54ef0a9800c37f50df"}, + {file = "pep440_version_utils-0.3.0-py3-none-any.whl", hash = "sha256:73780b2c31adad5ca35c89eb008f51c2a47aee0318debe31391b673b90577e1b"}, +] +pika = [ + {file = "pika-1.1.0-py2.py3-none-any.whl", hash = "sha256:4e1a1a6585a41b2341992ec32aadb7a919d649eb82904fd8e4a4e0871c8cf3af"}, + {file = "pika-1.1.0.tar.gz", hash = "sha256:9fa76ba4b65034b878b2b8de90ff8660a59d925b087c5bb88f8fdbb4b64a1dbf"}, +] +plac = [ + {file = "plac-1.1.3-py2.py3-none-any.whl", hash = "sha256:487e553017d419f35add346c4c09707e52fa53f7e7181ce1098ca27620e9ceee"}, + {file = "plac-1.1.3.tar.gz", hash = "sha256:398cb947c60c4c25e275e1f1dadf027e7096858fb260b8ece3b33bcff90d985f"}, +] +pluggy = [ + {file = "pluggy-0.13.1-py2.py3-none-any.whl", hash = "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"}, + {file = "pluggy-0.13.1.tar.gz", hash = "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0"}, +] +preshed = [ + {file = "preshed-3.0.2-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:448d9df12e63fe4a3024f6153ee6703bb95d2be0ce887b5eda7ddc41acfba825"}, + {file = "preshed-3.0.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:633358f1fb0ec5dd6dbe4971c328d08809e5a8dbefdf13a802ae0a7cb45306c7"}, + {file = "preshed-3.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:7ea588a78aaf310ae2c293071a8571b07ae434819be05fe510442b6df3f8fbf7"}, + {file = "preshed-3.0.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:8a9a8222a697a513f25a94733e7a17cc298ecd8fd56b606a1d8fa0ac342c2830"}, + {file = "preshed-3.0.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:253970beae87ab672a6afb543908761795eea3cb7b0d784e2ea51e265752059e"}, + {file = "preshed-3.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:88427346b220293439db77c82913791fa13edc6ac73d8159610699a3ca17aae9"}, + {file = "preshed-3.0.2-cp36-cp36m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:6518bbd5fb8adbc3231e75ae78d96a7bdd5405a3b23a09d5e62a2e4fc833724e"}, + {file = "preshed-3.0.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:1be3cb59211282e906a11443464fe3e19f6561e2fcd06410e4adc6d45354cf82"}, + {file = "preshed-3.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:ece5e850f667eaa3367d5c56dda9e3aa6ac1c0bb2117d2f466a26db5f26bbe4b"}, + {file = 
"preshed-3.0.2-cp37-cp37m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:1ef72a120e49356058b3c0590d7b5e91f2747b44e006eef6579be6131223cab0"}, + {file = "preshed-3.0.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:7e80ffc1fb79496d4feafe0eaf71ee5e532b91daf6cec235d7f9c4c12657a58c"}, + {file = "preshed-3.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:0c15ae62f2595ca479decc3452967484dae57b510278800f5deb9115238cc818"}, + {file = "preshed-3.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e37058d91bd7f0f5a7a9c83d22a83dc581ab5f79688a87be81f200993145a250"}, + {file = "preshed-3.0.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:b4ae6c7c44aa3ff7bd717791bb6b619ecb273b7cb128c986f2dc65f6e0e6ddd4"}, + {file = "preshed-3.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:190345724eb3f7aeaeb2a758740d698bd6c017c2cdf07c71c16b34820973d114"}, + {file = "preshed-3.0.2.tar.gz", hash = "sha256:61d73468c97c1d6d5a048de0b01d5a6fd052123358aca4823cdb277e436436cb"}, +] +prompt-toolkit = [ + {file = "prompt_toolkit-2.0.10-py2-none-any.whl", hash = "sha256:e7f8af9e3d70f514373bf41aa51bc33af12a6db3f71461ea47fea985defb2c31"}, + {file = "prompt_toolkit-2.0.10-py3-none-any.whl", hash = "sha256:46642344ce457641f28fc9d1c9ca939b63dadf8df128b86f1b9860e59c73a5e4"}, + {file = "prompt_toolkit-2.0.10.tar.gz", hash = "sha256:f15af68f66e664eaa559d4ac8a928111eebd5feda0c11738b5998045224829db"}, +] +protobuf = [ + {file = "protobuf-3.13.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:9c2e63c1743cba12737169c447374fab3dfeb18111a460a8c1a000e35836b18c"}, + {file = "protobuf-3.13.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1e834076dfef9e585815757a2c7e4560c7ccc5962b9d09f831214c693a91b463"}, + {file = "protobuf-3.13.0-cp35-cp35m-macosx_10_9_intel.whl", hash = "sha256:df3932e1834a64b46ebc262e951cd82c3cf0fa936a154f0a42231140d8237060"}, + {file = "protobuf-3.13.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:8c35bcbed1c0d29b127c886790e9d37e845ffc2725cc1db4bd06d70f4e8359f4"}, + {file = "protobuf-3.13.0-cp35-cp35m-win32.whl", hash = "sha256:339c3a003e3c797bc84499fa32e0aac83c768e67b3de4a5d7a5a9aa3b0da634c"}, + {file = "protobuf-3.13.0-cp35-cp35m-win_amd64.whl", hash = "sha256:361acd76f0ad38c6e38f14d08775514fbd241316cce08deb2ce914c7dfa1184a"}, + {file = "protobuf-3.13.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9edfdc679a3669988ec55a989ff62449f670dfa7018df6ad7f04e8dbacb10630"}, + {file = "protobuf-3.13.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:5db9d3e12b6ede5e601b8d8684a7f9d90581882925c96acf8495957b4f1b204b"}, + {file = "protobuf-3.13.0-cp36-cp36m-win32.whl", hash = "sha256:c8abd7605185836f6f11f97b21200f8a864f9cb078a193fe3c9e235711d3ff1e"}, + {file = "protobuf-3.13.0-cp36-cp36m-win_amd64.whl", hash = "sha256:4d1174c9ed303070ad59553f435846a2f877598f59f9afc1b89757bdf846f2a7"}, + {file = "protobuf-3.13.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0bba42f439bf45c0f600c3c5993666fcb88e8441d011fad80a11df6f324eef33"}, + {file = "protobuf-3.13.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:c0c5ab9c4b1eac0a9b838f1e46038c3175a95b0f2d944385884af72876bd6bc7"}, + {file = "protobuf-3.13.0-cp37-cp37m-win32.whl", hash = "sha256:f68eb9d03c7d84bd01c790948320b768de8559761897763731294e3bc316decb"}, + {file = "protobuf-3.13.0-cp37-cp37m-win_amd64.whl", hash = "sha256:91c2d897da84c62816e2f473ece60ebfeab024a16c1751aaf31100127ccd93ec"}, + {file = "protobuf-3.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:3dee442884a18c16d023e52e32dd34a8930a889e511af493f6dc7d4d9bf12e4f"}, + {file = "protobuf-3.13.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:e7662437ca1e0c51b93cadb988f9b353fa6b8013c0385d63a70c8a77d84da5f9"}, + {file = "protobuf-3.13.0-py2.py3-none-any.whl", hash = "sha256:d69697acac76d9f250ab745b46c725edf3e98ac24763990b24d58c16c642947a"}, + {file = "protobuf-3.13.0.tar.gz", hash = "sha256:6a82e0c8bb2bf58f606040cc5814e07715b2094caeba281e2e7d0b0e2e397db5"}, +] +psycopg2-binary = [ + {file = "psycopg2-binary-2.8.5.tar.gz", hash = "sha256:ccdc6a87f32b491129ada4b87a43b1895cf2c20fdb7f98ad979647506ffc41b6"}, + {file = "psycopg2_binary-2.8.5-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:96d3038f5bd061401996614f65d27a4ecb62d843eb4f48e212e6d129171a721f"}, + {file = "psycopg2_binary-2.8.5-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:08507efbe532029adee21b8d4c999170a83760d38249936038bd0602327029b5"}, + {file = "psycopg2_binary-2.8.5-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:b9a8b391c2b0321e0cd7ec6b4cfcc3dd6349347bd1207d48bcb752aa6c553a66"}, + {file = "psycopg2_binary-2.8.5-cp27-cp27m-win32.whl", hash = "sha256:3286541b9d85a340ee4ed42732d15fc1bb441dc500c97243a768154ab8505bb5"}, + {file = "psycopg2_binary-2.8.5-cp27-cp27m-win_amd64.whl", hash = "sha256:008da3ab51adc70a5f1cfbbe5db3a22607ab030eb44bcecf517ad11a0c2b3cac"}, + {file = "psycopg2_binary-2.8.5-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:ba13346ff6d3eb2dca0b6fa0d8a9d999eff3dcd9b55f3a890f12b0b6362b2b38"}, + {file = "psycopg2_binary-2.8.5-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:c8830b7d5f16fd79d39b21e3d94f247219036b29b30c8270314c46bf8b732389"}, + {file = "psycopg2_binary-2.8.5-cp34-cp34m-win32.whl", hash = "sha256:51f7823f1b087d2020d8e8c9e6687473d3d239ba9afc162d9b2ab6e80b53f9f9"}, + {file = "psycopg2_binary-2.8.5-cp34-cp34m-win_amd64.whl", hash = "sha256:107d9be3b614e52a192719c6bf32e8813030020ea1d1215daa86ded9a24d8b04"}, + {file = "psycopg2_binary-2.8.5-cp35-cp35m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:930315ac53dc65cbf52ab6b6d27422611f5fb461d763c531db229c7e1af6c0b3"}, + {file = "psycopg2_binary-2.8.5-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:6bb2dd006a46a4a4ce95201f836194eb6a1e863f69ee5bab506673e0ca767057"}, + {file = "psycopg2_binary-2.8.5-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:3939cf75fc89c5e9ed836e228c4a63604dff95ad19aed2bbf71d5d04c15ed5ce"}, + {file = "psycopg2_binary-2.8.5-cp35-cp35m-win32.whl", hash = "sha256:a20299ee0ea2f9cca494396ac472d6e636745652a64a418b39522c120fd0a0a4"}, + {file = "psycopg2_binary-2.8.5-cp35-cp35m-win_amd64.whl", hash = "sha256:cc30cb900f42c8a246e2cb76539d9726f407330bc244ca7729c41a44e8d807fb"}, + {file = "psycopg2_binary-2.8.5-cp36-cp36m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:40abc319f7f26c042a11658bf3dd3b0b3bceccf883ec1c565d5c909a90204434"}, + {file = "psycopg2_binary-2.8.5-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:702f09d8f77dc4794651f650828791af82f7c2efd8c91ae79e3d9fe4bb7d4c98"}, + {file = "psycopg2_binary-2.8.5-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:d1a8b01f6a964fec702d6b6dac1f91f2b9f9fe41b310cbb16c7ef1fac82df06d"}, + {file = "psycopg2_binary-2.8.5-cp36-cp36m-win32.whl", hash = "sha256:17a0ea0b0eabf07035e5e0d520dabc7950aeb15a17c6d36128ba99b2721b25b1"}, + {file = "psycopg2_binary-2.8.5-cp36-cp36m-win_amd64.whl", hash 
= "sha256:e004db88e5a75e5fdab1620fb9f90c9598c2a195a594225ac4ed2a6f1c23e162"}, + {file = "psycopg2_binary-2.8.5-cp37-cp37m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:a34826d6465c2e2bbe9d0605f944f19d2480589f89863ed5f091943be27c9de4"}, + {file = "psycopg2_binary-2.8.5-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:cac918cd7c4c498a60f5d2a61d4f0a6091c2c9490d81bc805c963444032d0dab"}, + {file = "psycopg2_binary-2.8.5-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:7b832d76cc65c092abd9505cc670c4e3421fd136fb6ea5b94efbe4c146572505"}, + {file = "psycopg2_binary-2.8.5-cp37-cp37m-win32.whl", hash = "sha256:bb0608694a91db1e230b4a314e8ed00ad07ed0c518f9a69b83af2717e31291a3"}, + {file = "psycopg2_binary-2.8.5-cp37-cp37m-win_amd64.whl", hash = "sha256:eb2f43ae3037f1ef5e19339c41cf56947021ac892f668765cd65f8ab9814192e"}, + {file = "psycopg2_binary-2.8.5-cp38-cp38-macosx_10_9_x86_64.macosx_10_9_intel.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:07cf82c870ec2d2ce94d18e70c13323c89f2f2a2628cbf1feee700630be2519a"}, + {file = "psycopg2_binary-2.8.5-cp38-cp38-manylinux1_i686.whl", hash = "sha256:a69970ee896e21db4c57e398646af9edc71c003bc52a3cc77fb150240fefd266"}, + {file = "psycopg2_binary-2.8.5-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7036ccf715925251fac969f4da9ad37e4b7e211b1e920860148a10c0de963522"}, + {file = "psycopg2_binary-2.8.5-cp38-cp38-win32.whl", hash = "sha256:8f74e631b67482d504d7e9cf364071fc5d54c28e79a093ff402d5f8f81e23bfa"}, + {file = "psycopg2_binary-2.8.5-cp38-cp38-win_amd64.whl", hash = "sha256:fa466306fcf6b39b8a61d003123d442b23707d635a5cb05ac4e1b62cc79105cd"}, +] +py = [ + {file = "py-1.9.0-py2.py3-none-any.whl", hash = "sha256:366389d1db726cd2fcfc79732e75410e5fe4d31db13692115529d34069a043c2"}, + {file = "py-1.9.0.tar.gz", hash = "sha256:9ca6883ce56b4e8da7e79ac18787889fa5206c79dcc67fb065376cd2fe03f342"}, +] +pyasn1 = [ + {file = "pyasn1-0.4.8-py2.4.egg", hash = "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3"}, + {file = "pyasn1-0.4.8-py2.5.egg", hash = "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf"}, + {file = "pyasn1-0.4.8-py2.6.egg", hash = "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00"}, + {file = "pyasn1-0.4.8-py2.7.egg", hash = "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8"}, + {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"}, + {file = "pyasn1-0.4.8-py3.1.egg", hash = "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86"}, + {file = "pyasn1-0.4.8-py3.2.egg", hash = "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7"}, + {file = "pyasn1-0.4.8-py3.3.egg", hash = "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576"}, + {file = "pyasn1-0.4.8-py3.4.egg", hash = "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12"}, + {file = "pyasn1-0.4.8-py3.5.egg", hash = "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2"}, + {file = "pyasn1-0.4.8-py3.6.egg", hash = "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359"}, + {file = "pyasn1-0.4.8-py3.7.egg", hash = "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776"}, + {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"}, +] +pyasn1-modules = [ + {file = 
"pyasn1-modules-0.2.8.tar.gz", hash = "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e"}, + {file = "pyasn1_modules-0.2.8-py2.4.egg", hash = "sha256:0fe1b68d1e486a1ed5473f1302bd991c1611d319bba158e98b106ff86e1d7199"}, + {file = "pyasn1_modules-0.2.8-py2.5.egg", hash = "sha256:fe0644d9ab041506b62782e92b06b8c68cca799e1a9636ec398675459e031405"}, + {file = "pyasn1_modules-0.2.8-py2.6.egg", hash = "sha256:a99324196732f53093a84c4369c996713eb8c89d360a496b599fb1a9c47fc3eb"}, + {file = "pyasn1_modules-0.2.8-py2.7.egg", hash = "sha256:0845a5582f6a02bb3e1bde9ecfc4bfcae6ec3210dd270522fee602365430c3f8"}, + {file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"}, + {file = "pyasn1_modules-0.2.8-py3.1.egg", hash = "sha256:f39edd8c4ecaa4556e989147ebf219227e2cd2e8a43c7e7fcb1f1c18c5fd6a3d"}, + {file = "pyasn1_modules-0.2.8-py3.2.egg", hash = "sha256:b80486a6c77252ea3a3e9b1e360bc9cf28eaac41263d173c032581ad2f20fe45"}, + {file = "pyasn1_modules-0.2.8-py3.3.egg", hash = "sha256:65cebbaffc913f4fe9e4808735c95ea22d7a7775646ab690518c056784bc21b4"}, + {file = "pyasn1_modules-0.2.8-py3.4.egg", hash = "sha256:15b7c67fabc7fc240d87fb9aabf999cf82311a6d6fb2c70d00d3d0604878c811"}, + {file = "pyasn1_modules-0.2.8-py3.5.egg", hash = "sha256:426edb7a5e8879f1ec54a1864f16b882c2837bfd06eee62f2c982315ee2473ed"}, + {file = "pyasn1_modules-0.2.8-py3.6.egg", hash = "sha256:cbac4bc38d117f2a49aeedec4407d23e8866ea4ac27ff2cf7fb3e5b570df19e0"}, + {file = "pyasn1_modules-0.2.8-py3.7.egg", hash = "sha256:c29a5e5cc7a3f05926aff34e097e84f8589cd790ce0ed41b67aed6857b26aafd"}, +] +pycodestyle = [ + {file = "pycodestyle-2.6.0-py2.py3-none-any.whl", hash = "sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367"}, + {file = "pycodestyle-2.6.0.tar.gz", hash = "sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e"}, +] +pycparser = [ + {file = "pycparser-2.20-py2.py3-none-any.whl", hash = "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705"}, + {file = "pycparser-2.20.tar.gz", hash = "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0"}, +] +pydoc-markdown = [] +pydot = [ + {file = "pydot-1.4.1-py2.py3-none-any.whl", hash = "sha256:67be714300c78fda5fd52f79ec994039e3f76f074948c67b5ff539b433ad354f"}, + {file = "pydot-1.4.1.tar.gz", hash = "sha256:d49c9d4dd1913beec2a997f831543c8cbd53e535b1a739e921642fe416235f01"}, +] +pyflakes = [ + {file = "pyflakes-2.2.0-py2.py3-none-any.whl", hash = "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92"}, + {file = "pyflakes-2.2.0.tar.gz", hash = "sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8"}, +] +pyjwt = [ + {file = "PyJWT-1.7.1-py2.py3-none-any.whl", hash = "sha256:5c6eca3c2940464d106b99ba83b00c6add741c9becaec087fb7ccdefea71350e"}, + {file = "PyJWT-1.7.1.tar.gz", hash = "sha256:8d59a976fb773f3e6a39c85636357c4f0e242707394cadadd9814f5cbaa20e96"}, +] +pykwalify = [ + {file = "pykwalify-1.7.0-py2.py3-none-any.whl", hash = "sha256:428733907fe5c458fbea5de63a755f938edccd622c7a1d0b597806141976f00e"}, + {file = "pykwalify-1.7.0.tar.gz", hash = "sha256:7e8b39c5a3a10bc176682b3bd9a7422c39ca247482df198b402e8015defcceb2"}, +] +pymongo = [ + {file = "pymongo-3.10.1-cp27-cp27m-macosx_10_14_intel.whl", hash = "sha256:a732838c78554c1257ff2492f5c8c4c7312d0aecd7f732149e255f3749edd5ee"}, + {file = "pymongo-3.10.1-cp27-cp27m-manylinux1_i686.whl", hash = 
"sha256:358ba4693c01022d507b96a980ded855a32dbdccc3c9331d0667be5e967f30ed"}, + {file = "pymongo-3.10.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:334ef3ffd0df87ea83a0054454336159f8ad9c1b389e19c0032d9cb8410660e6"}, + {file = "pymongo-3.10.1-cp27-cp27m-win32.whl", hash = "sha256:e5c54f04ca42bbb5153aec5d4f2e3d9f81e316945220ac318abd4083308143f5"}, + {file = "pymongo-3.10.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e334c4f39a2863a239d38b5829e442a87f241a92da9941861ee6ec5d6380b7fe"}, + {file = "pymongo-3.10.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:a0794e987d55d2f719cc95fcf980fc62d12b80e287e6a761c4be14c60bd9fecc"}, + {file = "pymongo-3.10.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:bbf47110765b2a999803a7de457567389253f8670f7daafb98e059c899ce9764"}, + {file = "pymongo-3.10.1-cp34-cp34m-macosx_10_6_intel.whl", hash = "sha256:56fa55032782b7f8e0bf6956420d11e2d4e9860598dfe9c504edec53af0fc372"}, + {file = "pymongo-3.10.1-cp34-cp34m-manylinux1_i686.whl", hash = "sha256:da2c3220eb55c4239dd8b982e213da0b79023cac59fe54ca09365f2bc7e4ad32"}, + {file = "pymongo-3.10.1-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:0fc5aa1b1acf7f61af46fe0414e6a4d0c234b339db4c03a63da48599acf1cbfc"}, + {file = "pymongo-3.10.1-cp34-cp34m-win32.whl", hash = "sha256:2f07b27dbf303ea53f4147a7922ce91a26b34a0011131471d8aaf73151fdee9a"}, + {file = "pymongo-3.10.1-cp34-cp34m-win_amd64.whl", hash = "sha256:7aef381bb9ae8a3821abd7f9d4d93978dbd99072b48522e181baeffcd95b56ae"}, + {file = "pymongo-3.10.1-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:47a00b22c52ee59dffc2aad02d0bbfb20c26ec5b8de8900492bf13ad6901cf35"}, + {file = "pymongo-3.10.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:7abc3a6825a346fa4621a6f63e3b662bbb9e0f6ffc32d30a459d695f20fb1a8b"}, + {file = "pymongo-3.10.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9c0a57390549affc2b5dda24a38de03a5c7cbc58750cd161ff5d106c3c6eec80"}, + {file = "pymongo-3.10.1-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:e2b46e092ea54b732d98c476720386ff2ccd126de1e52076b470b117bff7e409"}, + {file = "pymongo-3.10.1-cp35-cp35m-manylinux2014_i686.whl", hash = "sha256:63752a72ca4d4e1386278bd43d14232f51718b409e7ac86bcf8810826b531113"}, + {file = "pymongo-3.10.1-cp35-cp35m-manylinux2014_ppc64le.whl", hash = "sha256:b070a4f064a9edb70f921bfdc270725cff7a78c22036dd37a767c51393fb956f"}, + {file = "pymongo-3.10.1-cp35-cp35m-manylinux2014_s390x.whl", hash = "sha256:6fdc5ccb43864065d40dd838437952e9e3da9821b7eac605ba46ada77f846bdf"}, + {file = "pymongo-3.10.1-cp35-cp35m-manylinux2014_x86_64.whl", hash = "sha256:a676bd2fbc2309092b9bbb0083d35718b5420af3a42135ebb1e4c3633f56604d"}, + {file = "pymongo-3.10.1-cp35-cp35m-win32.whl", hash = "sha256:c9ca545e93a9c2a3bdaa2e6e21f7a43267ff0813e8055adf2b591c13164c0c57"}, + {file = "pymongo-3.10.1-cp35-cp35m-win_amd64.whl", hash = "sha256:316f0cf543013d0c085e15a2c8abe0db70f93c9722c0f99b6f3318ff69477d70"}, + {file = "pymongo-3.10.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:2a3c7ad01553b27ec553688a1e6445e7f40355fb37d925c11fcb50b504e367f8"}, + {file = "pymongo-3.10.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:444bf2f44264578c4085bb04493bfed0e5c1b4fe7c2704504d769f955cc78fe4"}, + {file = "pymongo-3.10.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:f96333f9d2517c752c20a35ff95de5fc2763ac8cdb1653df0f6f45d281620606"}, + {file = "pymongo-3.10.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:a60756d55f0887023b3899e6c2923ba5f0042fb11b1d17810b4e07395404f33e"}, + {file = "pymongo-3.10.1-cp36-cp36m-manylinux2014_i686.whl", hash = 
"sha256:95f970f34b59987dee6f360d2e7d30e181d58957b85dff929eee4423739bd151"}, + {file = "pymongo-3.10.1-cp36-cp36m-manylinux2014_ppc64le.whl", hash = "sha256:619ac9aaf681434b4d4718d1b31aa2f0fce64f2b3f8435688fcbdc0c818b6c54"}, + {file = "pymongo-3.10.1-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:19adf2848b80cb349b9891cc854581bbf24c338be9a3260e73159bdeb2264464"}, + {file = "pymongo-3.10.1-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:26e707a4eb851ec27bb969b5f1413b9b2eac28fe34271fa72329100317ea7c73"}, + {file = "pymongo-3.10.1-cp36-cp36m-win32.whl", hash = "sha256:18e84a3ec5e73adcb4187b8e5541b2ad61d716026ed9863267e650300d8bea33"}, + {file = "pymongo-3.10.1-cp36-cp36m-win_amd64.whl", hash = "sha256:568d6bee70652d8a5af1cd3eec48b4ca1696fb1773b80719ebbd2925b72cb8f6"}, + {file = "pymongo-3.10.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b6da85949aa91e9f8c521681344bd2e163de894a5492337fba8b05c409225a4f"}, + {file = "pymongo-3.10.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:01b4e10027aef5bb9ecefbc26f5df3368ce34aef81df43850f701e716e3fe16d"}, + {file = "pymongo-3.10.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:26798795097bdeb571f13942beef7e0b60125397811c75b7aa9214d89880dd1d"}, + {file = "pymongo-3.10.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3a6568bc53103df260f5c7d2da36dffc5202b9a36c85540bba1836a774943794"}, + {file = "pymongo-3.10.1-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:c4aef42e5fa4c9d5a99f751fb79caa880dac7eaf8a65121549318b984676a1b7"}, + {file = "pymongo-3.10.1-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:61235cc39b5b2f593086d1d38f3fc130b2d125bd8fc8621d35bc5b6bdeb92bd2"}, + {file = "pymongo-3.10.1-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:c06b3f998d2d7160db58db69adfb807d2ec307e883e2f17f6b87a1ef6c723f11"}, + {file = "pymongo-3.10.1-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:4c067db43b331fc709080d441cb2e157114fec60749667d12186cc3fc8e7a951"}, + {file = "pymongo-3.10.1-cp37-cp37m-win32.whl", hash = "sha256:c318fb70542be16d3d4063cde6010b1e4d328993a793529c15a619251f517c39"}, + {file = "pymongo-3.10.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4c092310f804a5d45a1bcaa4191d6d016c457b6ed3982a622c35f729ff1c7f6b"}, + {file = "pymongo-3.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dd8055da300535eefd446b30995c0813cc4394873c9509323762a93e97c04c03"}, + {file = "pymongo-3.10.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:6238ac1f483494011abde5286282afdfacd8926659e222ba9b74c67008d3a58c"}, + {file = "pymongo-3.10.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:53b711b33134e292ef8499835a3df10909c58df53a2a0308f598c432e9a62892"}, + {file = "pymongo-3.10.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:20ee0475aa2ba437b0a14806f125d696f90a8433d820fb558fdd6f052acde103"}, + {file = "pymongo-3.10.1-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:80df3caf251fe61a3f0c9614adc6e2bfcffd1cd3345280896766712fb4b4d6d7"}, + {file = "pymongo-3.10.1-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:a3b98121e68bf370dd8ea09df67e916f93ea95b52fc010902312168c4d1aff5d"}, + {file = "pymongo-3.10.1-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:1396eb7151e0558b1f817e4b9d7697d5599e5c40d839a9f7270bd90af994ad82"}, + {file = "pymongo-3.10.1-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:ae65d65fde4135ef423a2608587c9ef585a3551fc2e4e431e7c7e527047581be"}, + {file = "pymongo-3.10.1-cp38-cp38-win32.whl", hash = "sha256:31d11a600eea0c60de22c8bdcb58cda63c762891facdcb74248c36713240987f"}, + {file = 
"pymongo-3.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a2c492680c61b440272341294172fa3b3751797b1ab983533a770e4fb0a67ac"}, + {file = "pymongo-3.10.1-py2.7-macosx-10.14-intel.egg", hash = "sha256:bd9c1e6f92b4888ae3ef7ae23262c513b962f09f3fb3b48581dde5df7d7a860a"}, + {file = "pymongo-3.10.1-py2.7-win-amd64.egg", hash = "sha256:ad3dc88dfe61f0f1f9b99c6bc833ea2f45203a937a18f0d2faa57c6952656012"}, + {file = "pymongo-3.10.1-py2.7-win32.egg", hash = "sha256:f4d06764a06b137e48db6d569dc95614d9d225c89842c885669ee8abc9f28c7a"}, + {file = "pymongo-3.10.1.tar.gz", hash = "sha256:993257f6ca3cde55332af1f62af3e04ca89ce63c08b56a387cdd46136c72f2fa"}, +] +pyparsing = [ + {file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"}, + {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"}, +] +pyreadline = [ + {file = "pyreadline-2.1.win-amd64.exe", hash = "sha256:9ce5fa65b8992dfa373bddc5b6e0864ead8f291c94fbfec05fbd5c836162e67b"}, + {file = "pyreadline-2.1.win32.exe", hash = "sha256:65540c21bfe14405a3a77e4c085ecfce88724743a4ead47c66b84defcf82c32e"}, + {file = "pyreadline-2.1.zip", hash = "sha256:4530592fc2e85b25b1a9f79664433da09237c1a270e4d78ea5aa3a2c7229e2d1"}, +] +pyrsistent = [ + {file = "pyrsistent-0.16.0.tar.gz", hash = "sha256:28669905fe725965daa16184933676547c5bb40a5153055a8dee2a4bd7933ad3"}, +] +pytest = [ + {file = "pytest-5.4.3-py3-none-any.whl", hash = "sha256:5c0db86b698e8f170ba4582a492248919255fcd4c79b1ee64ace34301fb589a1"}, + {file = "pytest-5.4.3.tar.gz", hash = "sha256:7979331bfcba207414f5e1263b5a0f8f521d0f457318836a7355531ed1a4c7d8"}, +] +pytest-asyncio = [ + {file = "pytest-asyncio-0.10.0.tar.gz", hash = "sha256:9fac5100fd716cbecf6ef89233e8590a4ad61d729d1732e0a96b84182df1daaf"}, + {file = "pytest_asyncio-0.10.0-py3-none-any.whl", hash = "sha256:d734718e25cfc32d2bf78d346e99d33724deeba774cc4afdf491530c6184b63b"}, +] +pytest-cov = [ + {file = "pytest-cov-2.10.1.tar.gz", hash = "sha256:47bd0ce14056fdd79f93e1713f88fad7bdcc583dcd7783da86ef2f085a0bb88e"}, + {file = "pytest_cov-2.10.1-py2.py3-none-any.whl", hash = "sha256:45ec2d5182f89a81fc3eb29e3d1ed3113b9e9a873bcddb2a71faaab066110191"}, +] +pytest-forked = [ + {file = "pytest-forked-1.3.0.tar.gz", hash = "sha256:6aa9ac7e00ad1a539c41bec6d21011332de671e938c7637378ec9710204e37ca"}, + {file = "pytest_forked-1.3.0-py2.py3-none-any.whl", hash = "sha256:dc4147784048e70ef5d437951728825a131b81714b398d5d52f17c7c144d8815"}, +] +pytest-localserver = [ + {file = "pytest-localserver-0.5.0.tar.gz", hash = "sha256:3a5427909d1dfda10772c1bae4b9803679c0a8f04adb66c338ac607773bfefc2"}, +] +pytest-sanic = [ + {file = "pytest-sanic-1.6.1.tar.gz", hash = "sha256:99e02c28cfa18a0a9af0cd151dddf0eca373279b9bac808733746f7ed7030ecc"}, + {file = "pytest_sanic-1.6.1-py3-none-any.whl", hash = "sha256:f258c7e34818d316ff75882f865a8b08714c732461fad2d54fbe7bc7c266e6c7"}, +] +pytest-xdist = [ + {file = "pytest-xdist-1.34.0.tar.gz", hash = "sha256:340e8e83e2a4c0d861bdd8d05c5d7b7143f6eea0aba902997db15c2a86be04ee"}, + {file = "pytest_xdist-1.34.0-py2.py3-none-any.whl", hash = "sha256:ba5d10729372d65df3ac150872f9df5d2ed004a3b0d499cc0164aafedd8c7b66"}, +] +python-crfsuite = [ + {file = "python-crfsuite-0.9.7.tar.gz", hash = "sha256:3b4538d2ce5007e4e42005818247bf43ade89ef08a66d158462e2f7c5d63cee7"}, + {file = "python_crfsuite-0.9.7-cp27-cp27m-macosx_10_13_x86_64.whl", hash = "sha256:cd18b340c5a45ec200e8ce1167318dfc5d915ca9aad459dfa8675d014fd30650"}, + {file = 
"python_crfsuite-0.9.7-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:d386d3a1e8d2065b4770c0dd06877ac28d7a94f61cd8447af3fa7a49551e98f9"}, + {file = "python_crfsuite-0.9.7-cp27-cp27m-win32.whl", hash = "sha256:2390c7cf62c72179b96c130048cec981173d3873ded532f739ba5ff770ed2d39"}, + {file = "python_crfsuite-0.9.7-cp27-cp27m-win_amd64.whl", hash = "sha256:bb57e551d86c83ec6a719c9884c571cb9a9b013a78fe0c317b0677c3c9542965"}, + {file = "python_crfsuite-0.9.7-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:b46811138745d9d62ff7674bc7a14a9cc974c065dadfc6f78e0dc19832066ec2"}, + {file = "python_crfsuite-0.9.7-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:4c9effa3cf7087cfecaa91ccada1ff9998b276bbde285700ef405345890253b1"}, + {file = "python_crfsuite-0.9.7-cp35-cp35m-win32.whl", hash = "sha256:b3da774cedf542202533b014347b86fbc25191356f0d5568f9784f8eb77e7ef6"}, + {file = "python_crfsuite-0.9.7-cp35-cp35m-win_amd64.whl", hash = "sha256:5ebb57783a0723d46d82d462fbfd6111e62e48533bfe1fbcd5ffb8dc1ba7a573"}, + {file = "python_crfsuite-0.9.7-cp36-cp36m-macosx_10_13_x86_64.whl", hash = "sha256:9934e684ff89ae97be52971c4c127329b1e1604ada9f903c7427a7062f256fc6"}, + {file = "python_crfsuite-0.9.7-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:c56f34b50049de3127353214af45bc9b437fd6c23202b83abb0b8052d86a248b"}, + {file = "python_crfsuite-0.9.7-cp36-cp36m-win32.whl", hash = "sha256:8704a6b7c7c64c4aa158125c89e9e08377a0169e83c75094aa65833b771d3078"}, + {file = "python_crfsuite-0.9.7-cp36-cp36m-win_amd64.whl", hash = "sha256:1d2faa31771df2370bcf15855aa403416d14f088d3e81b19de857ea013a697b0"}, + {file = "python_crfsuite-0.9.7-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:d03ca82d34b45c6efa8f086eb05c7217e4a7fed34640e714775deaa08b61e6d2"}, + {file = "python_crfsuite-0.9.7-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:0dc0149a62764e7d24d4f1a362f51b02e0283ac2b2469ce7f36666ece0b55855"}, + {file = "python_crfsuite-0.9.7-cp37-cp37m-win32.whl", hash = "sha256:b1568ab4c7a97f54b4d57f5b9795a4d6d841f7dc7923dd40414e34a93500cc42"}, + {file = "python_crfsuite-0.9.7-cp37-cp37m-win_amd64.whl", hash = "sha256:e905914a688138c29205a6752e768965ef3b0bfc46102b4a94316fd00dac7bc2"}, + {file = "python_crfsuite-0.9.7-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:397bac9cd4bae7a1b27d215c0119d33ff51c4ec5343d1f474867fd1a04c18a1d"}, + {file = "python_crfsuite-0.9.7-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:946ef3481c8dcd7c331123dd39b227cc52a386322967db78db650c58a6c972df"}, + {file = "python_crfsuite-0.9.7-cp38-cp38-win32.whl", hash = "sha256:df9edb37c90744c3aafd5d7dbf7c50fc486fe189e0e85a1deaf7af995ecac7b5"}, + {file = "python_crfsuite-0.9.7-cp38-cp38-win_amd64.whl", hash = "sha256:caa980287a90fd8c659c6d936f5f4a5b28d0157ce530ad90a6430faed1cf147f"}, + {file = "python_crfsuite-0.9.7-py2.7-win-amd64.egg", hash = "sha256:a14959d27475f379711798e1cbdad79ebcab07976ea52d5b4862c36132ae16f5"}, + {file = "python_crfsuite-0.9.7-py2.7-win32.egg", hash = "sha256:9e8b03b02866c23e9618245757cf70cbdef18b9ce0893121c23ccd114fb78508"}, + {file = "python_crfsuite-0.9.7-py3.5-win-amd64.egg", hash = "sha256:09faa4425b9d8c128946c68c58c8efd5f28908ddf6b941af97475e2072f61495"}, + {file = "python_crfsuite-0.9.7-py3.5-win32.egg", hash = "sha256:4753c42cdd6c7f48ea745943f641c23d87a9547d22a07ea45903702cea1c7be2"}, + {file = "python_crfsuite-0.9.7-py3.6-win-amd64.egg", hash = "sha256:9aede38a4c93c90b9fa1b291c2e12521bcf718d6900beae0f933667f184c68ba"}, + {file = "python_crfsuite-0.9.7-py3.6-win32.egg", hash = 
"sha256:dfbfbfc298057e56532151910f042bb4b579502037d9403627a72cc51d572961"}, + {file = "python_crfsuite-0.9.7-py3.7-win-amd64.egg", hash = "sha256:ac25832a8ab55f3a0a91c863e7f4f270ccac9d34b2bf1e2ac457fc8e97c81ba2"}, + {file = "python_crfsuite-0.9.7-py3.7-win32.egg", hash = "sha256:468bcb736a98627df89708f631cfd0e0c5c7825b545ea1a1e91d7db2bbad88a6"}, + {file = "python_crfsuite-0.9.7-py3.8-win-amd64.egg", hash = "sha256:5cff06b51c16594ab4132d72a8b4b381ff4351a1825e388e120739c223ca849e"}, + {file = "python_crfsuite-0.9.7-py3.8-win32.egg", hash = "sha256:263f29c656fbb63d8d198d30ec9bca5b6fc7fab61fd20dd2f7cab795a613a85a"}, +] +python-dateutil = [ + {file = "python-dateutil-2.8.1.tar.gz", hash = "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c"}, + {file = "python_dateutil-2.8.1-py2.py3-none-any.whl", hash = "sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a"}, +] +python-engineio = [ + {file = "python-engineio-3.13.1.tar.gz", hash = "sha256:133bdb5fb89f43a53f8612fb1ddbb3a453318713dea18a9ecf5346ed0c0f793c"}, + {file = "python_engineio-3.13.1-py2.py3-none-any.whl", hash = "sha256:41353c2539493e9e30e0e75e53f9cbb670f09a5ebcf82fe738081a9ba28fe55c"}, +] +python-jose = [ + {file = "python-jose-3.1.0.tar.gz", hash = "sha256:8484b7fdb6962e9d242cce7680469ecf92bda95d10bbcbbeb560cacdff3abfce"}, + {file = "python_jose-3.1.0-py2.py3-none-any.whl", hash = "sha256:1ac4caf4bfebd5a70cf5bd82702ed850db69b0b6e1d0ae7368e5f99ac01c9571"}, +] +python-socketio = [ + {file = "python-socketio-4.6.0.tar.gz", hash = "sha256:358d8fbbc029c4538ea25bcaa283e47f375be0017fcba829de8a3a731c9df25a"}, + {file = "python_socketio-4.6.0-py2.py3-none-any.whl", hash = "sha256:d437f797c44b6efba2f201867cf02b8c96b97dff26d4e4281ac08b45817cd522"}, +] +python-telegram-bot = [ + {file = "python-telegram-bot-12.8.tar.gz", hash = "sha256:327186c56469216207dcdf8706892e58e0a62e51ef46f5143268e387bbb4edc3"}, + {file = "python_telegram_bot-12.8-py2.py3-none-any.whl", hash = "sha256:7eebed539ccacf77896cff9e41d1f68746b8ff3ca4da1e2e59285e9c749cb050"}, +] +pytype = [ + {file = "pytype-2020.8.10-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:da1977a1aa74fbd237e889c1d29421d490e0be9a91a22efd96fbca2570ef9165"}, + {file = "pytype-2020.8.10-cp35-cp35m-manylinux2014_x86_64.whl", hash = "sha256:e0909b99aff8eff0ece91fd64e00b935f0e4fecb51359d83d742b27db160dd00"}, + {file = "pytype-2020.8.10-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:6b0cd56b411738eb607a299437ac405cc94208875e97ba56332105103676a903"}, + {file = "pytype-2020.8.10-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:08662a6d426a4ef246ba36a807d526734f437451a78f83a140d338e305bc877a"}, + {file = "pytype-2020.8.10-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:768c9ea0b08f40ce8e1ed8b9207862394d770fe3340ebebfd4210a82af530d67"}, + {file = "pytype-2020.8.10-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:cd4399abd26a4a3498eca1ac4ad264cb407b17dfd826f9d5f14d6c06c78cf42a"}, + {file = "pytype-2020.8.10.tar.gz", hash = "sha256:6385b6837a6db69c42eb477e8f7539c0b986ec6753eab4d811553d63d58a7785"}, +] +pytz = [ + {file = "pytz-2020.1-py2.py3-none-any.whl", hash = "sha256:a494d53b6d39c3c6e44c3bec237336e14305e4f29bbf800b599253057fbb79ed"}, + {file = "pytz-2020.1.tar.gz", hash = "sha256:c35965d010ce31b23eeb663ed3cc8c906275d6be1a34393a1d73a41febf4a048"}, +] +pywin32 = [ + {file = "pywin32-227-cp27-cp27m-win32.whl", hash = "sha256:371fcc39416d736401f0274dd64c2302728c9e034808e37381b5e1b22be4a6b0"}, + {file = "pywin32-227-cp27-cp27m-win_amd64.whl", hash = 
"sha256:4cdad3e84191194ea6d0dd1b1b9bdda574ff563177d2adf2b4efec2a244fa116"}, + {file = "pywin32-227-cp35-cp35m-win32.whl", hash = "sha256:f4c5be1a293bae0076d93c88f37ee8da68136744588bc5e2be2f299a34ceb7aa"}, + {file = "pywin32-227-cp35-cp35m-win_amd64.whl", hash = "sha256:a929a4af626e530383a579431b70e512e736e9588106715215bf685a3ea508d4"}, + {file = "pywin32-227-cp36-cp36m-win32.whl", hash = "sha256:300a2db938e98c3e7e2093e4491439e62287d0d493fe07cce110db070b54c0be"}, + {file = "pywin32-227-cp36-cp36m-win_amd64.whl", hash = "sha256:9b31e009564fb95db160f154e2aa195ed66bcc4c058ed72850d047141b36f3a2"}, + {file = "pywin32-227-cp37-cp37m-win32.whl", hash = "sha256:47a3c7551376a865dd8d095a98deba954a98f326c6fe3c72d8726ca6e6b15507"}, + {file = "pywin32-227-cp37-cp37m-win_amd64.whl", hash = "sha256:31f88a89139cb2adc40f8f0e65ee56a8c585f629974f9e07622ba80199057511"}, + {file = "pywin32-227-cp38-cp38-win32.whl", hash = "sha256:7f18199fbf29ca99dff10e1f09451582ae9e372a892ff03a28528a24d55875bc"}, + {file = "pywin32-227-cp38-cp38-win_amd64.whl", hash = "sha256:7c1ae32c489dc012930787f06244426f8356e129184a02c25aef163917ce158e"}, + {file = "pywin32-227-cp39-cp39-win32.whl", hash = "sha256:c054c52ba46e7eb6b7d7dfae4dbd987a1bb48ee86debe3f245a2884ece46e295"}, + {file = "pywin32-227-cp39-cp39-win_amd64.whl", hash = "sha256:f27cec5e7f588c3d1051651830ecc00294f90728d19c3bf6916e6dba93ea357c"}, +] +pyyaml = [ + {file = "PyYAML-5.3.1-cp27-cp27m-win32.whl", hash = "sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f"}, + {file = "PyYAML-5.3.1-cp27-cp27m-win_amd64.whl", hash = "sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76"}, + {file = "PyYAML-5.3.1-cp35-cp35m-win32.whl", hash = "sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2"}, + {file = "PyYAML-5.3.1-cp35-cp35m-win_amd64.whl", hash = "sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c"}, + {file = "PyYAML-5.3.1-cp36-cp36m-win32.whl", hash = "sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2"}, + {file = "PyYAML-5.3.1-cp36-cp36m-win_amd64.whl", hash = "sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648"}, + {file = "PyYAML-5.3.1-cp37-cp37m-win32.whl", hash = "sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a"}, + {file = "PyYAML-5.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf"}, + {file = "PyYAML-5.3.1-cp38-cp38-win32.whl", hash = "sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97"}, + {file = "PyYAML-5.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee"}, + {file = "PyYAML-5.3.1.tar.gz", hash = "sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d"}, +] +questionary = [ + {file = "questionary-1.5.2-py3-none-any.whl", hash = "sha256:6998a1fe0639daec0da44e0a973f387e7c778bdc418d76ecfa45a7b3a0997049"}, + {file = "questionary-1.5.2.tar.gz", hash = "sha256:f6e41e36b6c86fe0c3ff12a30c6c6a4e80129efba5ad0a115d71fd5df119c726"}, +] +rasa-sdk = [ + {file = "rasa-sdk-2.0.0a2.tar.gz", hash = "sha256:172673b9d80919e53586c830ee7a890f98aaf6cd9f6f351b532fd97c86af0337"}, + {file = "rasa_sdk-2.0.0a2-py3-none-any.whl", hash = "sha256:fb5832178f5422b7a3359b548f0dac8425141fdbbb6d903fc3a70103d7f447a8"}, +] +redis = [ + {file = "redis-3.5.3-py2.py3-none-any.whl", hash = "sha256:432b788c4530cfe16d8d943a09d40ca6c16149727e4afe8c2c9d5580c59d9f24"}, + {file = "redis-3.5.3.tar.gz", hash = 
"sha256:0e7e0cfca8660dea8b7d5cd8c4f6c5e29e11f31158c0b0ae91a397f00e5a05a2"}, +] +regex = [ + {file = "regex-2020.6.8-cp27-cp27m-win32.whl", hash = "sha256:fbff901c54c22425a5b809b914a3bfaf4b9570eee0e5ce8186ac71eb2025191c"}, + {file = "regex-2020.6.8-cp27-cp27m-win_amd64.whl", hash = "sha256:112e34adf95e45158c597feea65d06a8124898bdeac975c9087fe71b572bd938"}, + {file = "regex-2020.6.8-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:92d8a043a4241a710c1cf7593f5577fbb832cf6c3a00ff3fc1ff2052aff5dd89"}, + {file = "regex-2020.6.8-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:bae83f2a56ab30d5353b47f9b2a33e4aac4de9401fb582b55c42b132a8ac3868"}, + {file = "regex-2020.6.8-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:b2ba0f78b3ef375114856cbdaa30559914d081c416b431f2437f83ce4f8b7f2f"}, + {file = "regex-2020.6.8-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:95fa7726d073c87141f7bbfb04c284901f8328e2d430eeb71b8ffdd5742a5ded"}, + {file = "regex-2020.6.8-cp36-cp36m-win32.whl", hash = "sha256:e3cdc9423808f7e1bb9c2e0bdb1c9dc37b0607b30d646ff6faf0d4e41ee8fee3"}, + {file = "regex-2020.6.8-cp36-cp36m-win_amd64.whl", hash = "sha256:c78e66a922de1c95a208e4ec02e2e5cf0bb83a36ceececc10a72841e53fbf2bd"}, + {file = "regex-2020.6.8-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:08997a37b221a3e27d68ffb601e45abfb0093d39ee770e4257bd2f5115e8cb0a"}, + {file = "regex-2020.6.8-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:2f6f211633ee8d3f7706953e9d3edc7ce63a1d6aad0be5dcee1ece127eea13ae"}, + {file = "regex-2020.6.8-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:55b4c25cbb3b29f8d5e63aeed27b49fa0f8476b0d4e1b3171d85db891938cc3a"}, + {file = "regex-2020.6.8-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:89cda1a5d3e33ec9e231ece7307afc101b5217523d55ef4dc7fb2abd6de71ba3"}, + {file = "regex-2020.6.8-cp37-cp37m-win32.whl", hash = "sha256:690f858d9a94d903cf5cada62ce069b5d93b313d7d05456dbcd99420856562d9"}, + {file = "regex-2020.6.8-cp37-cp37m-win_amd64.whl", hash = "sha256:1700419d8a18c26ff396b3b06ace315b5f2a6e780dad387e4c48717a12a22c29"}, + {file = "regex-2020.6.8-cp38-cp38-manylinux1_i686.whl", hash = "sha256:654cb773b2792e50151f0e22be0f2b6e1c3a04c5328ff1d9d59c0398d37ef610"}, + {file = "regex-2020.6.8-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:52e1b4bef02f4040b2fd547357a170fc1146e60ab310cdbdd098db86e929b387"}, + {file = "regex-2020.6.8-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:cf59bbf282b627130f5ba68b7fa3abdb96372b24b66bdf72a4920e8153fc7910"}, + {file = "regex-2020.6.8-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:5aaa5928b039ae440d775acea11d01e42ff26e1561c0ffcd3d805750973c6baf"}, + {file = "regex-2020.6.8-cp38-cp38-win32.whl", hash = "sha256:97712e0d0af05febd8ab63d2ef0ab2d0cd9deddf4476f7aa153f76feef4b2754"}, + {file = "regex-2020.6.8-cp38-cp38-win_amd64.whl", hash = "sha256:6ad8663c17db4c5ef438141f99e291c4d4edfeaacc0ce28b5bba2b0bf273d9b5"}, + {file = "regex-2020.6.8.tar.gz", hash = "sha256:e9b64e609d37438f7d6e68c2546d2cb8062f3adb27e6336bc129b51be20773ac"}, +] +requests = [ + {file = "requests-2.24.0-py2.py3-none-any.whl", hash = "sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898"}, + {file = "requests-2.24.0.tar.gz", hash = "sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b"}, +] +requests-oauthlib = [ + {file = "requests-oauthlib-1.3.0.tar.gz", hash = "sha256:b4261601a71fd721a8bd6d7aa1cc1d6a8a93b4a9f5e96626f8e4d91e8beeaa6a"}, + {file = "requests_oauthlib-1.3.0-py2.py3-none-any.whl", hash = 
"sha256:7f71572defaecd16372f9006f33c2ec8c077c3cfa6f5911a9a90202beb513f3d"}, + {file = "requests_oauthlib-1.3.0-py3.7.egg", hash = "sha256:fa6c47b933f01060936d87ae9327fead68768b69c6c9ea2109c48be30f2d4dbc"}, +] +requests-toolbelt = [ + {file = "requests-toolbelt-0.9.1.tar.gz", hash = "sha256:968089d4584ad4ad7c171454f0a5c6dac23971e9472521ea3b6d49d610aa6fc0"}, + {file = "requests_toolbelt-0.9.1-py2.py3-none-any.whl", hash = "sha256:380606e1d10dc85c3bd47bf5a6095f815ec007be7a8b69c878507068df059e6f"}, +] +responses = [ + {file = "responses-0.10.16-py2.py3-none-any.whl", hash = "sha256:cf55b7c89fc77b9ebbc5e5924210b6d0ef437061b80f1273d7e202069e43493c"}, + {file = "responses-0.10.16.tar.gz", hash = "sha256:fa125311607ab3e57d8fcc4da20587f041b4485bdfb06dd6bdf19d8b66f870c1"}, +] +rfc3986 = [ + {file = "rfc3986-1.4.0-py2.py3-none-any.whl", hash = "sha256:af9147e9aceda37c91a05f4deb128d4b4b49d6b199775fd2d2927768abdc8f50"}, + {file = "rfc3986-1.4.0.tar.gz", hash = "sha256:112398da31a3344dc25dbf477d8df6cb34f9278a94fee2625d89e4514be8bb9d"}, +] +rocketchat-api = [ + {file = "rocketchat_API-1.4-py2-none-any.whl", hash = "sha256:91483b2702c0917f8491f69d5b38eba495afae4ce5fee4c46838ea7adb7c2333"}, + {file = "rocketchat_API-1.4-py3-none-any.whl", hash = "sha256:8a5826a972547a6ffb09db171e1ad9990eebf32ed396a3e1bec1f13c18577311"}, +] +rsa = [ + {file = "rsa-4.6-py3-none-any.whl", hash = "sha256:6166864e23d6b5195a5cfed6cd9fed0fe774e226d8f854fcb23b7bbef0350233"}, + {file = "rsa-4.6.tar.gz", hash = "sha256:109ea5a66744dd859bf16fe904b8d8b627adafb9408753161e766a92e7d681fa"}, +] +"ruamel.yaml" = [ + {file = "ruamel.yaml-0.16.10-py2.py3-none-any.whl", hash = "sha256:0962fd7999e064c4865f96fb1e23079075f4a2a14849bcdc5cdba53a24f9759b"}, + {file = "ruamel.yaml-0.16.10.tar.gz", hash = "sha256:099c644a778bf72ffa00524f78dd0b6476bca94a1da344130f4bf3381ce5b954"}, +] +"ruamel.yaml.clib" = [ + {file = "ruamel.yaml.clib-0.2.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:9c6d040d0396c28d3eaaa6cb20152cb3b2f15adf35a0304f4f40a3cf9f1d2448"}, + {file = "ruamel.yaml.clib-0.2.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:4d55386129291b96483edcb93b381470f7cd69f97585829b048a3d758d31210a"}, + {file = "ruamel.yaml.clib-0.2.0-cp27-cp27m-win32.whl", hash = "sha256:8073c8b92b06b572e4057b583c3d01674ceaf32167801fe545a087d7a1e8bf52"}, + {file = "ruamel.yaml.clib-0.2.0-cp27-cp27m-win_amd64.whl", hash = "sha256:615b0396a7fad02d1f9a0dcf9f01202bf9caefee6265198f252c865f4227fcc6"}, + {file = "ruamel.yaml.clib-0.2.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:a0ff786d2a7dbe55f9544b3f6ebbcc495d7e730df92a08434604f6f470b899c5"}, + {file = "ruamel.yaml.clib-0.2.0-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:ea4362548ee0cbc266949d8a441238d9ad3600ca9910c3fe4e82ee3a50706973"}, + {file = "ruamel.yaml.clib-0.2.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:77556a7aa190be9a2bd83b7ee075d3df5f3c5016d395613671487e79b082d784"}, + {file = "ruamel.yaml.clib-0.2.0-cp35-cp35m-win32.whl", hash = "sha256:392b7c371312abf27fb549ec2d5e0092f7ef6e6c9f767bfb13e83cb903aca0fd"}, + {file = "ruamel.yaml.clib-0.2.0-cp35-cp35m-win_amd64.whl", hash = "sha256:ed5b3698a2bb241b7f5cbbe277eaa7fe48b07a58784fba4f75224fd066d253ad"}, + {file = "ruamel.yaml.clib-0.2.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:7aee724e1ff424757b5bd8f6c5bbdb033a570b2b4683b17ace4dbe61a99a657b"}, + {file = "ruamel.yaml.clib-0.2.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:d0d3ac228c9bbab08134b4004d748cf9f8743504875b3603b3afbb97e3472947"}, + {file = 
"ruamel.yaml.clib-0.2.0-cp36-cp36m-win32.whl", hash = "sha256:f9dcc1ae73f36e8059589b601e8e4776b9976effd76c21ad6a855a74318efd6e"}, + {file = "ruamel.yaml.clib-0.2.0-cp36-cp36m-win_amd64.whl", hash = "sha256:1e77424825caba5553bbade750cec2277ef130647d685c2b38f68bc03453bac6"}, + {file = "ruamel.yaml.clib-0.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d10e9dd744cf85c219bf747c75194b624cc7a94f0c80ead624b06bfa9f61d3bc"}, + {file = "ruamel.yaml.clib-0.2.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:550168c02d8de52ee58c3d8a8193d5a8a9491a5e7b2462d27ac5bf63717574c9"}, + {file = "ruamel.yaml.clib-0.2.0-cp37-cp37m-win32.whl", hash = "sha256:57933a6986a3036257ad7bf283529e7c19c2810ff24c86f4a0cfeb49d2099919"}, + {file = "ruamel.yaml.clib-0.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:b1b7fcee6aedcdc7e62c3a73f238b3d080c7ba6650cd808bce8d7761ec484070"}, + {file = "ruamel.yaml.clib-0.2.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:be018933c2f4ee7de55e7bd7d0d801b3dfb09d21dad0cce8a97995fd3e44be30"}, + {file = "ruamel.yaml.clib-0.2.0.tar.gz", hash = "sha256:b66832ea8077d9b3f6e311c4a53d06273db5dc2db6e8a908550f3c14d67e718c"}, +] +s3transfer = [ + {file = "s3transfer-0.3.3-py2.py3-none-any.whl", hash = "sha256:2482b4259524933a022d59da830f51bd746db62f047d6eb213f2f8855dcb8a13"}, + {file = "s3transfer-0.3.3.tar.gz", hash = "sha256:921a37e2aefc64145e7b73d50c71bb4f26f46e4c9f414dc648c6245ff92cf7db"}, +] +sacremoses = [ + {file = "sacremoses-0.0.43.tar.gz", hash = "sha256:123c1bf2664351fb05e16f87d3786dbe44a050cfd7b85161c09ad9a63a8e2948"}, +] +sanic = [ + {file = "sanic-19.12.2-py3-none-any.whl", hash = "sha256:18350ed6e264631260044f6253f139f1ac83c4ce8a0202ec900ec5b50c5370ab"}, + {file = "sanic-19.12.2.tar.gz", hash = "sha256:0760b568245917481d65456721c1c50ba72e53e15a191392277751377f2cf770"}, +] +sanic-cors = [ + {file = "Sanic-Cors-0.10.0.post3.tar.gz", hash = "sha256:abb0f8b17d2ecb12d62ecac42ca62bfed9b07fd00ffd83219ad23b99d4df3f23"}, + {file = "Sanic_Cors-0.10.0.post3-py2.py3-none-any.whl", hash = "sha256:b919d65643de810ed1ed15657b8bc75c310fa1ac8eb491d59deaa9404756b745"}, +] +sanic-jwt = [ + {file = "sanic-jwt-1.4.1.tar.gz", hash = "sha256:f55b8c50735340cf943af642cbdfedcf774ca3f9028e60d6784d26cd9be9246b"}, +] +sanic-plugins-framework = [ + {file = "Sanic-Plugins-Framework-0.9.3.tar.gz", hash = "sha256:9fd3d6270a676134c8440f78ce6f0817574b456d6b0cb7c3d73c06bf1b7f8956"}, + {file = "Sanic_Plugins_Framework-0.9.3-py2.py3-none-any.whl", hash = "sha256:347a3657984b828fc6178f86f49f0519f32d45b588724a4f96b12220bce3b4a2"}, +] +scikit-learn = [ + {file = "scikit-learn-0.23.2.tar.gz", hash = "sha256:20766f515e6cd6f954554387dfae705d93c7b544ec0e6c6a5d8e006f6f7ef480"}, + {file = "scikit_learn-0.23.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:98508723f44c61896a4e15894b2016762a55555fbf09365a0bb1870ecbd442de"}, + {file = "scikit_learn-0.23.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:a64817b050efd50f9abcfd311870073e500ae11b299683a519fbb52d85e08d25"}, + {file = "scikit_learn-0.23.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:daf276c465c38ef736a79bd79fc80a249f746bcbcae50c40945428f7ece074f8"}, + {file = "scikit_learn-0.23.2-cp36-cp36m-win32.whl", hash = "sha256:cb3e76380312e1f86abd20340ab1d5b3cc46a26f6593d3c33c9ea3e4c7134028"}, + {file = "scikit_learn-0.23.2-cp36-cp36m-win_amd64.whl", hash = "sha256:0a127cc70990d4c15b1019680bfedc7fec6c23d14d3719fdf9b64b22d37cdeca"}, + {file = "scikit_learn-0.23.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:2aa95c2f17d2f80534156215c87bee72b6aa314a7f8b8fe92a2d71f47280570d"}, + {file = "scikit_learn-0.23.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:6c28a1d00aae7c3c9568f61aafeaad813f0f01c729bee4fd9479e2132b215c1d"}, + {file = "scikit_learn-0.23.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:da8e7c302003dd765d92a5616678e591f347460ac7b53e53d667be7dfe6d1b10"}, + {file = "scikit_learn-0.23.2-cp37-cp37m-win32.whl", hash = "sha256:d9a1ce5f099f29c7c33181cc4386660e0ba891b21a60dc036bf369e3a3ee3aec"}, + {file = "scikit_learn-0.23.2-cp37-cp37m-win_amd64.whl", hash = "sha256:914ac2b45a058d3f1338d7736200f7f3b094857758895f8667be8a81ff443b5b"}, + {file = "scikit_learn-0.23.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7671bbeddd7f4f9a6968f3b5442dac5f22bf1ba06709ef888cc9132ad354a9ab"}, + {file = "scikit_learn-0.23.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:d0dcaa54263307075cb93d0bee3ceb02821093b1b3d25f66021987d305d01dce"}, + {file = "scikit_learn-0.23.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:5ce7a8021c9defc2b75620571b350acc4a7d9763c25b7593621ef50f3bd019a2"}, + {file = "scikit_learn-0.23.2-cp38-cp38-win32.whl", hash = "sha256:0d39748e7c9669ba648acf40fb3ce96b8a07b240db6888563a7cb76e05e0d9cc"}, + {file = "scikit_learn-0.23.2-cp38-cp38-win_amd64.whl", hash = "sha256:1b8a391de95f6285a2f9adffb7db0892718950954b7149a70c783dc848f104ea"}, +] +scipy = [ + {file = "scipy-1.4.1-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:c5cac0c0387272ee0e789e94a570ac51deb01c796b37fb2aad1fb13f85e2f97d"}, + {file = "scipy-1.4.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:a144811318853a23d32a07bc7fd5561ff0cac5da643d96ed94a4ffe967d89672"}, + {file = "scipy-1.4.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:71eb180f22c49066f25d6df16f8709f215723317cc951d99e54dc88020ea57be"}, + {file = "scipy-1.4.1-cp35-cp35m-win32.whl", hash = "sha256:770254a280d741dd3436919d47e35712fb081a6ff8bafc0f319382b954b77802"}, + {file = "scipy-1.4.1-cp35-cp35m-win_amd64.whl", hash = "sha256:a1aae70d52d0b074d8121333bc807a485f9f1e6a69742010b33780df2e60cfe0"}, + {file = "scipy-1.4.1-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:bb517872058a1f087c4528e7429b4a44533a902644987e7b2fe35ecc223bc408"}, + {file = "scipy-1.4.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:dba8306f6da99e37ea08c08fef6e274b5bf8567bb094d1dbe86a20e532aca088"}, + {file = "scipy-1.4.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:386086e2972ed2db17cebf88610aab7d7f6e2c0ca30042dc9a89cf18dcc363fa"}, + {file = "scipy-1.4.1-cp36-cp36m-win32.whl", hash = "sha256:8d3bc3993b8e4be7eade6dcc6fd59a412d96d3a33fa42b0fa45dc9e24495ede9"}, + {file = "scipy-1.4.1-cp36-cp36m-win_amd64.whl", hash = "sha256:dc60bb302f48acf6da8ca4444cfa17d52c63c5415302a9ee77b3b21618090521"}, + {file = "scipy-1.4.1-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:787cc50cab3020a865640aba3485e9fbd161d4d3b0d03a967df1a2881320512d"}, + {file = "scipy-1.4.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:0902a620a381f101e184a958459b36d3ee50f5effd186db76e131cbefcbb96f7"}, + {file = "scipy-1.4.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:00af72998a46c25bdb5824d2b729e7dabec0c765f9deb0b504f928591f5ff9d4"}, + {file = "scipy-1.4.1-cp37-cp37m-win32.whl", hash = "sha256:9508a7c628a165c2c835f2497837bf6ac80eb25291055f56c129df3c943cbaf8"}, + {file = "scipy-1.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a2d6df9eb074af7f08866598e4ef068a2b310d98f87dc23bd1b90ec7bdcec802"}, + {file = "scipy-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:3092857f36b690a321a662fe5496cb816a7f4eecd875e1d36793d92d3f884073"}, + {file = "scipy-1.4.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:8a07760d5c7f3a92e440ad3aedcc98891e915ce857664282ae3c0220f3301eb6"}, + {file = "scipy-1.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:1e3190466d669d658233e8a583b854f6386dd62d655539b77b3fa25bfb2abb70"}, + {file = "scipy-1.4.1-cp38-cp38-win32.whl", hash = "sha256:cc971a82ea1170e677443108703a2ec9ff0f70752258d0e9f5433d00dda01f59"}, + {file = "scipy-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:2cce3f9847a1a51019e8c5b47620da93950e58ebc611f13e0d11f4980ca5fecb"}, + {file = "scipy-1.4.1.tar.gz", hash = "sha256:dee1bbf3a6c8f73b6b218cb28eed8dd13347ea2f87d572ce19b289d6fd3fbc59"}, +] +sentencepiece = [ + {file = "sentencepiece-0.1.92-cp27-cp27m-macosx_10_6_x86_64.whl", hash = "sha256:366cd4e01e723075786f0383d6b153965c09567ced96618dc2a3be2ca7353d5a"}, + {file = "sentencepiece-0.1.92-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:d1a2d2d83e1e9b38f873fadef090c0d6c020f522bf7f256b92ffe5464e0d8ef9"}, + {file = "sentencepiece-0.1.92-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:040c91321e3ad1b468283a2518bdcddcd7eaed61072dbe65357f03551b2a33c4"}, + {file = "sentencepiece-0.1.92-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:e6e32647417084623b4d17ec2e16de4be112198be9536d760ade05fb53749a3b"}, + {file = "sentencepiece-0.1.92-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:997bc09c70e9d6f8b880bffaf83900cf1dabf65afd0b4ea5461b4c813caaec40"}, + {file = "sentencepiece-0.1.92-cp35-cp35m-macosx_10_6_x86_64.whl", hash = "sha256:d15c0550b9e034ed7c43357abfbbe05e8c05e63116162ca9e2c4557b6dca2653"}, + {file = "sentencepiece-0.1.92-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:3d6760dad64d228bbd655aee5063a379e05d348efb3914334110e813fb0db941"}, + {file = "sentencepiece-0.1.92-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:e6ab8b0b67b345581aa0b49c0fc58a8c7515eb4a27b9cc24d3cbfe2affbdd7e8"}, + {file = "sentencepiece-0.1.92-cp36-cp36m-macosx_10_6_x86_64.whl", hash = "sha256:9939d818053e0929edbb144f14487e2cd007a32445658aa0376673aefc87d6d3"}, + {file = "sentencepiece-0.1.92-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:7755b14655e882fffeb4b76571bc6074d048727fb3b081b7605381edadcf9442"}, + {file = "sentencepiece-0.1.92-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:7fd16c761339f593596b63e50810a2d2eff964d428ab79a49674c7371c055561"}, + {file = "sentencepiece-0.1.92-cp36-cp36m-win32.whl", hash = "sha256:de808a218b197e6105bacafe0008a1c76d5c5d9be3d10dc4c6d2974653f01034"}, + {file = "sentencepiece-0.1.92-cp36-cp36m-win_amd64.whl", hash = "sha256:fa9cb02572bc28dad2c53761678d314e194bd8d3a87a3eb545427915b5034c57"}, + {file = "sentencepiece-0.1.92-cp37-cp37m-macosx_10_6_x86_64.whl", hash = "sha256:3d8c36c21d4ed395f24b98e3289128448e23288b8f1a9242d5f33df13d16fd90"}, + {file = "sentencepiece-0.1.92-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:b36a8dfd4de5c65ed0712839985efa4ecd21b645d870db5c18823b29f6c9117a"}, + {file = "sentencepiece-0.1.92-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:da5e5c29b4754da8cda52e72a1bfcfddd7d60bfe73bdf8e286650eacc205ac62"}, + {file = "sentencepiece-0.1.92-cp37-cp37m-win32.whl", hash = "sha256:ef587b573700e77e6bd8e4d702e71129ec71171c712cd15bac351d5726d611fa"}, + {file = "sentencepiece-0.1.92-cp37-cp37m-win_amd64.whl", hash = "sha256:c46a687723433d2e933ecbe201d818c3b7a18d0e0043794ed750c3472391b0c7"}, + {file = "sentencepiece-0.1.92-cp38-cp38-macosx_10_6_x86_64.whl", hash = 
"sha256:8e314d06ff358881df0ee784f653d222c4ea551eb7fcac00c80b082d63810602"}, + {file = "sentencepiece-0.1.92-cp38-cp38-manylinux1_i686.whl", hash = "sha256:fbb9060839c95799029f4ddf2154aa0fd2e73836e589b1c8ea02ece236252599"}, + {file = "sentencepiece-0.1.92-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:0ae4ff32626786d8c46823bdffce1098dcd5c8fdf0875000b9b95974301d8162"}, + {file = "sentencepiece-0.1.92-cp38-cp38-win32.whl", hash = "sha256:f7a695e6bfc1a78dddacb0cbbbaba02a12da26ec1570cb91a9744fd46467a9fc"}, + {file = "sentencepiece-0.1.92-cp38-cp38-win_amd64.whl", hash = "sha256:21c23ebe3c7299bd3d2e09f68a669b28d3a2f652f67798176840b5a31f952667"}, + {file = "sentencepiece-0.1.92.tar.gz", hash = "sha256:5daf059b31ef82b52698f86891ed8376550d36a5b87a4a3590a94a255346ee08"}, +] +sentinels = [ + {file = "sentinels-1.0.0.tar.gz", hash = "sha256:7be0704d7fe1925e397e92d18669ace2f619c92b5d4eb21a89f31e026f9ff4b1"}, +] +six = [ + {file = "six-1.15.0-py2.py3-none-any.whl", hash = "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"}, + {file = "six-1.15.0.tar.gz", hash = "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"}, +] +sklearn-crfsuite = [ + {file = "sklearn-crfsuite-0.3.6.tar.gz", hash = "sha256:2f59aad3055e01a778a79a6352891cac04788e8b52688aa5bc8b11be7717861e"}, + {file = "sklearn_crfsuite-0.3.6-py2.py3-none-any.whl", hash = "sha256:6e9a42bc3de96941d5f7262335130955b8c380b1356147622368f385075705d9"}, +] +slackclient = [ + {file = "slackclient-2.8.0-py2.py3-none-any.whl", hash = "sha256:a7ea172a63b011770d3886e84d4b5c32055bab6ae6c0e03c88a98637c02456d6"}, + {file = "slackclient-2.8.0.tar.gz", hash = "sha256:373bebed7ed5c32bca84de1a206ebe2984b830aab8d04bd1836f082de6bf9105"}, +] +sniffio = [ + {file = "sniffio-1.1.0-py3-none-any.whl", hash = "sha256:20ed6d5b46f8ae136d00b9dcb807615d83ed82ceea6b2058cecb696765246da5"}, + {file = "sniffio-1.1.0.tar.gz", hash = "sha256:8e3810100f69fe0edd463d02ad407112542a11ffdc29f67db2bf3771afb87a21"}, +] +sortedcontainers = [ + {file = "sortedcontainers-2.2.2-py2.py3-none-any.whl", hash = "sha256:c633ebde8580f241f274c1f8994a665c0e54a17724fecd0cae2f079e09c36d3f"}, + {file = "sortedcontainers-2.2.2.tar.gz", hash = "sha256:4e73a757831fc3ca4de2859c422564239a31d8213d09a2a666e375807034d2ba"}, +] +spacy = [ + {file = "spacy-2.2.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:fd740cb1b50cd86c648f64313be4734b0c2a2931d83761f46821061f42d791a3"}, + {file = "spacy-2.2.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:01202066f75c7f2cfeb9c167c3184b5b0a9d465604b0ca553bd9e788353c5905"}, + {file = "spacy-2.2.4-cp36-cp36m-win_amd64.whl", hash = "sha256:f75ba238066455f5b5498a987b4e2c84705d92138e02e890e0b0a1d1eb2d9462"}, + {file = "spacy-2.2.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ce3886e9bfb9071d2708d2cd7157ada93ab378bbb38cf079842181cd671fc6f9"}, + {file = "spacy-2.2.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:212314be762bd40dfbbeeba1c4742c242e4b6ea3f9340891f0ff282b2e723ed0"}, + {file = "spacy-2.2.4-cp37-cp37m-win_amd64.whl", hash = "sha256:c5e6f8155f6b54a8ef89637b3c7d553f0ddb5478c4dd568fde7392efbf8a26c8"}, + {file = "spacy-2.2.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7313b4fa921ed997d9719f99f5a375d672d2f4a908c7750033c4b37d9fa8547a"}, + {file = "spacy-2.2.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:6c1618c05bf65ae4bc94608f2390130ca21112fb3d920d1a03727691e3e7fb1b"}, + {file = "spacy-2.2.4-cp38-cp38-win_amd64.whl", hash = "sha256:877d8e157a708c8b77c0dea61e526632f6d57f27be64087dac22a4581facea68"}, + {file = 
"spacy-2.2.4.tar.gz", hash = "sha256:f0f3a67c5841e6e35d62c98f40ebb3d132587d3aba4f4dccac5056c4e90ff5b9"}, +] +sqlalchemy = [ + {file = "SQLAlchemy-1.3.18-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:f11c2437fb5f812d020932119ba02d9e2bc29a6eca01a055233a8b449e3e1e7d"}, + {file = "SQLAlchemy-1.3.18-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:0ec575db1b54909750332c2e335c2bb11257883914a03bc5a3306a4488ecc772"}, + {file = "SQLAlchemy-1.3.18-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:f57be5673e12763dd400fea568608700a63ce1c6bd5bdbc3cc3a2c5fdb045274"}, + {file = "SQLAlchemy-1.3.18-cp27-cp27m-win32.whl", hash = "sha256:8cac7bb373a5f1423e28de3fd5fc8063b9c8ffe8957dc1b1a59cb90453db6da1"}, + {file = "SQLAlchemy-1.3.18-cp27-cp27m-win_amd64.whl", hash = "sha256:adad60eea2c4c2a1875eb6305a0b6e61a83163f8e233586a4d6a55221ef984fe"}, + {file = "SQLAlchemy-1.3.18-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:57aa843b783179ab72e863512e14bdcba186641daf69e4e3a5761d705dcc35b1"}, + {file = "SQLAlchemy-1.3.18-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:621f58cd921cd71ba6215c42954ffaa8a918eecd8c535d97befa1a8acad986dd"}, + {file = "SQLAlchemy-1.3.18-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:fc728ece3d5c772c196fd338a99798e7efac7a04f9cb6416299a3638ee9a94cd"}, + {file = "SQLAlchemy-1.3.18-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:736d41cfebedecc6f159fc4ac0769dc89528a989471dc1d378ba07d29a60ba1c"}, + {file = "SQLAlchemy-1.3.18-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:427273b08efc16a85aa2b39892817e78e3ed074fcb89b2a51c4979bae7e7ba98"}, + {file = "SQLAlchemy-1.3.18-cp35-cp35m-win32.whl", hash = "sha256:cbe1324ef52ff26ccde2cb84b8593c8bf930069dfc06c1e616f1bfd4e47f48a3"}, + {file = "SQLAlchemy-1.3.18-cp35-cp35m-win_amd64.whl", hash = "sha256:8fd452dc3d49b3cc54483e033de6c006c304432e6f84b74d7b2c68afa2569ae5"}, + {file = "SQLAlchemy-1.3.18-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:e89e0d9e106f8a9180a4ca92a6adde60c58b1b0299e1b43bd5e0312f535fbf33"}, + {file = "SQLAlchemy-1.3.18-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:6ac2558631a81b85e7fb7a44e5035347938b0a73f5fdc27a8566777d0792a6a4"}, + {file = "SQLAlchemy-1.3.18-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:87fad64529cde4f1914a5b9c383628e1a8f9e3930304c09cf22c2ae118a1280e"}, + {file = "SQLAlchemy-1.3.18-cp36-cp36m-win32.whl", hash = "sha256:e4624d7edb2576cd72bb83636cd71c8ce544d8e272f308bd80885056972ca299"}, + {file = "SQLAlchemy-1.3.18-cp36-cp36m-win_amd64.whl", hash = "sha256:89494df7f93b1836cae210c42864b292f9b31eeabca4810193761990dc689cce"}, + {file = "SQLAlchemy-1.3.18-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:716754d0b5490bdcf68e1e4925edc02ac07209883314ad01a137642ddb2056f1"}, + {file = "SQLAlchemy-1.3.18-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:50c4ee32f0e1581828843267d8de35c3298e86ceecd5e9017dc45788be70a864"}, + {file = "SQLAlchemy-1.3.18-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:d98bc827a1293ae767c8f2f18be3bb5151fd37ddcd7da2a5f9581baeeb7a3fa1"}, + {file = "SQLAlchemy-1.3.18-cp37-cp37m-win32.whl", hash = "sha256:0942a3a0df3f6131580eddd26d99071b48cfe5aaf3eab2783076fbc5a1c1882e"}, + {file = "SQLAlchemy-1.3.18-cp37-cp37m-win_amd64.whl", hash = "sha256:16593fd748944726540cd20f7e83afec816c2ac96b082e26ae226e8f7e9688cf"}, + {file = "SQLAlchemy-1.3.18-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:c26f95e7609b821b5f08a72dab929baa0d685406b953efd7c89423a511d5c413"}, + {file = "SQLAlchemy-1.3.18-cp38-cp38-manylinux1_x86_64.whl", hash = 
"sha256:512a85c3c8c3995cc91af3e90f38f460da5d3cade8dc3a229c8e0879037547c9"}, + {file = "SQLAlchemy-1.3.18-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:d05c4adae06bd0c7f696ae3ec8d993ed8ffcc4e11a76b1b35a5af8a099bd2284"}, + {file = "SQLAlchemy-1.3.18-cp38-cp38-win32.whl", hash = "sha256:109581ccc8915001e8037b73c29590e78ce74be49ca0a3630a23831f9e3ed6c7"}, + {file = "SQLAlchemy-1.3.18-cp38-cp38-win_amd64.whl", hash = "sha256:8619b86cb68b185a778635be5b3e6018623c0761dde4df2f112896424aa27bd8"}, + {file = "SQLAlchemy-1.3.18.tar.gz", hash = "sha256:da2fb75f64792c1fc64c82313a00c728a7c301efe6a60b7a9fe35b16b4368ce7"}, +] +srsly = [ + {file = "srsly-1.0.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:7c553a709fd56a37a07f969e849f55a0aeabaeb7677bebc588a640ab8ec134aa"}, + {file = "srsly-1.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:21cfb0e5dea2c4515b5c2daa78402d5782c6425b4f58af40d2e2cb45e4778d8c"}, + {file = "srsly-1.0.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:46213d8f094b348a9433c825ac1eba36a21aa25a8bae6f29c2f9f053e15be961"}, + {file = "srsly-1.0.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2179cf1e88c250e89e40227bd5848341011c170079b3d424987d067de6a73f42"}, + {file = "srsly-1.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:b94d8a13c60e3298a9ba12b1b211026e8378c7d087efd7ce46a3f2d8d4678d94"}, + {file = "srsly-1.0.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8beff52c104a7ffe4a15513a05dc0497998cf83aa1ca39454489994d18c1c07"}, + {file = "srsly-1.0.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:95849d84e8929be248a180e672c8ce1ed98b1341263bc983efdf8427465584f1"}, + {file = "srsly-1.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:3f3975e8cb67194d26dd03508469b1303f8b994f30e7782f7eae25fef6dc4aad"}, + {file = "srsly-1.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d409beb7257208633c974c01f9dc3265562fb6802caee7de21880761ba87c3ed"}, + {file = "srsly-1.0.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:18bad26c34cf5a8853fbf018fd168a7bf2ea7ce661e66476c25dac711cb79c9b"}, + {file = "srsly-1.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:29434753a77481ec6129991f4116f983085cc8005c1ad963261124842e8c05fc"}, + {file = "srsly-1.0.2.tar.gz", hash = "sha256:59258b81d567df207f8a0a33c4b5fa232afccf1d927c8ce3ba5395bfd64c0ed8"}, +] +sshpubkeys = [ + {file = "sshpubkeys-3.1.0-py2.py3-none-any.whl", hash = "sha256:9f73d51c2ef1e68cd7bde0825df29b3c6ec89f4ce24ebca3bf9eaa4a23a284db"}, + {file = "sshpubkeys-3.1.0.tar.gz", hash = "sha256:b388399caeeccdc145f06fd0d2665eeecc545385c60b55c282a15a022215af80"}, +] +tabulate = [ + {file = "tabulate-0.8.7-py3-none-any.whl", hash = "sha256:ac64cb76d53b1231d364babcd72abbb16855adac7de6665122f97b593f1eb2ba"}, + {file = "tabulate-0.8.7.tar.gz", hash = "sha256:db2723a20d04bcda8522165c73eea7c300eda74e0ce852d9022e0159d7895007"}, +] +tensorboard = [ + {file = "tensorboard-2.3.0-py3-none-any.whl", hash = "sha256:d34609ed83ff01dd5b49ef81031cfc9c166bba0dabd60197024f14df5e8eae5e"}, +] +tensorboard-plugin-wit = [ + {file = "tensorboard_plugin_wit-1.7.0-py3-none-any.whl", hash = "sha256:ee775f04821185c90d9a0e9c56970ee43d7c41403beb6629385b39517129685b"}, +] +tensorflow = [ + {file = "tensorflow-2.3.0-cp35-cp35m-macosx_10_11_x86_64.whl", hash = "sha256:c6fad4e944e20199e963e158fe626352e349865ea4ca71655f5456193a6d3b9d"}, + {file = "tensorflow-2.3.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:6f74ef59dc59cf8f2002738c65dffa591e2c332e9b1b4ced33ff8d39b6fb477c"}, + {file = "tensorflow-2.3.0-cp35-cp35m-win_amd64.whl", hash = 
"sha256:797d6ca09d4f69570458180b7813dc12efe9166ba60454b0df7bed531bb5e4f4"}, + {file = "tensorflow-2.3.0-cp36-cp36m-macosx_10_11_x86_64.whl", hash = "sha256:b1699903cf3a9f41c379d79ada2279a206a071b7e05671646d7b5e7fc37e2eae"}, + {file = "tensorflow-2.3.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:5c9f9a36d5b4d0ceb67b985486fe4cc6999a96e2bf89f3ba82ffd8317e5efadd"}, + {file = "tensorflow-2.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:bc9d761a857839344930eef86f0d6409840b1c9ada9cbe56b92287b2077ef752"}, + {file = "tensorflow-2.3.0-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:0cfb0fbe875408cdbfc7677f12aa0b23656f3e6d8c5f568b3100450ec29262a7"}, + {file = "tensorflow-2.3.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:92430b6e91f00f38a602c4f547bbbaca598a3a90376f90d5b2acd24bc18fa1d7"}, + {file = "tensorflow-2.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:36a4ce9bbc9865385c1bb606fe34f0da96b0496ce3997e652d2b765a4382fe48"}, + {file = "tensorflow-2.3.0-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:44c8d979b2d19ed56dbe6b03aef87616d6138a58fd80c43e7a758c90105e9adf"}, + {file = "tensorflow-2.3.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:c33a423eb1f39c4c6acc44c044a138979868f0d4c91e380c191bd8fddc7c2e9b"}, + {file = "tensorflow-2.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:2d9994157d6a222d9ffd956e99af4b5e46e47338428d2d197e325362283ec835"}, +] +tensorflow-addons = [ + {file = "tensorflow_addons-0.10.0-cp35-cp35m-macosx_10_13_x86_64.whl", hash = "sha256:8dae39a84dcd5eeb0889ebaed86158bd2904c7dde9d5873598712fa12993095c"}, + {file = "tensorflow_addons-0.10.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:8a607414f54248d6bfbfdd2afdc8d1ac2619d5caa37013c0302775b170024524"}, + {file = "tensorflow_addons-0.10.0-cp35-cp35m-win_amd64.whl", hash = "sha256:0353c10ab3dd332d3c4f9474b0102bc9f277fc0748d9c7a249a77dca8680b881"}, + {file = "tensorflow_addons-0.10.0-cp36-cp36m-macosx_10_13_x86_64.whl", hash = "sha256:91a3fd625c4550e08c952ca03cc0181362a66a916f8f55631c07a10e8c1d5076"}, + {file = "tensorflow_addons-0.10.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:48e58343daa94f62b31bf5f418b5f1f3f7123c7b373ddc085e1574ea1299263e"}, + {file = "tensorflow_addons-0.10.0-cp36-cp36m-win_amd64.whl", hash = "sha256:83c452a0ab8a91695837a9380218eca7de0cda0e700ca3c48b5c39b16841f61b"}, + {file = "tensorflow_addons-0.10.0-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:c1379b7eacd2ab254b3e5c5041d6e10caa2e373b86451de800c7dab77595e8b7"}, + {file = "tensorflow_addons-0.10.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:c070d28a45ca09d323f2052d83bfab97f9186b6e59191016a8dd5f2547d486a3"}, + {file = "tensorflow_addons-0.10.0-cp37-cp37m-win_amd64.whl", hash = "sha256:16a4184f42399843ffcad23ca96b9d256a3e0a5f2d3e76c7ceecff5bad6e1c1c"}, + {file = "tensorflow_addons-0.10.0-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:e185716992fe0d2af6d5826042834b85feda4e4887ad1e29fddf5c9640c38da0"}, + {file = "tensorflow_addons-0.10.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:08034ad2c177038990afc6aef9e7a90546f2872f3a39c4f966d2561346a749ab"}, + {file = "tensorflow_addons-0.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:d5df84bcd4fbd7006cb2602c96b4a736a46a8667b4179e82a843a159d5802eab"}, +] +tensorflow-estimator = [ + {file = "tensorflow_estimator-2.3.0-py2.py3-none-any.whl", hash = "sha256:b75e034300ccb169403cf2695adf3368da68863aeb0c14c3760064c713d5c486"}, +] +tensorflow-hub = [ + {file = "tensorflow_hub-0.8.0-py2.py3-none-any.whl", hash = 
"sha256:9dc65aa9980851236e30a1f59f071286abc52523fa75933fdc1fba9e3c1c96c8"}, +] +tensorflow-probability = [ + {file = "tensorflow_probability-0.10.1-py2.py3-none-any.whl", hash = "sha256:3ae630d3e0ae12623d77ab2394291dcf0d3ca6b78a8d193372165f7c335f14d0"}, +] +tensorflow-text = [ + {file = "tensorflow_text-2.3.0-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:beca0ca7b724a1c2d42190bf09364beefe3a49936ae39d953104940a6bea180f"}, + {file = "tensorflow_text-2.3.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:2938a7493e6f3c304554327740699771c77e514222a23aab33b3a49a7f3e94a4"}, + {file = "tensorflow_text-2.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:7c940343c98ede59d829be61dcb4f0f2f136b87c93c72ce0149a427b56aca9a5"}, + {file = "tensorflow_text-2.3.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:7bd146f0e4ce413da861d384d02a8843795a02feb84a143477e2904252eba093"}, + {file = "tensorflow_text-2.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:96a7a71d6d7b53d8b13fe2509b06e5ca5a4664071a2f59e4383a2061c6ae6656"}, + {file = "tensorflow_text-2.3.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:ec3dda6454d17fbc32df7eb191fc9abe723a08d26c491177f4b0af5b6229f074"}, + {file = "tensorflow_text-2.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2832bd05d9a07d6c8ba49bfbfcba2bfc1463a03e12dc38358c7a58ff965a79c3"}, + {file = "tensorflow_text-2.3.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:3c627dc7d865f505a785e011995a1600cfaaa406121bb9d35d8e92efd245a1b2"}, +] +termcolor = [ + {file = "termcolor-1.1.0.tar.gz", hash = "sha256:1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b"}, +] +terminaltables = [ + {file = "terminaltables-3.1.0.tar.gz", hash = "sha256:f3eb0eb92e3833972ac36796293ca0906e998dc3be91fbe1f8615b331b853b81"}, +] +thinc = [ + {file = "thinc-7.4.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9c40101f3148405cb291be2033758d011d348a5dea5d151811def8d1e466f25a"}, + {file = "thinc-7.4.0-cp35-cp35m-win_amd64.whl", hash = "sha256:ebb81b7ff8f852aae1b9c26dfb629344ab962e221ec87c83b2a7c4aec337477d"}, + {file = "thinc-7.4.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:23b77994be3376cd8efa85adfa1bcf0ffcb4cfd279f48a3ab842570f419334ca"}, + {file = "thinc-7.4.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2aa4cab69067f9dbe4ed7a1d937a4467edcc5f50d43996fba8c645f08ab1f387"}, + {file = "thinc-7.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:0522cc8b7a74e1de0902b55e1f141f889a088565f72ea0042a9c0f7f3ce83879"}, + {file = "thinc-7.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d1ee60d44ee840b75c0c0a3ade70908f05f414a65f20082483a5a5bfe82e9497"}, + {file = "thinc-7.4.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:1375c11ed4f7c7178a5749e17b2f3bb1644c98ecc8874e402aceaeec63df6297"}, + {file = "thinc-7.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:7bb69a8cace8d85a3f65d94176f381c5216df08d79a520b005653d0a23f523a8"}, + {file = "thinc-7.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f3c5786238991925694aba81fa305c1f2290a960fe5428a26b6f82134b260ad1"}, + {file = "thinc-7.4.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:a7332e323b76d63e1cfd2e6bc08a5527c5a6a0eba39197c56af8fe6eef62ef69"}, + {file = "thinc-7.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5ac162b010f21f8fcc3fd10766025fad3ec670f6b2e0a72284912332d1ae292a"}, + {file = "thinc-7.4.0.tar.gz", hash = "sha256:523e9be1bfaa3ed1d03d406ce451b6b4793a9719d5b83d2ea6b3398b96bc58b8"}, +] +threadpoolctl = [ + {file = "threadpoolctl-2.1.0-py3-none-any.whl", hash = 
"sha256:38b74ca20ff3bb42caca8b00055111d74159ee95c4370882bbff2b93d24da725"}, + {file = "threadpoolctl-2.1.0.tar.gz", hash = "sha256:ddc57c96a38beb63db45d6c159b5ab07b6bced12c45a1f07b2b92f272aebfa6b"}, +] +tokenizers = [ + {file = "tokenizers-0.7.0-cp35-cp35m-macosx_10_10_x86_64.whl", hash = "sha256:c9edc043bc14462faf8b261b528661718e9c4f0b8424fb25be71cae26187432a"}, + {file = "tokenizers-0.7.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:892dac477347c65d65eef5092e9aa0c02df17f1a6d2113380277505bc6ae1db4"}, + {file = "tokenizers-0.7.0-cp35-cp35m-win32.whl", hash = "sha256:aa7d429b4c2978e1b2265a9fdbf27fe723f3acb9d58cebd6756ef20584d2d5e5"}, + {file = "tokenizers-0.7.0-cp35-cp35m-win_amd64.whl", hash = "sha256:8f4203683b66369defa6fdd91ba07828715537ff31258dab171e4029bf54f7c9"}, + {file = "tokenizers-0.7.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:e0faee1f08daaec0f9220967c8209b19e147e6eda55a22bea8fcc6f06aee95c7"}, + {file = "tokenizers-0.7.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:17793599e4a0bb71730e366ecef47e4c0df2a79b4418d7557bf3af6cb995f8ba"}, + {file = "tokenizers-0.7.0-cp36-cp36m-win32.whl", hash = "sha256:fe3c994d2a993d32effcaf8600bf6ac29ef7de84519669f0efadb54f94d411a3"}, + {file = "tokenizers-0.7.0-cp36-cp36m-win_amd64.whl", hash = "sha256:83da606afe2a5e7941a25490d841924750d55d7667284d2d2ded2de520181790"}, + {file = "tokenizers-0.7.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:1b28e8ec30eea03b0d9bf7fe80c6fd240b7e5b76e7ec9542af0a48ffc1853a16"}, + {file = "tokenizers-0.7.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:695657cddabb9bb08444ba1bed822302039983c63d046e93760eb993739c3c10"}, + {file = "tokenizers-0.7.0-cp37-cp37m-win32.whl", hash = "sha256:a0abe20c50ca0760a895da33f1b55d452f21e55bddc418007d92d8665e86feb7"}, + {file = "tokenizers-0.7.0-cp37-cp37m-win_amd64.whl", hash = "sha256:b319d70f50c851ec4ae9a3d5c4eae1e3f74f8d720d61bc3d430915868a06a4a8"}, + {file = "tokenizers-0.7.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:f22ea3a79daf3705d9a8446821b3e202e8cc79467df7db75875d1fbb85d7c852"}, + {file = "tokenizers-0.7.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:2b101a752ee6147c4a5e08daa9c7d617259483fd4b0c70e7dfddfcadc8a73d2f"}, + {file = "tokenizers-0.7.0-cp38-cp38-win32.whl", hash = "sha256:03ad125d12e69a343763dbb160f43d953513cb32c5e11674c09431133ebcfd8b"}, + {file = "tokenizers-0.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:40520c333c1d602d0f99602bfeecd8f734188fc4360268ec7eb4d8b8570c6e95"}, + {file = "tokenizers-0.7.0.tar.gz", hash = "sha256:a3cb9be31e3be381ab3f9e9ea7f96d4ba83588c40c44fe63b535b7341cdf74fe"}, +] +toml = [ + {file = "toml-0.10.1-py2.py3-none-any.whl", hash = "sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88"}, + {file = "toml-0.10.1.tar.gz", hash = "sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f"}, +] +tornado = [ + {file = "tornado-6.0.4-cp35-cp35m-win32.whl", hash = "sha256:5217e601700f24e966ddab689f90b7ea4bd91ff3357c3600fa1045e26d68e55d"}, + {file = "tornado-6.0.4-cp35-cp35m-win_amd64.whl", hash = "sha256:c98232a3ac391f5faea6821b53db8db461157baa788f5d6222a193e9456e1740"}, + {file = "tornado-6.0.4-cp36-cp36m-win32.whl", hash = "sha256:5f6a07e62e799be5d2330e68d808c8ac41d4a259b9cea61da4101b83cb5dc673"}, + {file = "tornado-6.0.4-cp36-cp36m-win_amd64.whl", hash = "sha256:c952975c8ba74f546ae6de2e226ab3cc3cc11ae47baf607459a6728585bb542a"}, + {file = "tornado-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:2c027eb2a393d964b22b5c154d1a23a5f8727db6fda837118a776b29e2b8ebc6"}, + {file = 
"tornado-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:5618f72e947533832cbc3dec54e1dffc1747a5cb17d1fd91577ed14fa0dc081b"}, + {file = "tornado-6.0.4-cp38-cp38-win32.whl", hash = "sha256:22aed82c2ea340c3771e3babc5ef220272f6fd06b5108a53b4976d0d722bcd52"}, + {file = "tornado-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:c58d56003daf1b616336781b26d184023ea4af13ae143d9dda65e31e534940b9"}, + {file = "tornado-6.0.4.tar.gz", hash = "sha256:0fe2d45ba43b00a41cd73f8be321a44936dc1aba233dee979f17a042b83eb6dc"}, +] +towncrier = [ + {file = "towncrier-19.2.0-py2.py3-none-any.whl", hash = "sha256:de19da8b8cb44f18ea7ed3a3823087d2af8fcf497151bb9fd1e1b092ff56ed8d"}, + {file = "towncrier-19.2.0.tar.gz", hash = "sha256:48251a1ae66d2cf7e6fa5552016386831b3e12bb3b2d08eb70374508c17a8196"}, +] +tqdm = [ + {file = "tqdm-4.47.0-py2.py3-none-any.whl", hash = "sha256:7810e627bcf9d983a99d9ff8a0c09674400fd2927eddabeadf153c14a2ec8656"}, + {file = "tqdm-4.47.0.tar.gz", hash = "sha256:63ef7a6d3eb39f80d6b36e4867566b3d8e5f1fe3d6cb50c5e9ede2b3198ba7b7"}, +] +transformers = [ + {file = "transformers-2.11.0-py3-none-any.whl", hash = "sha256:b3e5198266f2a4b14841c70427cad46b89f473e6b0d0d3ab7461bf775f31631d"}, + {file = "transformers-2.11.0.tar.gz", hash = "sha256:8de20f03a94ebf16d98610a7df0acc6ba68c80bd44605cf5ad4300c642a7b57a"}, +] +twilio = [ + {file = "twilio-6.42.0.tar.gz", hash = "sha256:9d423321d577cab175712e4cc3636b68534572c3ab1c6c5b191925d3abac0223"}, +] +typed-ast = [ + {file = "typed_ast-1.4.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:73d785a950fc82dd2a25897d525d003f6378d1cb23ab305578394694202a58c3"}, + {file = "typed_ast-1.4.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:aaee9905aee35ba5905cfb3c62f3e83b3bec7b39413f0a7f19be4e547ea01ebb"}, + {file = "typed_ast-1.4.1-cp35-cp35m-win32.whl", hash = "sha256:0c2c07682d61a629b68433afb159376e24e5b2fd4641d35424e462169c0a7919"}, + {file = "typed_ast-1.4.1-cp35-cp35m-win_amd64.whl", hash = "sha256:4083861b0aa07990b619bd7ddc365eb7fa4b817e99cf5f8d9cf21a42780f6e01"}, + {file = "typed_ast-1.4.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:269151951236b0f9a6f04015a9004084a5ab0d5f19b57de779f908621e7d8b75"}, + {file = "typed_ast-1.4.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:24995c843eb0ad11a4527b026b4dde3da70e1f2d8806c99b7b4a7cf491612652"}, + {file = "typed_ast-1.4.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:fe460b922ec15dd205595c9b5b99e2f056fd98ae8f9f56b888e7a17dc2b757e7"}, + {file = "typed_ast-1.4.1-cp36-cp36m-win32.whl", hash = "sha256:4e3e5da80ccbebfff202a67bf900d081906c358ccc3d5e3c8aea42fdfdfd51c1"}, + {file = "typed_ast-1.4.1-cp36-cp36m-win_amd64.whl", hash = "sha256:249862707802d40f7f29f6e1aad8d84b5aa9e44552d2cc17384b209f091276aa"}, + {file = "typed_ast-1.4.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8ce678dbaf790dbdb3eba24056d5364fb45944f33553dd5869b7580cdbb83614"}, + {file = "typed_ast-1.4.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:c9e348e02e4d2b4a8b2eedb48210430658df6951fa484e59de33ff773fbd4b41"}, + {file = "typed_ast-1.4.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:bcd3b13b56ea479b3650b82cabd6b5343a625b0ced5429e4ccad28a8973f301b"}, + {file = "typed_ast-1.4.1-cp37-cp37m-win32.whl", hash = "sha256:d5d33e9e7af3b34a40dc05f498939f0ebf187f07c385fd58d591c533ad8562fe"}, + {file = "typed_ast-1.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:0666aa36131496aed8f7be0410ff974562ab7eeac11ef351def9ea6fa28f6355"}, + {file = "typed_ast-1.4.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = 
"sha256:d205b1b46085271b4e15f670058ce182bd1199e56b317bf2ec004b6a44f911f6"}, + {file = "typed_ast-1.4.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:6daac9731f172c2a22ade6ed0c00197ee7cc1221aa84cfdf9c31defeb059a907"}, + {file = "typed_ast-1.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:498b0f36cc7054c1fead3d7fc59d2150f4d5c6c56ba7fb150c013fbc683a8d2d"}, + {file = "typed_ast-1.4.1-cp38-cp38-win32.whl", hash = "sha256:715ff2f2df46121071622063fc7543d9b1fd19ebfc4f5c8895af64a77a8c852c"}, + {file = "typed_ast-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:fc0fea399acb12edbf8a628ba8d2312f583bdbdb3335635db062fa98cf71fca4"}, + {file = "typed_ast-1.4.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:d43943ef777f9a1c42bf4e552ba23ac77a6351de620aa9acf64ad54933ad4d34"}, + {file = "typed_ast-1.4.1.tar.gz", hash = "sha256:8c8aaad94455178e3187ab22c8b01a3837f8ee50e09cf31f1ba129eb293ec30b"}, +] +typeguard = [ + {file = "typeguard-2.9.1-py3-none-any.whl", hash = "sha256:e258567e62d28f9a51d4f7c71f491154e9ef0889286ad2f37e3e22e4f668b21b"}, + {file = "typeguard-2.9.1.tar.gz", hash = "sha256:529ef3d88189cc457f4340388028412f71be8091c2c943465146d4170fb67288"}, +] +typing-extensions = [ + {file = "typing_extensions-3.7.4.2-py2-none-any.whl", hash = "sha256:f8d2bd89d25bc39dabe7d23df520442fa1d8969b82544370e03d88b5a591c392"}, + {file = "typing_extensions-3.7.4.2-py3-none-any.whl", hash = "sha256:6e95524d8a547a91e08f404ae485bbb71962de46967e1b71a0cb89af24e761c5"}, + {file = "typing_extensions-3.7.4.2.tar.gz", hash = "sha256:79ee589a3caca649a9bfd2a8de4709837400dfa00b6cc81962a1e6a1815969ae"}, +] +tzlocal = [ + {file = "tzlocal-2.1-py2.py3-none-any.whl", hash = "sha256:e2cb6c6b5b604af38597403e9852872d7f534962ae2954c7f35efcb1ccacf4a4"}, + {file = "tzlocal-2.1.tar.gz", hash = "sha256:643c97c5294aedc737780a49d9df30889321cbe1204eac2c2ec6134035a92e44"}, +] +ujson = [ + {file = "ujson-3.1.0-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:585329c16eeb0992308545c7a05eee76c7f1c2042b08317aac64fef1e71e71a9"}, + {file = "ujson-3.1.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:d40d53aa679994624960b1c40bc451c5d9e3a0779f6a04146b417e220ce8f7d4"}, + {file = "ujson-3.1.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:b794a2ddba049daa1fe0f28c4652b54d0d06580e7d3bcae87b4c000fb242654f"}, + {file = "ujson-3.1.0-cp35-cp35m-win_amd64.whl", hash = "sha256:0789837e7156e07890f2461ec1d9dc2ea4ef7c76fd36e46955d811485daf83b9"}, + {file = "ujson-3.1.0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:f6e229fa9b7d8586f894d18a003703fea4620ff8e6d39c11499755ecec5b43fc"}, + {file = "ujson-3.1.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:ebe1bc9536b93c697f042b6c39ed602ad7edd5ef139eb167d9c8e36594b24546"}, + {file = "ujson-3.1.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:6a2fac9892996c3a46bd3fd8bbf739f19f85fc7ec9cdc11b014f76267c3ee76d"}, + {file = "ujson-3.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:083778f4e64f90b4468e73e0baafcde0ab83bff85315d28a5b1287a3f5909e7f"}, + {file = "ujson-3.1.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:d3f28f64b33609be20f985baa72d43ccfdf87e50d47ca2791d26b9c2f348a35b"}, + {file = "ujson-3.1.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:3a049acf176dacbb79ee00b13a78fc2524c35c69400e34aa5f6ff0fc3fdbb1f0"}, + {file = "ujson-3.1.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:c2be0d511e5dc302f190e510544c4d5fbbb4396632abe33b66e28dad26ea4325"}, + {file = "ujson-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:7e7cdd8a42428cd2716cfe0a506e57168c6a02b8032478209cb4a8a12b8c2c4e"}, + {file = 
"ujson-3.1.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:a8194b778adf38ad679c62e2bb6cf71972ae91102e611e4bb31f625be6fb366a"}, + {file = "ujson-3.1.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:0184ed23618ce3d793aaf9f5b3dd456cf719b930d3936fb39000589ed0bd2811"}, + {file = "ujson-3.1.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:8032ca897cfce113cb9ca7f07ff4c750afe18c605eeed2e09cf9b882c99fc76b"}, + {file = "ujson-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:bf30ac68d8b2aed665589cfd3608e43d8cd6cb74cd5a9379ab9af9861c33a8be"}, + {file = "ujson-3.1.0.tar.gz", hash = "sha256:00bda1de275ed6fe81817902189c75dfd156b4fa29b44dc1f4620775d2f50cf7"}, +] +uritemplate = [ + {file = "uritemplate-3.0.1-py2.py3-none-any.whl", hash = "sha256:07620c3f3f8eed1f12600845892b0e036a2420acf513c53f7de0abd911a5894f"}, + {file = "uritemplate-3.0.1.tar.gz", hash = "sha256:5af8ad10cec94f215e3f48112de2022e1d5a37ed427fbd88652fa908f2ab7cae"}, +] +urllib3 = [ + {file = "urllib3-1.25.10-py2.py3-none-any.whl", hash = "sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461"}, + {file = "urllib3-1.25.10.tar.gz", hash = "sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a"}, +] +uvloop = [ + {file = "uvloop-0.14.0-cp35-cp35m-macosx_10_11_x86_64.whl", hash = "sha256:08b109f0213af392150e2fe6f81d33261bb5ce968a288eb698aad4f46eb711bd"}, + {file = "uvloop-0.14.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:4544dcf77d74f3a84f03dd6278174575c44c67d7165d4c42c71db3fdc3860726"}, + {file = "uvloop-0.14.0-cp36-cp36m-macosx_10_11_x86_64.whl", hash = "sha256:b4f591aa4b3fa7f32fb51e2ee9fea1b495eb75b0b3c8d0ca52514ad675ae63f7"}, + {file = "uvloop-0.14.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:f07909cd9fc08c52d294b1570bba92186181ca01fe3dc9ffba68955273dd7362"}, + {file = "uvloop-0.14.0-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:afd5513c0ae414ec71d24f6f123614a80f3d27ca655a4fcf6cabe50994cc1891"}, + {file = "uvloop-0.14.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:e7514d7a48c063226b7d06617cbb12a14278d4323a065a8d46a7962686ce2e95"}, + {file = "uvloop-0.14.0-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:bcac356d62edd330080aed082e78d4b580ff260a677508718f88016333e2c9c5"}, + {file = "uvloop-0.14.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:4315d2ec3ca393dd5bc0b0089d23101276778c304d42faff5dc4579cb6caef09"}, + {file = "uvloop-0.14.0.tar.gz", hash = "sha256:123ac9c0c7dd71464f58f1b4ee0bbd81285d96cdda8bc3519281b8973e3a461e"}, +] +wasabi = [ + {file = "wasabi-0.7.1.tar.gz", hash = "sha256:ee3809f4ce00e1e7f424b1572c753cff0dcaca2ca684e67e31f985033a9f070b"}, +] +watchdog = [ + {file = "watchdog-0.10.3.tar.gz", hash = "sha256:4214e1379d128b0588021880ccaf40317ee156d4603ac388b9adcf29165e0c04"}, +] +wcwidth = [ + {file = "wcwidth-0.2.5-py2.py3-none-any.whl", hash = "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784"}, + {file = "wcwidth-0.2.5.tar.gz", hash = "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83"}, +] +webexteamssdk = [ + {file = "webexteamssdk-1.3.tar.gz", hash = "sha256:161e0bbc9b7b044f9b765b9b9767642740e3421a428ec7bfc34b1b8e25437127"}, +] +websocket-client = [ + {file = "websocket_client-0.57.0-py2.py3-none-any.whl", hash = "sha256:0fc45c961324d79c781bab301359d5a1b00b13ad1b10415a4780229ef71a5549"}, + {file = "websocket_client-0.57.0.tar.gz", hash = "sha256:d735b91d6d1692a6a181f2a8c9e0238e5f6373356f561bb9dc4c7af36f452010"}, +] +websockets = [ + {file = "websockets-8.0.2-cp36-cp36m-macosx_10_6_intel.whl", hash = 
"sha256:e906128532a14b9d264a43eb48f9b3080d53a9bda819ab45bf56b8039dc606ac"}, + {file = "websockets-8.0.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:83e63aa73331b9ca21af61df8f115fb5fbcba3f281bee650a4ad16a40cd1ef15"}, + {file = "websockets-8.0.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:e9102043a81cdc8b7c8032ff4bce39f6229e4ac39cb2010946c912eeb84e2cb6"}, + {file = "websockets-8.0.2-cp36-cp36m-win32.whl", hash = "sha256:8d7a20a2f97f1e98c765651d9fb9437201a9ccc2c70e94b0270f1c5ef29667a3"}, + {file = "websockets-8.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:c82e286555f839846ef4f0fdd6910769a577952e1e26aa8ee7a6f45f040e3c2b"}, + {file = "websockets-8.0.2-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:73ce69217e4655783ec72ce11c151053fcbd5b837cc39de7999e19605182e28a"}, + {file = "websockets-8.0.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:8c77f7d182a6ea2a9d09c2612059f3ad859a90243e899617137ee3f6b7f2b584"}, + {file = "websockets-8.0.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:a7affaeffbc5d55681934c16bb6b8fc82bb75b175e7fd4dcca798c938bde8dda"}, + {file = "websockets-8.0.2-cp37-cp37m-win32.whl", hash = "sha256:f5cb2683367e32da6a256b60929a3af9c29c212b5091cf5bace9358d03011bf5"}, + {file = "websockets-8.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:049e694abe33f8a1d99969fee7bfc0ae6761f7fd5f297c58ea933b27dd6805f2"}, + {file = "websockets-8.0.2.tar.gz", hash = "sha256:882a7266fa867a2ebb2c0baaa0f9159cabf131cf18c1b4270d79ad42f9208dc5"}, +] +werkzeug = [ + {file = "Werkzeug-1.0.1-py2.py3-none-any.whl", hash = "sha256:2de2a5db0baeae7b2d2664949077c2ac63fbd16d98da0ff71837f7d1dea3fd43"}, + {file = "Werkzeug-1.0.1.tar.gz", hash = "sha256:6c80b1e5ad3665290ea39320b91e1be1e0d5f60652b964a3070216de83d2e47c"}, +] +wheel = [ + {file = "wheel-0.35.1-py2.py3-none-any.whl", hash = "sha256:497add53525d16c173c2c1c733b8f655510e909ea78cc0e29d374243544b77a2"}, + {file = "wheel-0.35.1.tar.gz", hash = "sha256:99a22d87add3f634ff917310a3d87e499f19e663413a52eb9232c447aa646c9f"}, +] +wrapt = [ + {file = "wrapt-1.12.1.tar.gz", hash = "sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7"}, +] +xmltodict = [ + {file = "xmltodict-0.12.0-py2.py3-none-any.whl", hash = "sha256:8bbcb45cc982f48b2ca8fe7e7827c5d792f217ecf1792626f808bf41c3b86051"}, + {file = "xmltodict-0.12.0.tar.gz", hash = "sha256:50d8c638ed7ecb88d90561beedbf720c9b4e851a9fa6c47ebd64e99d166d8a21"}, +] +yarl = [ + {file = "yarl-1.5.1-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:db6db0f45d2c63ddb1a9d18d1b9b22f308e52c83638c26b422d520a815c4b3fb"}, + {file = "yarl-1.5.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:17668ec6722b1b7a3a05cc0167659f6c95b436d25a36c2d52db0eca7d3f72593"}, + {file = "yarl-1.5.1-cp35-cp35m-win32.whl", hash = "sha256:040b237f58ff7d800e6e0fd89c8439b841f777dd99b4a9cca04d6935564b9409"}, + {file = "yarl-1.5.1-cp35-cp35m-win_amd64.whl", hash = "sha256:f18d68f2be6bf0e89f1521af2b1bb46e66ab0018faafa81d70f358153170a317"}, + {file = "yarl-1.5.1-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:c52ce2883dc193824989a9b97a76ca86ecd1fa7955b14f87bf367a61b6232511"}, + {file = "yarl-1.5.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:ce584af5de8830d8701b8979b18fcf450cef9a382b1a3c8ef189bedc408faf1e"}, + {file = "yarl-1.5.1-cp36-cp36m-win32.whl", hash = "sha256:df89642981b94e7db5596818499c4b2219028f2a528c9c37cc1de45bf2fd3a3f"}, + {file = "yarl-1.5.1-cp36-cp36m-win_amd64.whl", hash = "sha256:3a584b28086bc93c888a6c2aa5c92ed1ae20932f078c46509a66dce9ea5533f2"}, + {file = "yarl-1.5.1-cp37-cp37m-macosx_10_14_x86_64.whl", 
hash = "sha256:da456eeec17fa8aa4594d9a9f27c0b1060b6a75f2419fe0c00609587b2695f4a"}, + {file = "yarl-1.5.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:bc2f976c0e918659f723401c4f834deb8a8e7798a71be4382e024bcc3f7e23a8"}, + {file = "yarl-1.5.1-cp37-cp37m-win32.whl", hash = "sha256:4439be27e4eee76c7632c2427ca5e73703151b22cae23e64adb243a9c2f565d8"}, + {file = "yarl-1.5.1-cp37-cp37m-win_amd64.whl", hash = "sha256:48e918b05850fffb070a496d2b5f97fc31d15d94ca33d3d08a4f86e26d4e7c5d"}, + {file = "yarl-1.5.1-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:9b930776c0ae0c691776f4d2891ebc5362af86f152dd0da463a6614074cb1b02"}, + {file = "yarl-1.5.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:b3b9ad80f8b68519cc3372a6ca85ae02cc5a8807723ac366b53c0f089db19e4a"}, + {file = "yarl-1.5.1-cp38-cp38-win32.whl", hash = "sha256:f379b7f83f23fe12823085cd6b906edc49df969eb99757f58ff382349a3303c6"}, + {file = "yarl-1.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:9102b59e8337f9874638fcfc9ac3734a0cfadb100e47d55c20d0dc6087fb4692"}, + {file = "yarl-1.5.1.tar.gz", hash = "sha256:c22c75b5f394f3d47105045ea551e08a3e804dc7e01b37800ca35b58f856c3d6"}, +] +zipp = [ + {file = "zipp-3.1.0-py3-none-any.whl", hash = "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b"}, + {file = "zipp-3.1.0.tar.gz", hash = "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96"}, +] diff --git a/pyproject.toml b/pyproject.toml index d25488cb6132..e2e0b33fb99a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,15 +1,187 @@ +[build-system] +requires = [ "poetry>=1.0.5",] +build-backend = "poetry.masonry.api" + [tool.black] line-length = 88 -target-version = ['py27', 'py35', 'py36', 'py37'] -exclude = ''' -( - /( - \.eggs - | \.git - | \.pytype - | \.pytest_cache - | build - | dist - )/ -) -''' +target-version = [ "py36", "py37", "py38",] +exclude = "((.eggs | .git | .pytype | .pytest_cache | build | dist))" + +[tool.poetry] +name = "rasa" +version = "2.0.0a2" +description = "Open source machine learning framework to automate text- and voice-based conversations: NLU, dialogue management, connect to Slack, Facebook, and more - Create chatbots and voice assistants" +authors = [ "Rasa Technologies GmbH <hi@rasa.com>",] +maintainers = [ "Tom Bocklisch <tom@rasa.com>",] +homepage = "https://rasa.com" +repository = "https://github.com/rasahq/rasa" +documentation = "https://rasa.com/docs" +classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Topic :: Software Development :: Libraries",] +keywords = [ "nlp", "machine-learning", "machine-learning-library", "bot", "bots", "botkit", "rasa conversational-agents", "conversational-ai", "chatbot", "chatbot-framework", "bot-framework",] +include = [ "LICENSE.txt", "README.md", "rasa/core/schemas/*", "rasa/core/training/visualization.html", "rasa/nlu/schemas/*", "rasa/cli/default_config.yml", "rasa/importers/*",] +readme = "README.md" +license = "Apache-2.0" +[[tool.poetry.source]] +name = "rasa-pypi" +url = "https://pypi.rasa.com/simple/" + +[tool.towncrier] +package = "rasa" +package_dir = "rasa" +filename = "CHANGELOG.mdx" +directory = "./changelog" +underlines = " " +title_format = "## [{version}] - {project_date}" +template = "./changelog/_template.md.jinja2" +start_string = "<!-- TOWNCRIER -->\n" +issue_format = 
"[#{issue}](https://github.com/rasahq/rasa/issues/{issue})" +[[tool.towncrier.type]] +directory = "removal" +name = "Deprecations and Removals" +showcontent = true + +[[tool.towncrier.type]] +directory = "feature" +name = "Features" +showcontent = true + +[[tool.towncrier.type]] +directory = "improvement" +name = "Improvements" +showcontent = true + +[[tool.towncrier.type]] +directory = "bugfix" +name = "Bugfixes" +showcontent = true + +[[tool.towncrier.type]] +directory = "doc" +name = "Improved Documentation" +showcontent = true + +[[tool.towncrier.type]] +directory = "misc" +name = "Miscellaneous internal changes" +showcontent = false + +[tool.poetry.dependencies] +python = ">=3.6,<3.9" +boto3 = "^1.12" +requests = "^2.23" +matplotlib = ">=3.1,<3.3" +attrs = "~19.3" +jsonpickle = ">=1.3,<1.5" +redis = "^3.4" +numpy = "^1.16" +scipy = "^1.4.1" +absl-py = "^0.9" +apscheduler = "~3.6" +tqdm = ">=4.31,<4.48" +networkx = "~2.4.0" +fbmessenger = "~6.0.0" +pykwalify = "~1.7.0" +coloredlogs = ">=10,<15" +"ruamel.yaml" = "^0.16" +scikit-learn = ">=0.22,<0.24" +slackclient = "^2.0.0" +python-telegram-bot = ">=11.1,<13.0" +twilio = ">=6.26,<6.43" +webexteamssdk = ">=1.1.1,<1.4.0" +mattermostwrapper = "~2.2" +rocketchat_API = ">=0.6.31,<1.5.0" +colorhash = "~1.0.2" +pika = "~1.1.0" +jsonschema = "~3.2" +packaging = ">=20.0,<21.0" +pytz = ">=2019.1,<2021.0" +rasa-sdk = "^2.0.0a2" +colorclass = "~2.2" +terminaltables = "~3.1.0" +sanic = "^19.12.2" +sanic-cors = "^0.10.0b1" +sanic-jwt = ">=1.3.2,<1.5.0" +cloudpickle = ">=1.2,<1.5" +multidict = "^4.6" +aiohttp = "~3.6" +questionary = "~1.5.1" +prompt-toolkit = "^2.0" +python-socketio = ">=4.4,<4.7" +python-engineio = ">=3.11,<3.14" +pydot = "~1.4" +async_generator = "~1.10" +SQLAlchemy = "~1.3.3" +sklearn-crfsuite = "~0.3" +psycopg2-binary = "~2.8.2" +PyJWT = "~1.7" +python-dateutil = "~2.8" +tensorflow = "~2.3" +tensorflow_hub = "~0.8" +tensorflow-addons = "~0.10" +tensorflow-estimator = "~2.3" +tensorflow-probability = "~0.10" +setuptools = ">=41.0.0" +kafka-python = ">=1.4,<3.0" +ujson = ">=1.35,<4.0" +oauth2client = "4.1.3" +regex = "~2020.6" +joblib = "^0.15.1" + +[tool.poetry.dev-dependencies] +pytest-cov = "^2.10.0" +pytest-localserver = "^0.5.0" +pytest-sanic = "^1.6.1" +pytest-asyncio = "^0.10.0" +pytest-xdist = "^1.32.0" +pytest = "^5.3.4" +freezegun = "^0.3.14" +responses = "^0.10.15" +aioresponses = "^0.6.2" +moto = "==1.3.14" +fakeredis = "^1.4.0" +mongomock = "^3.18.0" +black = "^19.10b0" +flake8 = "^3.8.3" +pytype = "^2020.6.1" +google-cloud-storage = "^1.29.0" +azure-storage-blob = "<12.4.0" +coveralls = "^2.0.0" +towncrier = "^19.2.0" +toml = "^0.10.0" +pep440-version-utils = "^0.3.0" +pydoc-markdown = "3.3.0.post1" + +[tool.poetry.extras] +spacy = [ "spacy",] +convert = [ "tensorflow-text",] +jieba = [ "jieba",] +transformers = [ "transformers",] +full = [ "spacy", "tensorflow-text", "transformers", "jieba",] +gh-release-notes = [ "github3.py",] + +[tool.poetry.scripts] +rasa = "rasa.__main__:main" + +[tool.poetry.dependencies.spacy] +version = ">=2.1,<2.3" +optional = true + +[tool.poetry.dependencies.tensorflow-text] +version = "~2.3" +optional = true + +[tool.poetry.dependencies."github3.py"] +version = "~1.3.0" +optional = true + +[tool.poetry.dependencies.transformers] +version = ">=2.4,<2.12" +optional = true + +[tool.poetry.dependencies.jieba] +version = ">=0.39, <0.43" +optional = true + +[tool.poetry.dependencies.pymongo] +version = ">=3.8,<3.11" +extras = [ "tls", "srv",] diff --git a/pytest.ini b/pytest.ini new file mode 
100644 index 000000000000..e47eeb507662 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,3 @@ +[pytest] +markers = + skip_on_windows: mark a test as a test that shouldn't be executed on Windows. diff --git a/rasa/__main__.py b/rasa/__main__.py index 771371064bce..94125af8b333 100644 --- a/rasa/__main__.py +++ b/rasa/__main__.py @@ -1,13 +1,27 @@ +import sys import argparse import logging +import platform import rasa.utils.io - from rasa import version -from rasa.cli import scaffold, run, train, interactive, shell, test, visualize, data, x +from rasa.cli import ( + scaffold, + run, + train, + interactive, + shell, + test, + visualize, + data, + x, + export, +) from rasa.cli.arguments.default_arguments import add_logging_options from rasa.cli.utils import parse_last_positional_argument_as_model_path -from rasa.utils.common import set_log_level +from rasa.utils.common import set_log_level, set_log_and_warnings_filters +import rasa.utils.tensorflow.environment as tf_env +from rasa_sdk import __version__ as rasa_sdk_version logger = logging.getLogger(__name__) @@ -45,13 +59,29 @@ def create_argument_parser() -> argparse.ArgumentParser: test.add_subparser(subparsers, parents=parent_parsers) visualize.add_subparser(subparsers, parents=parent_parsers) data.add_subparser(subparsers, parents=parent_parsers) + export.add_subparser(subparsers, parents=parent_parsers) x.add_subparser(subparsers, parents=parent_parsers) return parser def print_version() -> None: - print ("Rasa", version.__version__) + """Prints version information of rasa tooling and python.""" + + python_version, os_info = sys.version.split("\n") + try: + from rasax.community.version import __version__ # pytype: disable=import-error + + rasa_x_info = __version__ + except ModuleNotFoundError: + rasa_x_info = None + + print(f"Rasa Version : {version.__version__}") + print(f"Rasa SDK Version : {rasa_sdk_version}") + print(f"Rasa X Version : {rasa_x_info}") + print(f"Python Version : {python_version}") + print(f"Operating System : {platform.platform()}") + print(f"Python Path : {sys.executable}") def main() -> None: @@ -68,11 +98,14 @@ def main() -> None: ) set_log_level(log_level) + tf_env.setup_tf_environment() + # insert current path in syspath so custom modules are found sys.path.insert(1, os.getcwd()) if hasattr(cmdline_arguments, "func"): rasa.utils.io.configure_colored_logging(log_level) + set_log_and_warnings_filters() cmdline_arguments.func(cmdline_arguments) elif hasattr(cmdline_arguments, "version"): print_version() diff --git a/rasa/cli/arguments/data.py b/rasa/cli/arguments/data.py index 441e24470315..69706dcd1c77 100644 --- a/rasa/cli/arguments/data.py +++ b/rasa/cli/arguments/data.py @@ -1,4 +1,5 @@ import argparse +from typing import Text from rasa.cli.arguments.default_arguments import ( add_nlu_data_param, @@ -8,8 +9,8 @@ ) -def set_convert_arguments(parser: argparse.ArgumentParser): - add_data_param(parser, required=True, default=None, data_type="Rasa NLU ") +def set_convert_arguments(parser: argparse.ArgumentParser, data_type: Text): + add_data_param(parser, required=True, default=None, data_type=data_type) add_out_param( parser, @@ -24,8 +25,10 @@ def set_convert_arguments(parser: argparse.ArgumentParser): "-f", "--format", required=True, - choices=["json", "md"], - help="Output format the training data should be converted into.", + choices=["json", "md", "yaml"], + help="Output format the training data should be converted into. 
" + "Note: currently training data can be converted to 'yaml' format " + "only from 'md' format", ) @@ -39,6 +42,13 @@ def set_split_arguments(parser: argparse.ArgumentParser): help="Percentage of the data which should be in the training data.", ) + parser.add_argument( + "--random-seed", + type=int, + default=None, + help="Seed to generate the same train/test split.", + ) + add_out_param( parser, default="train_test_split", diff --git a/rasa/cli/arguments/default_arguments.py b/rasa/cli/arguments/default_arguments.py index 857ccd267f5f..69a106a4028a 100644 --- a/rasa/cli/arguments/default_arguments.py +++ b/rasa/cli/arguments/default_arguments.py @@ -15,7 +15,7 @@ def add_model_param( model_name: Text = "Rasa", add_positional_arg: bool = True, default: Optional[Text] = DEFAULT_MODELS_PATH, -): +) -> None: help_text = ( "Path to a trained {} model. If a directory is specified, it will " "use the latest model in this directory.".format(model_name) @@ -36,7 +36,7 @@ def add_stories_param( "--stories", type=str, default=DEFAULT_DATA_PATH, - help="File or folder containing your {} stories.".format(stories_name), + help=f"File or folder containing your {stories_name} stories.", ) @@ -44,26 +44,28 @@ def add_nlu_data_param( parser: Union[argparse.ArgumentParser, argparse._ActionsContainer], help_text: Text, default: Optional[Text] = DEFAULT_DATA_PATH, -): +) -> None: parser.add_argument("-u", "--nlu", type=str, default=default, help=help_text) def add_domain_param( parser: Union[argparse.ArgumentParser, argparse._ActionsContainer] -): +) -> None: parser.add_argument( "-d", "--domain", type=str, default=DEFAULT_DOMAIN_PATH, - help="Domain specification (yml file).", + help="Domain specification. This can be a single YAML file, or a directory " + "that contains several files with domain specifications in it. 
The content " + "of these files will be read and merged together.", ) def add_config_param( parser: Union[argparse.ArgumentParser, argparse._ActionsContainer], default: Optional[Text] = DEFAULT_CONFIG_PATH, -): +) -> None: parser.add_argument( "-c", "--config", @@ -78,16 +80,18 @@ def add_out_param( help_text: Text, default: Optional[Text] = DEFAULT_MODELS_PATH, required: bool = False, -): +) -> None: parser.add_argument( "--out", type=str, default=default, help=help_text, required=required ) def add_endpoint_param( - parser: Union[argparse.ArgumentParser, argparse._ActionsContainer], help_text: Text -): - parser.add_argument("--endpoints", type=str, default=None, help=help_text) + parser: Union[argparse.ArgumentParser, argparse._ActionsContainer], + help_text: Text, + default: Optional[Text] = None, +) -> None: + parser.add_argument("--endpoints", type=str, default=default, help=help_text) def add_data_param( @@ -95,17 +99,17 @@ def add_data_param( default: Optional[Text] = DEFAULT_DATA_PATH, required: bool = False, data_type: Text = "Rasa ", -): +) -> None: parser.add_argument( "--data", type=str, default=default, - help="Path to the file or directory containing {} data.".format(data_type), + help=f"Path to the file or directory containing {data_type} data.", required=required, ) -def add_logging_options(parser: argparse.ArgumentParser): +def add_logging_options(parser: argparse.ArgumentParser) -> None: """Add options to an argument parser to configure logging levels.""" logging_arguments = parser.add_argument_group("Python Logging Options") diff --git a/rasa/cli/arguments/export.py b/rasa/cli/arguments/export.py new file mode 100644 index 000000000000..0c78a715665c --- /dev/null +++ b/rasa/cli/arguments/export.py @@ -0,0 +1,41 @@ +import argparse + +from rasa.cli.arguments import default_arguments +from rasa.constants import DEFAULT_ENDPOINTS_PATH + + +def set_export_arguments(parser: argparse.ArgumentParser) -> None: + default_arguments.add_endpoint_param( + parser, + default=DEFAULT_ENDPOINTS_PATH, + help_text=( + "Endpoint configuration file specifying the tracker store " + "and event broker." + ), + ) + + parser.add_argument( + "--minimum-timestamp", + type=float, + help=( + "Minimum timestamp of events to be exported. The constraint is applied " + "in a 'greater than or equal' comparison." + ), + ) + + parser.add_argument( + "--maximum-timestamp", + type=float, + help=( + "Maximum timestamp of events to be exported. The constraint is " + "applied in a 'less than' comparison." + ), + ) + + parser.add_argument( + "--conversation-ids", + help=( + "Comma-separated list of conversation IDs to migrate. If unset, " + "all available conversation IDs will be exported." + ), + ) diff --git a/rasa/cli/arguments/interactive.py b/rasa/cli/arguments/interactive.py index 98244eb803b8..c4ed2a6ac17e 100644 --- a/rasa/cli/arguments/interactive.py +++ b/rasa/cli/arguments/interactive.py @@ -1,4 +1,5 @@ import argparse +import uuid from rasa.cli.arguments.default_arguments import ( add_domain_param, @@ -12,45 +13,63 @@ add_config_param, add_out_param, add_debug_plots_param, - add_dump_stories_param, add_augmentation_param, + add_persist_nlu_data_param, ) +from rasa.cli.arguments.run import add_port_argument -def set_interactive_arguments(parser: argparse.ArgumentParser): +def set_interactive_arguments(parser: argparse.ArgumentParser) -> None: + parser.add_argument( + "--e2e", + action="store_true", + help="Save story files in e2e format. 
In this format user messages " + "will be included in the stories.", + ) + add_port_argument(parser) + add_model_param(parser, default=None) add_data_param(parser) - add_skip_visualization_param(parser) - - add_endpoint_param( - parser, - help_text="Configuration file for the model server and the connectors as a yml file.", - ) + _add_common_params(parser) + train_arguments = _add_training_arguments(parser) - train_arguments = parser.add_argument_group("Train Arguments") - add_config_param(train_arguments) - add_domain_param(train_arguments) - add_out_param( - train_arguments, help_text="Directory where your models should be stored." - ) - add_augmentation_param(train_arguments) - add_debug_plots_param(train_arguments) - add_dump_stories_param(train_arguments) add_force_param(train_arguments) + add_persist_nlu_data_param(train_arguments) -def set_interactive_core_arguments(parser: argparse.ArgumentParser): +def set_interactive_core_arguments(parser: argparse.ArgumentParser) -> None: add_model_param(parser, model_name="Rasa Core", default=None) add_stories_param(parser) - add_skip_visualization_param(parser) + _add_common_params(parser) + _add_training_arguments(parser) + add_port_argument(parser) + + +def _add_common_params(parser: argparse.ArgumentParser) -> None: + parser.add_argument( + "--skip-visualization", + default=False, + action="store_true", + help="Disable plotting the visualization during interactive learning.", + ) + + parser.add_argument( + "--conversation-id", + default=uuid.uuid4().hex, + help="Specify the id of the conversation the messages are in. Defaults to a " + "UUID that will be randomly generated.", + ) add_endpoint_param( parser, help_text="Configuration file for the model server and the connectors as a yml file.", ) + +# noinspection PyProtectedMember +def _add_training_arguments(parser: argparse.ArgumentParser) -> argparse._ArgumentGroup: train_arguments = parser.add_argument_group("Train Arguments") add_config_param(train_arguments) add_domain_param(train_arguments) @@ -59,13 +78,5 @@ def set_interactive_core_arguments(parser: argparse.ArgumentParser): ) add_augmentation_param(train_arguments) add_debug_plots_param(train_arguments) - add_dump_stories_param(train_arguments) - -def add_skip_visualization_param(parser: argparse.ArgumentParser): - parser.add_argument( - "--skip-visualization", - default=False, - action="store_true", - help="Disable plotting the visualization during interactive learning.", - ) + return train_arguments diff --git a/rasa/cli/arguments/run.py b/rasa/cli/arguments/run.py index 68dc85a74fab..16e65e76cf21 100644 --- a/rasa/cli/arguments/run.py +++ b/rasa/cli/arguments/run.py @@ -1,21 +1,37 @@ import argparse +from typing import Union from rasa.cli.arguments.default_arguments import add_model_param, add_endpoint_param from rasa.core import constants def set_run_arguments(parser: argparse.ArgumentParser): + """Arguments for running Rasa directly using `rasa run`.""" add_model_param(parser) add_server_arguments(parser) def set_run_action_arguments(parser: argparse.ArgumentParser): + """Set arguments for running Rasa SDK.""" import rasa_sdk.cli.arguments as sdk sdk.add_endpoint_arguments(parser) +# noinspection PyProtectedMember +def add_port_argument(parser: Union[argparse.ArgumentParser, argparse._ArgumentGroup]): + """Add an argument for port.""" + parser.add_argument( + "-p", + "--port", + default=constants.DEFAULT_SERVER_PORT, + type=int, + help="Port to run the server at.", + ) + + def add_server_arguments(parser: 
argparse.ArgumentParser): + """Add arguments for running API endpoint.""" parser.add_argument( "--log-file", type=str, @@ -31,13 +47,9 @@ def add_server_arguments(parser: argparse.ArgumentParser): ) server_arguments = parser.add_argument_group("Server Settings") - server_arguments.add_argument( - "-p", - "--port", - default=constants.DEFAULT_SERVER_PORT, - type=int, - help="Port to run the server at.", - ) + + add_port_argument(server_arguments) + server_arguments.add_argument( "-t", "--auth-token", @@ -56,6 +68,12 @@ def add_server_arguments(parser: argparse.ArgumentParser): action="store_true", help="Start the web server API in addition to the input channel.", ) + server_arguments.add_argument( + "--response-timeout", + default=constants.DEFAULT_RESPONSE_TIMEOUT, + type=int, + help="Maximum time a response can take to process (sec).", + ) server_arguments.add_argument( "--remote-storage", help="Set the remote location where your Rasa model is stored, e.g. on AWS.", @@ -67,6 +85,11 @@ def add_server_arguments(parser: argparse.ArgumentParser): server_arguments.add_argument( "--ssl-keyfile", help="Set the SSL Keyfile to create a TLS secured server." ) + server_arguments.add_argument( + "--ssl-ca-file", + help="If your SSL certificate needs to be verified, you can specify the CA file " + "using this parameter.", + ) server_arguments.add_argument( "--ssl-password", help="If your ssl-keyfile is protected by a password, you can specify it " diff --git a/rasa/cli/arguments/test.py b/rasa/cli/arguments/test.py index 02bc6f1b84a6..ca75d6528828 100644 --- a/rasa/cli/arguments/test.py +++ b/rasa/cli/arguments/test.py @@ -22,10 +22,18 @@ def set_test_arguments(parser: argparse.ArgumentParser): nlu_arguments = parser.add_argument_group("NLU Test Arguments") add_test_nlu_argument_group(nlu_arguments) + add_no_plot_param(parser) + add_errors_success_params(parser) + add_out_param( + parser, + default=DEFAULT_RESULTS_PATH, + help_text="Output path for any files created during the evaluation.", + ) + def set_test_core_arguments(parser: argparse.ArgumentParser): add_test_core_model_param(parser) - add_test_core_argument_group(parser) + add_test_core_argument_group(parser, include_e2e_argument=True) def set_test_nlu_arguments(parser: argparse.ArgumentParser): @@ -34,7 +42,8 @@ def set_test_nlu_arguments(parser: argparse.ArgumentParser): def add_test_core_argument_group( - parser: Union[argparse.ArgumentParser, argparse._ActionsContainer] + parser: Union[argparse.ArgumentParser, argparse._ActionsContainer], + include_e2e_argument: bool = False, ): add_stories_param(parser, "test") parser.add_argument( @@ -45,14 +54,15 @@ def add_test_core_argument_group( default=DEFAULT_RESULTS_PATH, help_text="Output path for any files created during the evaluation.", ) - parser.add_argument( - "--e2e", - "--end-to-end", - action="store_true", - help="Run an end-to-end evaluation for combined action and " - "intent prediction. Requires a story file in end-to-end " - "format.", - ) + if include_e2e_argument: + parser.add_argument( + "--e2e", + "--end-to-end", + action="store_true", + help="Run an end-to-end evaluation for combined action and " + "intent prediction. Requires a story file in end-to-end " + "format.", + ) add_endpoint_param( parser, help_text="Configuration file for the connectors as a yml file." 
) @@ -79,6 +89,8 @@ def add_test_core_argument_group( "All models in the provided directory are evaluated " "and compared against each other.", ) + add_no_plot_param(parser) + add_errors_success_params(parser) def add_test_nlu_argument_group( @@ -91,33 +103,6 @@ def add_test_nlu_argument_group( default=DEFAULT_RESULTS_PATH, help_text="Output path for any files created during the evaluation.", ) - - parser.add_argument( - "--successes", - action="store_true", - default=False, - help="If set successful predictions (intent and entities) will be written " - "to a file.", - ) - parser.add_argument( - "--no-errors", - action="store_true", - default=False, - help="If set incorrect predictions (intent and entities) will NOT be written " - "to a file.", - ) - parser.add_argument( - "--histogram", - required=False, - default="hist.png", - help="Output path for the confidence histogram.", - ) - parser.add_argument( - "--confmat", - required=False, - default="confmat.png", - help="Output path for the confusion matrix plot.", - ) parser.add_argument( "-c", "--config", @@ -140,7 +125,7 @@ def add_test_nlu_argument_group( "-f", "--folds", required=False, - default=10, + default=5, help="Number of cross validation folds (cross validation only).", ) comparison_arguments = parser.add_argument_group("Comparison Mode") @@ -162,6 +147,9 @@ def add_test_nlu_argument_group( help="Percentages of training data to exclude during comparison.", ) + add_no_plot_param(parser) + add_errors_success_params(parser) + def add_test_core_model_param(parser: argparse.ArgumentParser): default_path = get_latest_model(DEFAULT_MODELS_PATH) @@ -175,3 +163,31 @@ def add_test_core_model_param(parser: argparse.ArgumentParser): "will be used (exception: '--evaluate-model-directory' flag is set). 
If multiple " "'tar.gz' files are provided, all those models will be compared.", ) + + +def add_no_plot_param( + parser: argparse.ArgumentParser, default: bool = False, required: bool = False +) -> None: + parser.add_argument( + "--no-plot", + dest="disable_plotting", + action="store_true", + default=default, + help="Don't render evaluation plots.", + required=required, + ) + + +def add_errors_success_params(parser: argparse.ArgumentParser) -> None: + parser.add_argument( + "--successes", + action="store_true", + default=False, + help="If set successful predictions will be written to a file.", + ) + parser.add_argument( + "--no-errors", + action="store_true", + default=False, + help="If set incorrect predictions will NOT be written to a file.", + ) diff --git a/rasa/cli/arguments/train.py b/rasa/cli/arguments/train.py index 6cb1d6be5062..396e532db176 100644 --- a/rasa/cli/arguments/train.py +++ b/rasa/cli/arguments/train.py @@ -19,9 +19,11 @@ def set_train_arguments(parser: argparse.ArgumentParser): add_augmentation_param(parser) add_debug_plots_param(parser) - add_dump_stories_param(parser) + + add_num_threads_param(parser) add_model_name_param(parser) + add_persist_nlu_data_param(parser) add_force_param(parser) @@ -33,7 +35,6 @@ def set_train_core_arguments(parser: argparse.ArgumentParser): add_augmentation_param(parser) add_debug_plots_param(parser) - add_dump_stories_param(parser) add_force_param(parser) @@ -49,7 +50,10 @@ def set_train_nlu_arguments(parser: argparse.ArgumentParser): add_nlu_data_param(parser, help_text="File or folder containing your NLU data.") + add_num_threads_param(parser) + add_model_name_param(parser) + add_persist_nlu_data_param(parser) def add_force_param(parser: Union[argparse.ArgumentParser, argparse._ActionsContainer]): @@ -107,27 +111,27 @@ def add_augmentation_param( ) -def add_dump_stories_param( +def add_debug_plots_param( parser: Union[argparse.ArgumentParser, argparse._ActionsContainer] ): parser.add_argument( - "--dump-stories", + "--debug-plots", default=False, action="store_true", - help="If enabled, save flattened stories to a file.", + help="If enabled, will create plots showing checkpoints " + "and their connections between story blocks in a " + "file called `story_blocks_connections.html`.", ) -def add_debug_plots_param( +def add_num_threads_param( parser: Union[argparse.ArgumentParser, argparse._ActionsContainer] ): parser.add_argument( - "--debug-plots", - default=False, - action="store_true", - help="If enabled, will create plots showing checkpoints " - "and their connections between story blocks in a " - "file called `story_blocks_connections.html`.", + "--num-threads", + type=int, + default=1, + help="Maximum amount of threads to use when training.", ) @@ -138,3 +142,13 @@ def add_model_name_param(parser: argparse.ArgumentParser): help="If set, the name of the model file/directory will be set to the given " "name.", ) + + +def add_persist_nlu_data_param( + parser: Union[argparse.ArgumentParser, argparse._ActionsContainer] +): + parser.add_argument( + "--persist-nlu-data", + action="store_true", + help="Persist the nlu training data in the saved model.", + ) diff --git a/rasa/cli/arguments/x.py b/rasa/cli/arguments/x.py index 88c3871cfb60..11d4580e91c1 100644 --- a/rasa/cli/arguments/x.py +++ b/rasa/cli/arguments/x.py @@ -1,14 +1,16 @@ import argparse - -from rasa.cli.arguments.default_arguments import add_model_param, add_data_param +from rasa.cli.arguments import default_arguments from rasa.cli.arguments.run import add_server_arguments 
from rasa.constants import DEFAULT_DATA_PATH, DEFAULT_RASA_X_PORT def set_x_arguments(parser: argparse.ArgumentParser): - add_model_param(parser, add_positional_arg=False) + default_arguments.add_model_param(parser, add_positional_arg=False) - add_data_param(parser, default=DEFAULT_DATA_PATH, data_type="stories and Rasa NLU ") + default_arguments.add_data_param( + parser, default=DEFAULT_DATA_PATH, data_type="stories and Rasa NLU " + ) + default_arguments.add_config_param(parser) parser.add_argument( "--no-prompt", diff --git a/rasa/cli/data.py b/rasa/cli/data.py index 0f8009e035fe..9153a25e37db 100644 --- a/rasa/cli/data.py +++ b/rasa/cli/data.py @@ -1,20 +1,37 @@ +import logging import argparse import asyncio -import sys +import os +from pathlib import Path from typing import List from rasa import data from rasa.cli.arguments import data as arguments -from rasa.cli.utils import get_validated_path +import rasa.cli.utils from rasa.constants import DEFAULT_DATA_PATH +from rasa.core.interpreter import RegexInterpreter +from rasa.core.training.story_reader.markdown_story_reader import MarkdownStoryReader +from rasa.core.training.story_writer.yaml_story_writer import YAMLStoryWriter +from rasa.nlu.convert import convert_training_data +from rasa.nlu.training_data.formats import MarkdownReader +from rasa.nlu.training_data.formats.rasa_yaml import RasaYAMLWriter +from rasa.validator import Validator +from rasa.importers.rasa import RasaFileImporter +from rasa.cli.utils import ( + print_success, + print_error_and_exit, + print_info, + print_warning, +) + +logger = logging.getLogger(__name__) +CONVERTED_FILE_SUFFIX = "_converted.yml" # noinspection PyProtectedMember def add_subparser( subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser] ): - import rasa.nlu.convert as convert - data_parser = subparsers.add_parser( "data", conflict_handler="resolve", @@ -25,6 +42,17 @@ def add_subparser( data_parser.set_defaults(func=lambda _: data_parser.print_help(None)) data_subparsers = data_parser.add_subparsers() + + _add_data_convert_parsers(data_subparsers, parents) + _add_data_split_parsers(data_subparsers, parents) + _add_data_validate_parsers(data_subparsers, parents) + + +def _add_data_convert_parsers( + data_subparsers, parents: List[argparse.ArgumentParser] +) -> None: + from rasa.nlu import convert + convert_parser = data_subparsers.add_parser( "convert", formatter_class=argparse.ArgumentDefaultsHelpFormatter, @@ -38,12 +66,26 @@ def add_subparser( "nlu", formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=parents, - help="Converts NLU data between Markdown and json formats.", + help="Converts NLU data between formats.", + ) + convert_nlu_parser.set_defaults(func=_convert_nlu_data) + + arguments.set_convert_arguments(convert_nlu_parser, data_type="Rasa NLU") + + convert_core_parser = convert_subparsers.add_parser( + "core", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + parents=parents, + help="Converts Core data between formats.", ) - convert_nlu_parser.set_defaults(func=convert.main) + convert_core_parser.set_defaults(func=_convert_core_data) - arguments.set_convert_arguments(convert_nlu_parser) + arguments.set_convert_arguments(convert_core_parser, data_type="Rasa Core") + +def _add_data_split_parsers( + data_subparsers, parents: List[argparse.ArgumentParser] +) -> None: split_parser = data_subparsers.add_parser( "split", formatter_class=argparse.ArgumentDefaultsHelpFormatter, @@ -64,44 +106,187 @@ def add_subparser( 
arguments.set_split_arguments(nlu_split_parser) + +def _add_data_validate_parsers( + data_subparsers, parents: List[argparse.ArgumentParser] +) -> None: validate_parser = data_subparsers.add_parser( "validate", formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=parents, help="Validates domain and data files to check for possible mistakes.", ) + _append_story_structure_arguments(validate_parser) validate_parser.set_defaults(func=validate_files) arguments.set_validator_arguments(validate_parser) + validate_subparsers = validate_parser.add_subparsers() + story_structure_parser = validate_subparsers.add_parser( + "stories", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + parents=parents, + help="Checks for inconsistencies in the story files.", + ) + _append_story_structure_arguments(story_structure_parser) + story_structure_parser.set_defaults(func=validate_stories) + arguments.set_validator_arguments(story_structure_parser) -def split_nlu_data(args): + +def _append_story_structure_arguments(parser: argparse.ArgumentParser) -> None: + parser.add_argument( + "--max-history", + type=int, + default=None, + help="Number of turns taken into account for story structure validation.", + ) + + +def split_nlu_data(args: argparse.Namespace) -> None: from rasa.nlu.training_data.loading import load_data from rasa.nlu.training_data.util import get_file_format - data_path = get_validated_path(args.nlu, "nlu", DEFAULT_DATA_PATH) + data_path = rasa.cli.utils.get_validated_path(args.nlu, "nlu", DEFAULT_DATA_PATH) data_path = data.get_nlu_directory(data_path) nlu_data = load_data(data_path) fformat = get_file_format(data_path) - train, test = nlu_data.train_test_split(args.training_fraction) - - train.persist(args.out, filename="training_data.{}".format(fformat)) - test.persist(args.out, filename="test_data.{}".format(fformat)) + train, test = nlu_data.train_test_split(args.training_fraction, args.random_seed) + train.persist(args.out, filename=f"training_data.{fformat}") + test.persist(args.out, filename=f"test_data.{fformat}") -def validate_files(args): - """Validate all files needed for training a model. - Fails with a non-zero exit code if there are any errors in the data.""" - from rasa.core.validator import Validator - from rasa.importers.rasa import RasaFileImporter +def validate_files(args: argparse.Namespace, stories_only: bool = False) -> None: + """ + Validates either the story structure or the entire project. + Args: + args: Commandline arguments + stories_only: If `True`, only the story structure is validated. 
+ """ loop = asyncio.get_event_loop() file_importer = RasaFileImporter( domain_path=args.domain, training_data_paths=args.data ) validator = loop.run_until_complete(Validator.from_importer(file_importer)) - everything_is_alright = validator.verify_all(not args.fail_on_warnings) - sys.exit(0) if everything_is_alright else sys.exit(1) + + if stories_only: + all_good = _validate_story_structure(validator, args) + else: + all_good = ( + _validate_domain(validator) + and _validate_nlu(validator, args) + and _validate_story_structure(validator, args) + ) + + if not all_good: + rasa.cli.utils.print_error_and_exit("Project validation completed with errors.") + + +def validate_stories(args: argparse.Namespace) -> None: + validate_files(args, stories_only=True) + + +def _validate_domain(validator: Validator) -> bool: + return validator.verify_domain_validity() + + +def _validate_nlu(validator: Validator, args: argparse.Namespace) -> bool: + return validator.verify_nlu(not args.fail_on_warnings) + + +def _validate_story_structure(validator: Validator, args: argparse.Namespace) -> bool: + # Check if a valid setting for `max_history` was given + if isinstance(args.max_history, int) and args.max_history < 1: + raise argparse.ArgumentTypeError( + f"The value of `--max-history {args.max_history}` is not a positive integer." + ) + + return validator.verify_story_structure( + not args.fail_on_warnings, max_history=args.max_history + ) + + +def _convert_nlu_data(args: argparse.Namespace) -> None: + if args.format in ["json", "md"]: + convert_training_data(args.data, args.out, args.format, args.language) + elif args.format == "yaml": + _convert_to_yaml(args, True) + else: + print_error_and_exit( + "Could not recognize output format. Supported output formats: 'json', " + "'md', 'yaml'. Specify the desired output format with '--format'." + ) + + +def _convert_core_data(args: argparse.Namespace) -> None: + if args.format == "yaml": + _convert_to_yaml(args, False) + else: + print_error_and_exit( + "Could not recognize output format. Supported output formats: " + "'yaml'. Specify the desired output format with '--format'." + ) + + +def _convert_to_yaml(args: argparse.Namespace, is_nlu: bool) -> None: + + output = Path(args.out) + if not os.path.exists(output): + print_error_and_exit( + f"The output path '{output}' doesn't exist. Please make sure to specify " + f"an existing directory and try again." + ) + + training_data = Path(args.data) + if not os.path.exists(training_data): + print_error_and_exit( + f"The training data path {training_data} doesn't exist " + f"and will be skipped." 
+ ) + + num_of_files_converted = 0 + for file in os.listdir(training_data): + source_path = training_data / file + output_path = output / f"{source_path.stem}{CONVERTED_FILE_SUFFIX}" + + if MarkdownReader.is_markdown_nlu_file(source_path): + if not is_nlu: + continue + _write_nlu_yaml(source_path, output_path, source_path) + num_of_files_converted += 1 + elif not is_nlu and MarkdownStoryReader.is_markdown_story_file(source_path): + _write_core_yaml(source_path, output_path, source_path) + num_of_files_converted += 1 + else: + print_warning(f"Skipped file: '{source_path}'.") + + print_info(f"Converted {num_of_files_converted} file(s), saved in '{output}'.") + + +def _write_nlu_yaml( + training_data_path: Path, output_path: Path, source_path: Path +) -> None: + reader = MarkdownReader() + writer = RasaYAMLWriter() + + training_data = reader.read(training_data_path) + writer.dump(output_path, training_data) + + print_success(f"Converted NLU file: '{source_path}' >> '{output_path}'.") + + +def _write_core_yaml( + training_data_path: Path, output_path: Path, source_path: Path +) -> None: + reader = MarkdownStoryReader(RegexInterpreter()) + writer = YAMLStoryWriter() + + loop = asyncio.get_event_loop() + steps = loop.run_until_complete(reader.read_from_file(training_data_path)) + + writer.dump(output_path, steps) + + print_success(f"Converted Core file: '{source_path}' >> '{output_path}'.") diff --git a/rasa/cli/default_config.yml b/rasa/cli/default_config.yml index b33def9720bf..93b81250a29c 100644 --- a/rasa/cli/default_config.yml +++ b/rasa/cli/default_config.yml @@ -3,7 +3,7 @@ language: en pipeline: supervised_embeddings policies: - - name: KerasPolicy + - name: TEDPolicy epochs: 200 batch_size: 50 max_training_samples: 300 diff --git a/rasa/cli/export.py b/rasa/cli/export.py new file mode 100644 index 000000000000..98605b4268dc --- /dev/null +++ b/rasa/cli/export.py @@ -0,0 +1,235 @@ +import argparse +import logging +import typing +from typing import List, Text, Optional + +import rasa.cli.utils as cli_utils +import rasa.core.utils as rasa_core_utils +from rasa.cli.arguments import export as arguments +from rasa.constants import DOCS_URL_TRACKER_STORES, DOCS_URL_EVENT_BROKERS +from rasa.exceptions import PublishingError, RasaException + +if typing.TYPE_CHECKING: + from rasa.core.brokers.broker import EventBroker + from rasa.core.brokers.pika import PikaEventBroker + from rasa.core.tracker_store import TrackerStore + from rasa.core.exporter import Exporter + from rasa.core.utils import AvailableEndpoints + +logger = logging.getLogger(__name__) + + +# noinspection PyProtectedMember +def add_subparser( + subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser] +) -> None: + """Add subparser for `rasa export`. + + Args: + subparsers: Subparsers action object to which `argparse.ArgumentParser` + objects can be added. + parents: `argparse.ArgumentParser` objects whose arguments should also be + included. + """ + export_parser = subparsers.add_parser( + "export", + parents=parents, + conflict_handler="resolve", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + help="Export conversations using an event broker.", + ) + export_parser.set_defaults(func=export_trackers) + + arguments.set_export_arguments(export_parser) + + +def _get_tracker_store(endpoints: "AvailableEndpoints") -> "TrackerStore": + """Get `TrackerStore` from `endpoints`. + + Prints an error and exits if no tracker store could be loaded. 
+ + Args: + endpoints: `AvailableEndpoints` to initialize the tracker store from. + + Returns: + Initialized tracker store. + + """ + if not endpoints.tracker_store: + cli_utils.print_error_and_exit( + f"Could not find a `tracker_store` section in the supplied " + f"endpoints file. Instructions on how to configure a tracker store " + f"can be found here: {DOCS_URL_TRACKER_STORES}. " + f"Exiting. " + ) + + from rasa.core.tracker_store import TrackerStore + + return TrackerStore.create(endpoints.tracker_store) + + +def _get_event_broker(endpoints: "AvailableEndpoints") -> Optional["EventBroker"]: + """Get `EventBroker` from `endpoints`. + + Prints an error and exits if no event broker could be loaded. + + Args: + endpoints: `AvailableEndpoints` to initialize the event broker from. + + Returns: + Initialized event broker. + + """ + if not endpoints.event_broker: + cli_utils.print_error_and_exit( + f"Could not find an `event_broker` section in the supplied " + f"endpoints file. Instructions on how to configure an event broker " + f"can be found here: {DOCS_URL_EVENT_BROKERS}. Exiting." + ) + + from rasa.core.brokers.broker import EventBroker + + return EventBroker.create(endpoints.event_broker) + + +def _get_requested_conversation_ids( + conversation_ids_arg: Optional[Text] = None, +) -> Optional[List[Text]]: + """Get list of conversation IDs requested as a command-line argument. + + Args: + conversation_ids_arg: Value of `--conversation-ids` command-line argument. + If provided, this is a string of comma-separated conversation IDs. + + Return: + List of conversation IDs requested as a command-line argument. + `None` if that argument was left unspecified. + + """ + if not conversation_ids_arg: + return None + + return conversation_ids_arg.split(",") + + +def _assert_max_timestamp_is_greater_than_min_timestamp( + args: argparse.Namespace, +) -> None: + """Inspect CLI timestamp parameters. + + Prints an error and exits if a maximum timestamp is provided that is smaller + than the provided minimum timestamp. + + Args: + args: Command-line arguments to process. + + """ + min_timestamp = args.minimum_timestamp + max_timestamp = args.maximum_timestamp + + if ( + min_timestamp is not None + and max_timestamp is not None + and max_timestamp < min_timestamp + ): + cli_utils.print_error_and_exit( + f"Maximum timestamp '{max_timestamp}' is smaller than minimum " + f"timestamp '{min_timestamp}'. Exiting." + ) + + +def _prepare_event_broker(event_broker: "EventBroker") -> None: + """Sets `should_keep_unpublished_messages` flag to `False` if + `self.event_broker` is a `PikaEventBroker`. + + If publishing of events fails, the `PikaEventBroker` instance should not keep a + list of unpublished messages, so we can retry publishing them. This is because + the instance is launched as part of this short-lived export script, meaning the + object is destroyed before it might be published. + + In addition, wait until the event broker reports a `ready` state. + + """ + from rasa.core.brokers.pika import PikaEventBroker + + if isinstance(event_broker, PikaEventBroker): + event_broker.should_keep_unpublished_messages = False + event_broker.raise_on_failure = True + + if not event_broker.is_ready(): + cli_utils.print_error_and_exit( + f"Event broker of type '{type(event_broker)}' is not ready. Exiting." + ) + + +def export_trackers(args: argparse.Namespace) -> None: + """Export events for a connected tracker store using an event broker. + + Args: + args: Command-line arguments to process. 
+ + """ + _assert_max_timestamp_is_greater_than_min_timestamp(args) + + endpoints = rasa_core_utils.read_endpoints_from_path(args.endpoints) + tracker_store = _get_tracker_store(endpoints) + event_broker = _get_event_broker(endpoints) + _prepare_event_broker(event_broker) + requested_conversation_ids = _get_requested_conversation_ids(args.conversation_ids) + + from rasa.core.exporter import Exporter + + exporter = Exporter( + tracker_store, + event_broker, + args.endpoints, + requested_conversation_ids, + args.minimum_timestamp, + args.maximum_timestamp, + ) + + try: + published_events = exporter.publish_events() + cli_utils.print_success( + f"Done! Successfully published {published_events} events 🎉" + ) + + except PublishingError as e: + command = _get_continuation_command(exporter, e.timestamp) + cli_utils.print_error_and_exit( + f"Encountered error while publishing event with timestamp '{e}'. To " + f"continue where I left off, run the following command:" + f"\n\n\t{command}\n\nExiting." + ) + + except RasaException as e: + cli_utils.print_error_and_exit(str(e)) + + +def _get_continuation_command(exporter: "Exporter", timestamp: float) -> Text: + """Build CLI command to continue 'rasa export' where it was interrupted. + + Called when event publishing stops due to an error. + + Args: + exporter: Exporter object containing objects relevant for this export. + timestamp: Timestamp of the last event attempted to be published. + + """ + # build CLI command command based on supplied timestamp and options + command = "rasa export" + + if exporter.endpoints_path is not None: + command += f" --endpoints {exporter.endpoints_path}" + + command += f" --minimum-timestamp {timestamp}" + + if exporter.maximum_timestamp is not None: + command += f" --maximum-timestamp {exporter.maximum_timestamp}" + + if exporter.requested_conversation_ids: + command += ( + f" --conversation-ids {','.join(exporter.requested_conversation_ids)}" + ) + + return command diff --git a/rasa/cli/initial_project/actions.py b/rasa/cli/initial_project/actions.py index ba9fe0a58945..341e5372168a 100644 --- a/rasa/cli/initial_project/actions.py +++ b/rasa/cli/initial_project/actions.py @@ -22,6 +22,6 @@ # tracker: Tracker, # domain: Dict[Text, Any]) -> List[Dict[Text, Any]]: # -# dispatcher.utter_message("Hello World!") +# dispatcher.utter_message(text="Hello World!") # # return [] diff --git a/rasa/cli/initial_project/config.yml b/rasa/cli/initial_project/config.yml index 3351bb25e810..fe840bb451c9 100644 --- a/rasa/cli/initial_project/config.yml +++ b/rasa/cli/initial_project/config.yml @@ -1,11 +1,11 @@ # Configuration for Rasa NLU. # https://rasa.com/docs/rasa/nlu/components/ language: en -pipeline: supervised_embeddings + +pipeline: +# This key can be left empty. The pipeline will then be provided from a default configuration during training. # Configuration for Rasa Core. # https://rasa.com/docs/rasa/core/policies/ policies: - - name: MemoizationPolicy - - name: KerasPolicy - - name: MappingPolicy +# This key can be left empty. Policies will then be provided from a default configuration during training. 
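The scaffolded config.yml above now ships with empty `pipeline:` and `policies:` keys, which are filled in from a default configuration at training time. For projects that prefer to pin their setup explicitly rather than rely on that auto-configuration, a hand-written config might look roughly like the sketch below — the component choices, epochs and max_history values are illustrative assumptions, not part of this change:

# illustrative, hand-written alternative to the auto-configured defaults
language: en
pipeline:
  - name: WhitespaceTokenizer
  - name: CountVectorsFeaturizer
  - name: DIETClassifier
    epochs: 100        # placeholder value
policies:
  - name: MemoizationPolicy
  - name: TEDPolicy
    max_history: 5     # placeholder value
    epochs: 100        # placeholder value
  - name: MappingPolicy

Explicit values take precedence; only keys that are left empty are auto-configured (see CONFIG_AUTOCONFIGURABLE_KEYS = ["policies", "pipeline"] added to rasa/constants.py later in this diff).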
diff --git a/rasa/cli/initial_project/credentials.yml b/rasa/cli/initial_project/credentials.yml index d8abc16ae735..56b52608b063 100644 --- a/rasa/cli/initial_project/credentials.yml +++ b/rasa/cli/initial_project/credentials.yml @@ -15,11 +15,19 @@ rest: #slack: # slack_token: "<your slack token>" # slack_channel: "<the slack channel>" +# proxy: "<your HTTP outgoing proxy>" #socketio: # user_message_evt: <event name for user message> -# bot_message_evt: <event name for but messages> +# bot_message_evt: <event name for bot messages> # session_persistence: <true/false> +#mattermost: +# url: "https://<mattermost instance>/api/v4" +# token: "<bot token>" +# webhook_url: "<callback URL>" + +# This entry is needed if you are using Rasa X. The entry represents credentials +# for the Rasa X "channel", i.e. Talk to your bot and Share with guest testers. rasa: url: "http://localhost:5002/api" diff --git a/rasa/cli/initial_project/data/nlu.yml b/rasa/cli/initial_project/data/nlu.yml new file mode 100644 index 000000000000..d66033737e3e --- /dev/null +++ b/rasa/cli/initial_project/data/nlu.yml @@ -0,0 +1,91 @@ +version: "2.0" + +nlu: +- intent: greet + examples: | + - hey + - hello + - hi + - hello there + - good morning + - good evening + - moin + - hey there + - let's go + - hey dude + - goodmorning + - goodevening + - good afternoon + +- intent: goodbye + examples: | + - good afternoon + - cu + - good by + - cee you later + - good night + - bye + - goodbye + - have a nice day + - see you around + - bye bye + - see you later + +- intent: affirm + examples: | + - yes + - y + - indeed + - of course + - that sounds good + - correct + +- intent: deny + examples: | + - no + - n + - never + - I don't think so + - don't like that + - no way + +- intent: mood_great + examples: | + - perfect + - great + - amazing + - feeling like a king + - wonderful + - I am feeling very good + - I am great + - I am amazing + - I am going to save the world + - super stoked + - extremely good + - so so perfect + - so good + - so perfect + +- intent: mood_unhappy + examples: | + - my day was horrible + - I am sad + - I don't feel very well + - I am disappointed + - super sad + - I'm so sad + - sad + - very sad + - unhappy + - not good + - not very good + - extremly sad + - so saad + - so sad + +- intent: bot_challenge + examples: | + - are you a bot? + - are you a human? + - am I talking to a bot? + - am I talking to a human? 
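The data/nlu.yml scaffold above is written in the 2.0 YAML training data format that the new `rasa data convert nlu` command (run with `--format yaml`, see rasa/cli/data.py earlier in this diff) also produces: Markdown files are read with MarkdownReader, written out with RasaYAMLWriter, and saved to the output directory with the `_converted.yml` suffix, while files that are not recognized as Markdown are skipped with a warning. The scaffold only needs plain intent examples, but the same format can also carry entity annotations and synonyms; a rough sketch, where the intent, entity and synonym names are illustrative assumptions rather than part of this change:

# illustrative example only, not shipped with the scaffold
version: "2.0"
nlu:
- intent: inform
  examples: |
    - I would like some [chinese](cuisine) food
    - book a table for [two](number) please
- synonym: chinese
  examples: |
    - Chinese
    - chines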
diff --git a/rasa/cli/initial_project/data/rules.yml b/rasa/cli/initial_project/data/rules.yml new file mode 100644 index 000000000000..51f030d081ae --- /dev/null +++ b/rasa/cli/initial_project/data/rules.yml @@ -0,0 +1,13 @@ +version: "2.0" + +rules: + +- rule: Say goodbye anytime the user says goodbye + steps: + - intent: goodbye + - action: utter_goodbye + +- rule: Say 'I am a bot' anytime the user challenges + steps: + - intent: bot_challenge + - action: utter_iamabot diff --git a/rasa/cli/initial_project/data/stories.yml b/rasa/cli/initial_project/data/stories.yml new file mode 100644 index 000000000000..077651b970cb --- /dev/null +++ b/rasa/cli/initial_project/data/stories.yml @@ -0,0 +1,30 @@ +version: "2.0" + +stories: + +- story: happy path + steps: + - intent: greet + - action: utter_greet + - intent: mood_great + - action: utter_happy + +- story: sad path 1 + steps: + - intent: greet + - action: utter_greet + - intent: mood_unhappy + - action: utter_cheer_up + - action: utter_did_that_help + - intent: affirm + - action: utter_happy + +- story: sad path 2 + steps: + - intent: greet + - action: utter_greet + - intent: mood_unhappy + - action: utter_cheer_up + - action: utter_did_that_help + - intent: deny + - action: utter_goodbye diff --git a/rasa/cli/initial_project/domain.yml b/rasa/cli/initial_project/domain.yml index c4772bb14737..b522557791c1 100644 --- a/rasa/cli/initial_project/domain.yml +++ b/rasa/cli/initial_project/domain.yml @@ -1,3 +1,5 @@ +version: "2.0" + intents: - greet - goodbye @@ -7,15 +9,7 @@ intents: - mood_unhappy - bot_challenge -actions: -- utter_greet -- utter_cheer_up -- utter_did_that_help -- utter_happy -- utter_goodbye -- utter_iamabot - -templates: +responses: utter_greet: - text: "Hey! How are you?" @@ -34,3 +28,7 @@ templates: utter_iamabot: - text: "I am a bot, powered by Rasa." + +session_config: + session_expiration_time: 60 + carry_over_slots_to_new_session: true diff --git a/rasa/cli/initial_project/endpoints.yml b/rasa/cli/initial_project/endpoints.yml index e27f6c516c24..2330d15c929e 100644 --- a/rasa/cli/initial_project/endpoints.yml +++ b/rasa/cli/initial_project/endpoints.yml @@ -1,7 +1,7 @@ # This file contains the different endpoints your bot can use. # Server where the models are pulled from. -# https://rasa.com/docs/rasa/user-guide/running-the-server/#fetching-models-from-a-server/ +# https://rasa.com/docs/rasa/user-guide/configuring-http-api/#fetching-models-from-a-server/ #models: # url: http://my-server.com/models/default_core@latest @@ -23,6 +23,7 @@ # port: <port of your redis instance, usually 6379> # db: <number of your database within redis, e.g. 0> # password: <password used for authentication> +# use_ssl: <whether or not the communication is encrypted, default false> #tracker_store: # type: mongod diff --git a/rasa/cli/initial_project/tests/conversation_tests.md b/rasa/cli/initial_project/tests/conversation_tests.md new file mode 100644 index 000000000000..d7bcbfcbfe4e --- /dev/null +++ b/rasa/cli/initial_project/tests/conversation_tests.md @@ -0,0 +1,51 @@ +#### This file contains tests to evaluate that your bot behaves as expected. +#### If you want to learn more, please see the docs: https://rasa.com/docs/rasa/user-guide/testing-your-assistant/ + +## happy path 1 +* greet: hello there! + - utter_greet +* mood_great: amazing + - utter_happy + +## happy path 2 +* greet: hello there! + - utter_greet +* mood_great: amazing + - utter_happy +* goodbye: bye-bye! 
+ - utter_goodbye + +## sad path 1 +* greet: hello + - utter_greet +* mood_unhappy: not good + - utter_cheer_up + - utter_did_that_help +* affirm: yes + - utter_happy + +## sad path 2 +* greet: hello + - utter_greet +* mood_unhappy: not good + - utter_cheer_up + - utter_did_that_help +* deny: not really + - utter_goodbye + +## sad path 3 +* greet: hi + - utter_greet +* mood_unhappy: very terrible + - utter_cheer_up + - utter_did_that_help +* deny: no + - utter_goodbye + +## say goodbye +* goodbye: bye-bye! + - utter_goodbye + +## bot challenge +* bot_challenge: are you a bot? + - utter_iamabot diff --git a/rasa/cli/interactive.py b/rasa/cli/interactive.py index 11e7f7063a73..25ab9f05c932 100644 --- a/rasa/cli/interactive.py +++ b/rasa/cli/interactive.py @@ -1,22 +1,21 @@ import argparse +import asyncio +import logging import os -from typing import List, Text +from typing import List, Optional, Text +from rasa.cli import utils import rasa.cli.train as train from rasa.cli.arguments import interactive as arguments -from rasa import data, model +from rasa import model +from rasa.constants import DEFAULT_MODELS_PATH, DEFAULT_ENDPOINTS_PATH +from rasa.importers.importer import TrainingDataImporter -# noinspection PyProtectedMember -from rasa.cli.utils import get_validated_path, print_error -from rasa.constants import ( - DEFAULT_DATA_PATH, - DEFAULT_MODELS_PATH, - DEFAULT_ENDPOINTS_PATH, -) -from rasa.model import get_latest_model +logger = logging.getLogger(__name__) +# noinspection PyProtectedMember def add_subparser( subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser] ): @@ -28,12 +27,7 @@ def add_subparser( help="Starts an interactive learning session to create new training data for a " "Rasa model by chatting.", ) - interactive_parser.set_defaults(func=interactive) - interactive_parser.add_argument( - "--e2e", - action="store_true", - help="Save story files in e2e format. In this format user messages will be included in the stories.", - ) + interactive_parser.set_defaults(func=interactive, core_only=False) interactive_subparsers = interactive_parser.add_subparsers() interactive_core_parser = interactive_subparsers.add_parser( @@ -45,77 +39,76 @@ def add_subparser( "for a Rasa Core model by chatting. Uses the 'RegexInterpreter', i.e. " "`/<intent>` input format.", ) - interactive_core_parser.set_defaults(func=interactive_core) + interactive_core_parser.set_defaults(func=interactive, core_only=True) arguments.set_interactive_arguments(interactive_parser) arguments.set_interactive_core_arguments(interactive_core_parser) -def interactive(args: argparse.Namespace): - args.fixed_model_name = None - args.store_uncompressed = False +def interactive(args: argparse.Namespace) -> None: + _set_not_required_args(args) + file_importer = TrainingDataImporter.load_from_config( + args.config, args.domain, args.data + ) if args.model is None: - check_training_data(args) - zipped_model = train.train(args) + loop = asyncio.get_event_loop() + story_graph = loop.run_until_complete(file_importer.get_stories()) + if not story_graph or story_graph.is_empty(): + utils.print_error_and_exit( + "Could not run interactive learning without either core data or a model containing core data." + ) + + zipped_model = train.train_core(args) if args.core_only else train.train(args) + if not zipped_model: + utils.print_error_and_exit( + "Could not train an initial model. Either pass paths " + "to the relevant training files (`--data`, `--config`, `--domain`), " + "or use 'rasa train' to train a model." 
+ ) else: zipped_model = get_provided_model(args.model) + if not (zipped_model and os.path.exists(zipped_model)): + utils.print_error_and_exit( + f"Interactive learning process cannot be started as no initial model was " + f"found at path '{args.model}'. Use 'rasa train' to train a model." + ) + if not args.skip_visualization: + logger.info(f"Loading visualization data from {args.data}.") - perform_interactive_learning(args, zipped_model) + perform_interactive_learning(args, zipped_model, file_importer) -def interactive_core(args: argparse.Namespace): +def _set_not_required_args(args: argparse.Namespace) -> None: args.fixed_model_name = None args.store_uncompressed = False - if args.model is None: - zipped_model = train.train_core(args) - else: - zipped_model = get_provided_model(args.model) - - perform_interactive_learning(args, zipped_model) - -def perform_interactive_learning(args, zipped_model): +def perform_interactive_learning( + args: argparse.Namespace, zipped_model: Text, file_importer: TrainingDataImporter +) -> None: from rasa.core.train import do_interactive_learning - if zipped_model and os.path.exists(zipped_model): - args.model = zipped_model - - with model.unpack_model(zipped_model) as model_path: - args.core, args.nlu = model.get_model_subdirectories(model_path) - stories_directory = data.get_core_directory(args.data) + args.model = zipped_model - args.endpoints = get_validated_path( - args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True + with model.unpack_model(zipped_model) as model_path: + args.core, args.nlu = model.get_model_subdirectories(model_path) + if args.core is None: + utils.print_error_and_exit( + "Can not run interactive learning on an NLU-only model." ) - do_interactive_learning(args, stories_directory) - else: - print_error( - "Interactive learning process cannot be started as no initial model was " - "found. Use 'rasa train' to train a model." + args.endpoints = utils.get_validated_path( + args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True ) + do_interactive_learning(args, file_importer) + -def get_provided_model(arg_model: Text): - model_path = get_validated_path(arg_model, "model", DEFAULT_MODELS_PATH) +def get_provided_model(arg_model: Text) -> Optional[Text]: + model_path = utils.get_validated_path(arg_model, "model", DEFAULT_MODELS_PATH) if os.path.isdir(model_path): - model_path = get_latest_model(model_path) + model_path = model.get_latest_model(model_path) return model_path - - -def check_training_data(args): - training_files = [ - get_validated_path(f, "data", DEFAULT_DATA_PATH, none_is_valid=True) - for f in args.data - ] - story_files, nlu_files = data.get_core_nlu_files(training_files) - if not story_files or not nlu_files: - print_error( - "Cannot train initial Rasa model. Please provide NLU and Core data " - "using the '--data' argument." - ) - exit(1) diff --git a/rasa/cli/run.py b/rasa/cli/run.py index 10c81e0b51b6..f9e8910f53d0 100644 --- a/rasa/cli/run.py +++ b/rasa/cli/run.py @@ -49,22 +49,17 @@ def run_actions(args: argparse.Namespace): args.actions = args.actions or DEFAULT_ACTIONS_PATH - path = args.actions.replace(".", os.sep) + ".py" - _ = get_validated_path(path, "action", DEFAULT_ACTIONS_PATH) - sdk.main_from_args(args) def _validate_model_path(model_path: Text, parameter: Text, default: Text): if model_path is not None and not os.path.exists(model_path): - reason_str = "'{}' not found.".format(model_path) + reason_str = f"'{model_path}' not found." 
if model_path is None: - reason_str = "Parameter '{}' not set.".format(parameter) + reason_str = f"Parameter '{parameter}' not set." - logger.debug( - "{} Using default location '{}' instead.".format(reason_str, default) - ) + logger.debug(f"{reason_str} Using default location '{default}' instead.") os.makedirs(default, exist_ok=True) model_path = default @@ -128,6 +123,6 @@ def run(args: argparse.Namespace): "3. Train a model before running the server using `rasa train` and " "use '--model' to provide the model path.\n" "For more information check {}.".format( - DOCS_BASE_URL + "/user-guide/running-the-server/" + DOCS_BASE_URL + "/user-guide/configuring-http-api/" ) ) diff --git a/rasa/cli/scaffold.py b/rasa/cli/scaffold.py index 5b0ebb9a8bed..64766bea4504 100644 --- a/rasa/cli/scaffold.py +++ b/rasa/cli/scaffold.py @@ -4,7 +4,7 @@ import rasa.train from rasa.cli.shell import shell -from rasa.cli.utils import create_output_path, print_success +from rasa.cli.utils import create_output_path, print_success, print_error_and_exit from rasa.constants import ( DEFAULT_CONFIG_PATH, DEFAULT_DATA_PATH, @@ -28,6 +28,12 @@ def add_subparser( action="store_true", help="Automatically choose default options for prompts and suppress warnings.", ) + scaffold_parser.add_argument( + "--init-dir", + default=None, + help="Directory where your project should be initialized.", + ) + scaffold_parser.set_defaults(func=run) @@ -36,9 +42,11 @@ def print_train_or_instructions(args: argparse.Namespace, path: Text) -> None: print_success("Finished creating project structure.") - should_train = questionary.confirm( - "Do you want to train an initial model? 💪🏽" - ).skip_if(args.no_prompt, default=True) + should_train = ( + questionary.confirm("Do you want to train an initial model? 💪🏽") + .skip_if(args.no_prompt, default=True) + .ask() + ) if should_train: print_success("Training an initial model...") @@ -55,7 +63,6 @@ def print_train_or_instructions(args: argparse.Namespace, path: Text) -> None: print_success( "No problem 👍🏼. You can also train a model later by going " "to the project directory and running 'rasa train'." - "".format(path) ) @@ -91,11 +98,10 @@ def print_run_or_instructions(args: argparse.Namespace, path: Text) -> None: shell(args) else: if args.no_prompt: - print ( + print( "If you want to speak to the assistant, " "run 'rasa shell' at any time inside " "the project directory." - "".format(path) ) else: print_success( @@ -103,13 +109,12 @@ def print_run_or_instructions(args: argparse.Namespace, path: Text) -> None: "If you want to speak to the assistant, " "run 'rasa shell' at any time inside " "the project directory." - "".format(path) ) def init_project(args: argparse.Namespace, path: Text) -> None: create_initial_project(path) - print ("Created project directory at '{}'.".format(os.path.abspath(path))) + print("Created project directory at '{}'.".format(os.path.abspath(path))) print_train_or_instructions(args, path) @@ -134,7 +139,7 @@ def _ask_create_path(path: Text) -> None: import questionary should_create = questionary.confirm( - "Path '{}' does not exist 🧐. Create path?".format(path) + f"Path '{path}' does not exist 🧐. Create path?" ).ask() if should_create: os.makedirs(path) @@ -158,14 +163,14 @@ def run(args: argparse.Namespace) -> None: print_success("Welcome to Rasa! 
🤖\n") if args.no_prompt: - print ( + print( "To get started quickly, an " "initial project will be created.\n" "If you need some help, check out " "the documentation at {}.\n".format(DOCS_BASE_URL) ) else: - print ( + print( "To get started quickly, an " "initial project will be created.\n" "If you need some help, check out " @@ -173,17 +178,23 @@ def run(args: argparse.Namespace) -> None: "Now let's start! 👇🏽\n".format(DOCS_BASE_URL) ) - path = ( - questionary.text( - "Please enter a path where the project will be " - "created [default: current directory]", - default=".", + if args.init_dir is not None: + path = args.init_dir + else: + path = ( + questionary.text( + "Please enter a path where the project will be " + "created [default: current directory]", + default=".", + ) + .skip_if(args.no_prompt, default=".") + .ask() ) - .skip_if(args.no_prompt, default=".") - .ask() - ) - if not os.path.isdir(path): + if args.no_prompt and not os.path.isdir(path): + print_error_and_exit(f"Project init path '{path}' not found.") + + if path and not os.path.isdir(path): _ask_create_path(path) if path is None or not os.path.isdir(path): diff --git a/rasa/cli/shell.py b/rasa/cli/shell.py index ebbe4ac0dc1e..5410c8826e70 100644 --- a/rasa/cli/shell.py +++ b/rasa/cli/shell.py @@ -1,5 +1,6 @@ import argparse import logging +import uuid from typing import List @@ -7,7 +8,6 @@ from rasa.cli.utils import print_error from rasa.exceptions import ModelNotFound - logger = logging.getLogger(__name__) @@ -22,11 +22,22 @@ def add_subparser( parents=parents, conflict_handler="resolve", formatter_class=argparse.ArgumentDefaultsHelpFormatter, - help="Loads your trained model and lets you talk to your assistant on the command line.", + help=( + "Loads your trained model and lets you talk to your " + "assistant on the command line." 
+ ), ) shell_parser.set_defaults(func=shell) + shell_parser.add_argument( + "--conversation-id", + default=uuid.uuid4().hex, + required=False, + help="Set the conversation ID.", + ) + run_subparsers = shell_parser.add_subparsers() + shell_nlu_subparser = run_subparsers.add_parser( "nlu", parents=parents, @@ -34,6 +45,7 @@ def add_subparser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, help="Interprets messages on the command line using your NLU model.", ) + shell_nlu_subparser.set_defaults(func=shell_nlu) arguments.set_shell_arguments(shell_parser) diff --git a/rasa/cli/test.py b/rasa/cli/test.py index b2b06cf858ee..0666da94adf2 100644 --- a/rasa/cli/test.py +++ b/rasa/cli/test.py @@ -7,6 +7,7 @@ from rasa.constants import ( DEFAULT_CONFIG_PATH, DEFAULT_DATA_PATH, + DEFAULT_E2E_TESTS_PATH, DEFAULT_ENDPOINTS_PATH, DEFAULT_MODELS_PATH, DEFAULT_RESULTS_PATH, @@ -23,6 +24,7 @@ def add_subparser( subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser] ): + """Adds a test subparser.""" test_parser = subparsers.add_parser( "test", parents=parents, @@ -51,12 +53,13 @@ def add_subparser( ) arguments.set_test_nlu_arguments(test_nlu_parser) - test_core_parser.set_defaults(func=test_core) - test_nlu_parser.set_defaults(func=test_nlu) - test_parser.set_defaults(func=test) + test_core_parser.set_defaults(func=run_core_test) + test_nlu_parser.set_defaults(func=run_nlu_test) + test_parser.set_defaults(func=test, stories=DEFAULT_E2E_TESTS_PATH) -def test_core(args: argparse.Namespace) -> None: +def run_core_test(args: argparse.Namespace) -> None: + """Run core tests.""" from rasa import data from rasa.test import test_core_models_in_directory, test_core, test_core_models @@ -66,6 +69,7 @@ def test_core(args: argparse.Namespace) -> None: stories = cli_utils.get_validated_path(args.stories, "stories", DEFAULT_DATA_PATH) stories = data.get_core_directory(stories) output = args.out or DEFAULT_RESULTS_PATH + args.errors = not args.no_errors io_utils.create_directory(output) @@ -85,14 +89,15 @@ def test_core(args: argparse.Namespace) -> None: stories=stories, endpoints=endpoints, output=output, - kwargs=vars(args), + additional_arguments=vars(args), ) else: test_core_models(args.model, stories, output) -def test_nlu(args: argparse.Namespace) -> None: +def run_nlu_test(args: argparse.Namespace) -> None: + """Run NLU tests.""" from rasa import data from rasa.test import compare_nlu_models, perform_nlu_cross_validation, test_nlu @@ -124,7 +129,7 @@ def test_nlu(args: argparse.Namespace) -> None: config_files.append(file) except validation_utils.InvalidYamlFileError: logger.debug( - "Ignoring file '{}' as it is not a valid config file.".format(file) + f"Ignoring file '{file}' as it is not a valid config file." 
) continue @@ -150,5 +155,7 @@ def test_nlu(args: argparse.Namespace) -> None: def test(args: argparse.Namespace): - test_core(args) - test_nlu(args) + """Run end-to-end tests.""" + setattr(args, "e2e", True) + run_core_test(args) + run_nlu_test(args) diff --git a/rasa/cli/train.py b/rasa/cli/train.py index f5c4bf26335d..0ffff340f074 100644 --- a/rasa/cli/train.py +++ b/rasa/cli/train.py @@ -72,7 +72,9 @@ def train(args: argparse.Namespace) -> Optional[Text]: output=args.out, force_training=args.force, fixed_model_name=args.fixed_model_name, - kwargs=extract_additional_arguments(args), + persist_nlu_training_data=args.persist_nlu_data, + core_additional_arguments=extract_core_additional_arguments(args), + nlu_additional_arguments=extract_nlu_additional_arguments(args), ) @@ -91,7 +93,7 @@ def train_core( story_file = get_validated_path( args.stories, "stories", DEFAULT_DATA_PATH, none_is_valid=True ) - kwargs = extract_additional_arguments(args) + additional_arguments = extract_core_additional_arguments(args) # Policies might be a list for the compare training. Do normal training # if only list item was passed. @@ -108,12 +110,14 @@ def train_core( output=output, train_path=train_path, fixed_model_name=args.fixed_model_name, - kwargs=kwargs, + additional_arguments=additional_arguments, ) else: from rasa.core.train import do_compare_training - loop.run_until_complete(do_compare_training(args, story_file, kwargs)) + loop.run_until_complete( + do_compare_training(args, story_file, additional_arguments) + ) def train_nlu( @@ -134,27 +138,47 @@ def train_nlu( output=output, train_path=train_path, fixed_model_name=args.fixed_model_name, + persist_nlu_training_data=args.persist_nlu_data, + additional_arguments=extract_nlu_additional_arguments(args), ) -def extract_additional_arguments(args: argparse.Namespace) -> Dict: +def extract_core_additional_arguments(args: argparse.Namespace) -> Dict: arguments = {} if "augmentation" in args: arguments["augmentation_factor"] = args.augmentation - if "dump_stories" in args: - arguments["dump_stories"] = args.dump_stories if "debug_plots" in args: arguments["debug_plots"] = args.debug_plots return arguments +def extract_nlu_additional_arguments(args: argparse.Namespace) -> Dict: + arguments = {} + + if "num_threads" in args: + arguments["num_threads"] = args.num_threads + + return arguments + + def _get_valid_config( config: Optional[Text], mandatory_keys: List[Text], default_config: Text = DEFAULT_CONFIG_PATH, ) -> Text: + """Get a config from a config file and check if it is valid. + + Exit if the config isn't valid. + + Args: + config: Path to the config file. + mandatory_keys: The keys that have to be specified in the config file. + default_config: default config to use if the file at `config` doesn't exist. + + Returns: The path to the config file if the config is valid. 
+ """ config = get_validated_path(config, "config", default_config) if not os.path.exists(config): diff --git a/rasa/cli/utils.py b/rasa/cli/utils.py index 6dac36ae6aec..043daa32f337 100644 --- a/rasa/cli/utils.py +++ b/rasa/cli/utils.py @@ -1,13 +1,14 @@ -import os -import sys import json -from typing import Any, Optional, Text, List, Dict, TYPE_CHECKING import logging +import os +import sys +from typing import Any, Dict, List, Optional, TYPE_CHECKING, Text if TYPE_CHECKING: from questionary import Question from rasa.constants import DEFAULT_MODELS_PATH +from typing import NoReturn logger = logging.getLogger(__name__) @@ -36,19 +37,18 @@ def get_validated_path( """ if current is None or current is not None and not os.path.exists(current): if default is not None and os.path.exists(default): - reason_str = "'{}' not found.".format(current) + reason_str = f"'{current}' not found." if current is None: - reason_str = "Parameter '{}' not set.".format(parameter) + reason_str = f"Parameter '{parameter}' not set." else: - logger.warning( - "'{}' does not exist. Using default value '{}' instead.".format( - current, default - ) + from rasa.utils.common import raise_warning # avoid import cycle + + raise_warning( + f"The path '{current}' does not seem to exist. Using the " + f"default value '{default}' instead." ) - logger.debug( - "{} Using default location '{}' instead.".format(reason_str, default) - ) + logger.debug(f"{reason_str} Using default location '{default}' instead.") current = default elif none_is_valid: current = None @@ -58,7 +58,7 @@ def get_validated_path( return current -def missing_config_keys(path: Text, mandatory_keys: List[Text]) -> List: +def missing_config_keys(path: Text, mandatory_keys: List[Text]) -> List[Text]: import rasa.utils.io if not os.path.exists(path): @@ -83,7 +83,7 @@ def cancel_cause_not_found( default_clause = "" if default: - default_clause = "use the default location ('{}') or ".format(default) + default_clause = f"use the default location ('{default}') or " print_error( "The path '{}' does not exist. Please make sure to {}specify it" " with '--{}'.".format(current, default_clause, parameter) @@ -97,7 +97,10 @@ def parse_last_positional_argument_as_model_path() -> None: if ( len(sys.argv) >= 2 + # support relevant commands ... 
and sys.argv[1] in ["run", "shell", "interactive"] + # but avoid interpreting subparser commands as model paths + and sys.argv[1:] != ["run", "actions"] and not sys.argv[-2].startswith("-") and os.path.exists(sys.argv[-1]) ): @@ -130,8 +133,8 @@ def create_output_path( else: time_format = "%Y%m%d-%H%M%S" name = time.strftime(time_format) - name = "{}{}".format(prefix, name) - file_name = "{}.tar.gz".format(name) + name = f"{prefix}{name}" + file_name = f"{name}.tar.gz" return os.path.join(output_path, file_name) @@ -196,7 +199,7 @@ def payload_from_button_question(button_question: "Question") -> Text: return response -class bcolors(object): +class bcolors: HEADER = "\033[95m" OKBLUE = "\033[94m" OKGREEN = "\033[92m" @@ -212,7 +215,7 @@ def wrap_with_color(*args: Any, color: Text): def print_color(*args: Any, color: Text): - print (wrap_with_color(*args, color=color)) + print(wrap_with_color(*args, color=color)) def print_success(*args: Any): @@ -231,13 +234,13 @@ def print_error(*args: Any): print_color(*args, color=bcolors.FAIL) -def print_error_and_exit(message: Text, exit_code: int = 1) -> None: +def print_error_and_exit(message: Text, exit_code: int = 1) -> NoReturn: """Print error message and exit the application.""" print_error(message) sys.exit(exit_code) -def signal_handler(sig, frame): - print ("Goodbye 👋") +def signal_handler(sig, frame) -> NoReturn: + print("Goodbye 👋") sys.exit(0) diff --git a/rasa/cli/x.py b/rasa/cli/x.py index d124dedcb260..dd7bc7dfb735 100644 --- a/rasa/cli/x.py +++ b/rasa/cli/x.py @@ -6,7 +6,7 @@ import signal import traceback from multiprocessing import get_context -from typing import List, Text, Optional, Tuple, Union, Iterable +from typing import List, Text, Optional, Tuple, Iterable import aiohttp import ruamel.yaml as yaml @@ -22,6 +22,7 @@ DEFAULT_LOG_LEVEL_RASA_X, DEFAULT_RASA_X_PORT, DEFAULT_RASA_PORT, + DOCS_BASE_URL_RASA_X, ) from rasa.core.utils import AvailableEndpoints from rasa.utils.endpoints import EndpointConfig @@ -59,11 +60,11 @@ def _rasa_service( ): """Starts the Rasa application.""" from rasa.core.run import serve_application + import rasa.utils.common # needs separate logging configuration as it is started in its own process - logging.basicConfig(level=args.loglevel) + rasa.utils.common.set_log_level(args.loglevel) io_utils.configure_colored_logging(args.loglevel) - logging.getLogger("apscheduler").setLevel(logging.WARNING) if not credentials_path: credentials_path = _prepare_credentials_for_rasa_x( @@ -79,6 +80,10 @@ def _rasa_service( enable_api=True, jwt_secret=args.jwt_secret, jwt_method=args.jwt_method, + ssl_certificate=args.ssl_certificate, + ssl_keyfile=args.ssl_keyfile, + ssl_ca_file=args.ssl_ca_file, + ssl_password=args.ssl_password, ) @@ -105,32 +110,66 @@ def _prepare_credentials_for_rasa_x( def _overwrite_endpoints_for_local_x( endpoints: AvailableEndpoints, rasa_x_token: Text, rasa_x_url: Text ): - from rasa.utils.endpoints import EndpointConfig - import questionary + endpoints.model = _get_model_endpoint(endpoints.model, rasa_x_token, rasa_x_url) + endpoints.event_broker = _get_event_broker_endpoint(endpoints.event_broker) + + +def _get_model_endpoint( + model_endpoint: Optional[EndpointConfig], rasa_x_token: Text, rasa_x_url: Text +) -> EndpointConfig: + # If you change that, please run a test with Rasa X and speak to the bot + default_rasax_model_server_url = ( + f"{rasa_x_url}/projects/default/models/tags/production" + ) + + model_endpoint = model_endpoint or EndpointConfig() - endpoints.model = EndpointConfig( - 
"{}/projects/default/models/tags/production".format(rasa_x_url), + # Checking if endpoint.yml has existing url, if so give + # warning we are overwriting the endpoint.yml file. + custom_url = model_endpoint.url + + if custom_url and custom_url != default_rasax_model_server_url: + logger.info( + f"Ignoring url '{custom_url}' from 'endpoints.yml' and using " + f"'{default_rasax_model_server_url}' instead." + ) + + custom_wait_time_pulls = model_endpoint.kwargs.get("wait_time_between_pulls") + return EndpointConfig( + default_rasax_model_server_url, token=rasa_x_token, - wait_time_between_pulls=2, + wait_time_between_pulls=custom_wait_time_pulls or 2, ) - overwrite_existing_event_broker = False - if endpoints.event_broker and not _is_correct_event_broker(endpoints.event_broker): + +def _get_event_broker_endpoint( + event_broker_endpoint: Optional[EndpointConfig], +) -> EndpointConfig: + import questionary + + default_event_broker_endpoint = EndpointConfig( + type="sql", dialect="sqlite", db=DEFAULT_EVENTS_DB + ) + if not event_broker_endpoint: + return default_event_broker_endpoint + elif not _is_correct_event_broker(event_broker_endpoint): cli_utils.print_error( - "Rasa X currently only supports a SQLite event broker with path '{}' " - "when running locally. You can deploy Rasa X with Docker " - "(https://rasa.com/docs/rasa-x/deploy/) if you want to use " - "other event broker configurations.".format(DEFAULT_EVENTS_DB) + f"Rasa X currently only supports a SQLite event broker with path " + f"'{DEFAULT_EVENTS_DB}' when running locally. You can deploy Rasa X " + f"with Docker ({DOCS_BASE_URL_RASA_X}/installation-and-setup/" + f"docker-compose-quick-install/) if you want to use other event broker " + f"configurations." ) - overwrite_existing_event_broker = questionary.confirm( + continue_with_default_event_broker = questionary.confirm( "Do you want to continue with the default SQLite event broker?" 
).ask() - if not overwrite_existing_event_broker: + if not continue_with_default_event_broker: exit(0) - if not endpoints.tracker_store or overwrite_existing_event_broker: - endpoints.event_broker = EndpointConfig(type="sql", db=DEFAULT_EVENTS_DB) + return default_event_broker_endpoint + else: + return event_broker_endpoint def _is_correct_event_broker(event_broker: EndpointConfig) -> bool: @@ -149,7 +188,7 @@ def start_rasa_for_local_rasa_x(args: argparse.Namespace, rasa_x_token: Text): credentials_path, endpoints_path = _get_credentials_and_endpoints_paths(args) endpoints = AvailableEndpoints.read_endpoints(endpoints_path) - rasa_x_url = "http://localhost:{}/api".format(args.rasa_x_port) + rasa_x_url = f"http://localhost:{args.rasa_x_port}/api" _overwrite_endpoints_for_local_x(endpoints, rasa_x_token, rasa_x_url) vars(args).update( @@ -171,7 +210,7 @@ def start_rasa_for_local_rasa_x(args: argparse.Namespace, rasa_x_token: Text): return p -def is_rasa_x_installed(): +def is_rasa_x_installed() -> bool: """Check if Rasa X is installed.""" # we could also do something like checking if `import rasax` works, @@ -216,8 +255,9 @@ def _configure_logging(args: argparse.Namespace): logging.getLogger("py.warnings").setLevel(logging.ERROR) -def is_rasa_project_setup(project_path: Text): - mandatory_files = [DEFAULT_CONFIG_PATH, DEFAULT_DOMAIN_PATH] +def is_rasa_project_setup(args: argparse.Namespace, project_path: Text) -> bool: + config_path = _get_config_path(args) + mandatory_files = [config_path, DEFAULT_DOMAIN_PATH] for f in mandatory_files: if not os.path.exists(os.path.join(project_path, f)): @@ -232,7 +272,7 @@ def _validate_rasa_x_start(args: argparse.Namespace, project_path: Text): "Rasa X is not installed. The `rasa x` " "command requires an installation of Rasa X. " "Instructions on how to install Rasa X can be found here: " - "https://rasa.com/docs/rasa-x/installation-and-setup/." + "https://rasa.com/docs/rasa-x/." ) if args.port == args.rasa_x_port: @@ -245,7 +285,7 @@ def _validate_rasa_x_start(args: argparse.Namespace, project_path: Text): ) ) - if not is_rasa_project_setup(project_path): + if not is_rasa_project_setup(args, project_path): cli_utils.print_error_and_exit( "This directory is not a valid Rasa project. Use 'rasa init' " "to create a new Rasa project or switch to a valid Rasa project " @@ -289,7 +329,7 @@ def rasa_x(args: argparse.Namespace): async def _pull_runtime_config_from_server( config_endpoint: Optional[Text], attempts: int = 60, - wait_time_between_pulls: Union[int, float] = 5, + wait_time_between_pulls: float = 5, keys: Iterable[Text] = ("endpoints", "credentials"), ) -> Optional[List[Text]]: """Pull runtime config from `config_endpoint`. @@ -320,7 +360,7 @@ async def _pull_runtime_config_from_server( "".format(resp.status, await resp.text()) ) except aiohttp.ClientError as e: - logger.debug("Failed to connect to server. Retrying. {}".format(e)) + logger.debug(f"Failed to connect to server. Retrying. 
{e}") await asyncio.sleep(wait_time_between_pulls) attempts -= 1 @@ -342,8 +382,16 @@ def run_in_production(args: argparse.Namespace): _rasa_service(args, endpoints, None, credentials_path) +def _get_config_path(args: argparse.Namespace,) -> Optional[Text]: + config_path = cli_utils.get_validated_path( + args.config, "config", DEFAULT_CONFIG_PATH + ) + + return config_path + + def _get_credentials_and_endpoints_paths( - args: argparse.Namespace + args: argparse.Namespace, ) -> Tuple[Optional[Text], Optional[Text]]: config_endpoint = args.config_endpoint if config_endpoint: @@ -376,10 +424,14 @@ def run_locally(args: argparse.Namespace): rasa_x_token = generate_rasa_x_token() process = start_rasa_for_local_rasa_x(args, rasa_x_token=rasa_x_token) + config_path = _get_config_path(args) + try: - local.main(args, project_path, args.data, token=rasa_x_token) + local.main( + args, project_path, args.data, token=rasa_x_token, config_path=config_path + ) except Exception: - print (traceback.format_exc()) + print(traceback.format_exc()) cli_utils.print_error( "Sorry, something went wrong (see error above). Make sure to start " "Rasa X with valid data and valid domain and config files. Please, " diff --git a/rasa/constants.py b/rasa/constants.py index ed39875e4c64..e3fc0dda8c48 100644 --- a/rasa/constants.py +++ b/rasa/constants.py @@ -7,12 +7,17 @@ DEFAULT_ACTIONS_PATH = "actions" DEFAULT_MODELS_PATH = "models" DEFAULT_DATA_PATH = "data" +DEFAULT_E2E_TESTS_PATH = "tests" DEFAULT_RESULTS_PATH = "results" DEFAULT_NLU_RESULTS_PATH = "nlu_comparison_results" +DEFAULT_CORE_SUBDIRECTORY_NAME = "core" +DEFAULT_NLU_SUBDIRECTORY_NAME = "nlu" DEFAULT_REQUEST_TIMEOUT = 60 * 5 # 5 minutes +DEFAULT_RESPONSE_TIMEOUT = 60 * 60 # 1 hour TEST_DATA_FILE = "test.md" TRAIN_DATA_FILE = "train.md" +NLG_DATA_FILE = "responses.md" RESULTS_FILE = "results.json" NUMBER_OF_TRAINING_STORIES_FILE = "num_stories.json" PERCENTAGE_KEY = "__percentage__" @@ -21,18 +26,43 @@ CONFIG_SCHEMA_FILE = "nlu/schemas/config.yml" DOMAIN_SCHEMA_FILE = "core/schemas/domain.yml" +YAML_VERSION = (1, 2) DEFAULT_RASA_X_PORT = 5002 DEFAULT_RASA_PORT = 5005 DOCS_BASE_URL = "https://rasa.com/docs/rasa" +DOCS_URL_POLICIES = DOCS_BASE_URL + "/core/policies/" +DOCS_URL_DOMAINS = DOCS_BASE_URL + "/core/domains/" +DOCS_URL_STORIES = DOCS_BASE_URL + "/core/stories/" +DOCS_URL_RULES = DOCS_BASE_URL + "/core/rules/" +DOCS_URL_ACTIONS = DOCS_BASE_URL + "/core/actions/" +DOCS_URL_CONNECTORS = DOCS_BASE_URL + "/user-guide/connectors/" +DOCS_URL_EVENT_BROKERS = DOCS_BASE_URL + "/api/event-brokers/" +DOCS_URL_PIKA_EVENT_BROKER = DOCS_URL_EVENT_BROKERS + "#pika-event-broker" +DOCS_URL_TRACKER_STORES = DOCS_BASE_URL + "/api/tracker-stores/" +DOCS_URL_PIPELINE = DOCS_BASE_URL + "/nlu/choosing-a-pipeline/" +DOCS_URL_COMPONENTS = DOCS_BASE_URL + "/nlu/components/" +DOCS_URL_TRAINING_DATA_NLU = DOCS_BASE_URL + "/nlu/training-data-format/" +DOCS_URL_MIGRATE_GOOGLE = DOCS_BASE_URL + "/migrate-from/google-dialogflow-to-rasa/" +DOCS_URL_MIGRATION_GUIDE = DOCS_BASE_URL + "/migration-guide/" + +DOCS_BASE_URL_RASA_X = "https://rasa.com/docs/rasa-x" + LEGACY_DOCS_BASE_URL = "http://legacy-docs.rasa.com" -CONFIG_MANDATORY_KEYS_CORE = ["policies"] -CONFIG_MANDATORY_KEYS_NLU = ["language", "pipeline"] +CONFIG_KEYS_CORE = ["policies"] +CONFIG_KEYS_NLU = ["language", "pipeline"] +CONFIG_KEYS = CONFIG_KEYS_CORE + CONFIG_KEYS_NLU +CONFIG_MANDATORY_KEYS_CORE = [] +CONFIG_MANDATORY_KEYS_NLU = ["language"] CONFIG_MANDATORY_KEYS = CONFIG_MANDATORY_KEYS_CORE + CONFIG_MANDATORY_KEYS_NLU 
+CONFIG_AUTOCONFIGURABLE_KEYS = ["policies", "pipeline"] + + +MINIMUM_COMPATIBLE_VERSION = "1.11.0a3" -MINIMUM_COMPATIBLE_VERSION = "1.3.0a2" +LATEST_TRAINING_DATA_FORMAT_VERSION = "2.0" GLOBAL_USER_CONFIG_PATH = os.path.expanduser("~/.config/rasa/global.yml") @@ -41,3 +71,16 @@ DEFAULT_LOG_LEVEL_LIBRARIES = "ERROR" ENV_LOG_LEVEL = "LOG_LEVEL" ENV_LOG_LEVEL_LIBRARIES = "LOG_LEVEL_LIBRARIES" + +DEFAULT_SANIC_WORKERS = 1 +ENV_SANIC_WORKERS = "SANIC_WORKERS" +ENV_SANIC_BACKLOG = "SANIC_BACKLOG" + +DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES = 0 +DEFAULT_CARRY_OVER_SLOTS_TO_NEW_SESSION = True + +ENV_GPU_CONFIG = "TF_GPU_MEMORY_ALLOC" +ENV_CPU_INTER_OP_CONFIG = "TF_INTER_OP_PARALLELISM_THREADS" +ENV_CPU_INTRA_OP_CONFIG = "TF_INTRA_OP_PARALLELISM_THREADS" + +DEFAULT_NLU_FALLBACK_INTENT_NAME = "nlu_fallback" diff --git a/rasa/core/__init__.py b/rasa/core/__init__.py index 0cafb1aeef34..0f76ff881aef 100644 --- a/rasa/core/__init__.py +++ b/rasa/core/__init__.py @@ -3,7 +3,6 @@ import rasa from rasa.core.train import train -from rasa.core.test import test from rasa.core.visualize import visualize logging.getLogger(__name__).addHandler(logging.NullHandler()) diff --git a/rasa/core/actions/action.py b/rasa/core/actions/action.py index 88cf5226cf05..d6214e881b75 100644 --- a/rasa/core/actions/action.py +++ b/rasa/core/actions/action.py @@ -7,7 +7,7 @@ import aiohttp import rasa.core -from rasa.constants import DOCS_BASE_URL +from rasa.constants import DOCS_BASE_URL, DEFAULT_NLU_FALLBACK_INTENT_NAME from rasa.core import events from rasa.core.constants import ( DEFAULT_REQUEST_TIMEOUT, @@ -19,7 +19,9 @@ from rasa.nlu.constants import ( DEFAULT_OPEN_UTTERANCE_TYPE, OPEN_UTTERANCE_PREDICTION_KEY, - MESSAGE_SELECTOR_PROPERTY_NAME, + RESPONSE_SELECTOR_PROPERTY_NAME, + INTENT_RANKING_KEY, + INTENT_NAME_KEY, ) from rasa.core.events import ( @@ -28,6 +30,10 @@ ActionExecuted, Event, BotUttered, + SlotSet, + ActiveLoop, + Restarted, + SessionStarted, ) from rasa.utils.endpoints import EndpointConfig, ClientResponseError @@ -43,6 +49,8 @@ ACTION_RESTART_NAME = "action_restart" +ACTION_SESSION_START_NAME = "action_session_start" + ACTION_DEFAULT_FALLBACK_NAME = "action_default_fallback" ACTION_DEACTIVATE_FORM_NAME = "action_deactivate_form" @@ -55,27 +63,35 @@ ACTION_BACK_NAME = "action_back" +RULE_SNIPPET_ACTION_NAME = "..." + +FULL_RETRIEVAL_INTENT = "full_retrieval_intent" -def default_actions() -> List["Action"]: + +def default_actions(action_endpoint: Optional[EndpointConfig] = None) -> List["Action"]: """List default actions.""" + from rasa.core.actions.two_stage_fallback import TwoStageFallbackAction + return [ ActionListen(), ActionRestart(), + ActionSessionStart(), ActionDefaultFallback(), ActionDeactivateForm(), ActionRevertFallbackEvents(), ActionDefaultAskAffirmation(), ActionDefaultAskRephrase(), + TwoStageFallbackAction(action_endpoint), ActionBack(), ] def default_action_names() -> List[Text]: """List default action names.""" - return [a.name() for a in default_actions()] + return [a.name() for a in default_actions()] + [RULE_SNIPPET_ACTION_NAME] -def combine_user_with_default_actions(user_actions): +def combine_user_with_default_actions(user_actions: List[Text]) -> List[Text]: # remove all user actions that overwrite default actions # this logic is a bit reversed, you'd think that we should remove # the action name from the default action names if the user overwrites @@ -83,16 +99,30 @@ def combine_user_with_default_actions(user_actions): # implicitly assume that e.g. 
"action_listen" is always at location # 0 in this array. to keep it that way, we remove the duplicate # action names from the users list instead of the defaults - unique_user_actions = [a for a in user_actions if a not in default_action_names()] - return default_action_names() + unique_user_actions + defaults = default_action_names() + unique_user_actions = [a for a in user_actions if a not in defaults] + return defaults + unique_user_actions + + +def combine_with_templates( + actions: List[Text], templates: Dict[Text, Any] +) -> List[Text]: + """Combines actions with utter actions listed in responses section.""" + unique_template_names = [ + a for a in sorted(list(templates.keys())) if a not in actions + ] + return actions + unique_template_names def action_from_name( - name: Text, action_endpoint: Optional[EndpointConfig], user_actions: List[Text] + name: Text, + action_endpoint: Optional[EndpointConfig], + user_actions: List[Text], + should_use_form_action: bool = False, ) -> "Action": """Return an action instance for the name.""" - defaults = {a.name(): a for a in default_actions()} + defaults = {a.name(): a for a in default_actions(action_endpoint)} if name in defaults and name not in user_actions: return defaults[name] @@ -100,6 +130,10 @@ def action_from_name( return ActionUtterTemplate(name) elif name.startswith(RESPOND_PREFIX): return ActionRetrieveResponse(name) + elif should_use_form_action: + from rasa.core.actions.forms import FormAction + + return FormAction(name, action_endpoint) else: return RemoteAction(name, action_endpoint) @@ -139,7 +173,7 @@ def create_bot_utterance(message: Dict[Text, Any]) -> BotUttered: return bot_message -class Action(object): +class Action: """Next action to be taken in response to a dialogue state.""" def name(self) -> Text: @@ -165,7 +199,8 @@ async def run( ``tracker.get_slot(slot_name)`` and the most recent user message is ``tracker.latest_message.text``. domain (Domain): the bot's domain - + metadata: dictionary that can be sent to action server with custom + data. 
Returns: List[Event]: A list of :class:`rasa.core.events.Event` instances """ @@ -183,7 +218,7 @@ def __init__(self, name: Text, silent_fail: Optional[bool] = False): self.action_name = name self.silent_fail = silent_fail - def intent_name_from_action(self): + def intent_name_from_action(self) -> Text: return self.action_name.split(RESPOND_PREFIX)[1] async def run( @@ -196,7 +231,7 @@ async def run( """Query the appropriate response and create a bot utterance with that.""" response_selector_properties = tracker.latest_message.parse_data[ - MESSAGE_SELECTOR_PROPERTY_NAME + RESPONSE_SELECTOR_PROPERTY_NAME ] if self.intent_name_from_action() in response_selector_properties: @@ -211,12 +246,11 @@ async def run( ) return [] - logger.debug("Picking response from selector of type {}".format(query_key)) - message = { - "text": response_selector_properties[query_key][ - OPEN_UTTERANCE_PREDICTION_KEY - ]["name"] - } + logger.debug(f"Picking response from selector of type {query_key}") + selected = response_selector_properties[query_key] + message = selected[OPEN_UTTERANCE_PREDICTION_KEY] + message["template_name"] = selected[FULL_RETRIEVAL_INTENT] + return [create_bot_utterance(message)] def name(self) -> Text: @@ -232,21 +266,28 @@ class ActionUtterTemplate(Action): Both, name and utter template, need to be specified using the `name` method.""" - def __init__(self, name, silent_fail: Optional[bool] = False): + def __init__(self, name: Text, silent_fail: Optional[bool] = False): self.template_name = name self.silent_fail = silent_fail - async def run(self, output_channel, nlg, tracker, domain): + async def run( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + ) -> List[Event]: """Simple run implementation uttering a (hopefully defined) template.""" message = await nlg.generate(self.template_name, tracker, output_channel.name()) if message is None: if not self.silent_fail: logger.error( - "Couldn't create message for template '{}'." + "Couldn't create message for response '{}'." "".format(self.template_name) ) return [] + message["template_name"] = self.template_name return [create_bot_utterance(message)] @@ -263,12 +304,18 @@ class ActionBack(ActionUtterTemplate): def name(self) -> Text: return ACTION_BACK_NAME - def __init__(self): - super(ActionBack, self).__init__("utter_back", silent_fail=True) + def __init__(self) -> None: + super().__init__("utter_back", silent_fail=True) - async def run(self, output_channel, nlg, tracker, domain): - # only utter the template if it is available - evts = await super(ActionBack, self).run(output_channel, nlg, tracker, domain) + async def run( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + ) -> List[Event]: + # only utter the response if it is available + evts = await super().run(output_channel, nlg, tracker, domain) return evts + [UserUtteranceReverted(), UserUtteranceReverted()] @@ -282,32 +329,82 @@ class ActionListen(Action): def name(self) -> Text: return ACTION_LISTEN_NAME - async def run(self, output_channel, nlg, tracker, domain): + async def run( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + ) -> List[Event]: return [] class ActionRestart(ActionUtterTemplate): """Resets the tracker to its initial state. 
- Utters the restart template if available.""" + Utters the restart response if available.""" def name(self) -> Text: return ACTION_RESTART_NAME - def __init__(self): - super(ActionRestart, self).__init__("utter_restart", silent_fail=True) + def __init__(self) -> None: + super().__init__("utter_restart", silent_fail=True) - async def run(self, output_channel, nlg, tracker, domain): - from rasa.core.events import Restarted - - # only utter the template if it is available - evts = await super(ActionRestart, self).run( - output_channel, nlg, tracker, domain - ) + async def run( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + ) -> List[Event]: + # only utter the response if it is available + evts = await super().run(output_channel, nlg, tracker, domain) return evts + [Restarted()] +class ActionSessionStart(Action): + """Applies a conversation session start. + + Takes all `SlotSet` events from the previous session and applies them to the new + session. + """ + + # Optional arbitrary metadata that can be passed to the SessionStarted event. + metadata: Optional[Dict[Text, Any]] = None + + def name(self) -> Text: + return ACTION_SESSION_START_NAME + + @staticmethod + def _slot_set_events_from_tracker( + tracker: "DialogueStateTracker", + ) -> List["SlotSet"]: + """Fetch SlotSet events from tracker and carry over key, value and metadata.""" + + return [ + SlotSet(key=event.key, value=event.value, metadata=event.metadata) + for event in tracker.applied_events() + if isinstance(event, SlotSet) + ] + + async def run( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + ) -> List[Event]: + _events = [SessionStarted(metadata=self.metadata)] + + if domain.session_config.carry_over_slots: + _events.extend(self._slot_set_events_from_tracker(tracker)) + + _events.append(ActionExecuted(ACTION_LISTEN_NAME)) + + return _events + + class ActionDefaultFallback(ActionUtterTemplate): """Executes the fallback action and goes back to the previous state of the dialogue""" @@ -315,16 +412,18 @@ class ActionDefaultFallback(ActionUtterTemplate): def name(self) -> Text: return ACTION_DEFAULT_FALLBACK_NAME - def __init__(self): - super(ActionDefaultFallback, self).__init__("utter_default", silent_fail=True) - - async def run(self, output_channel, nlg, tracker, domain): - from rasa.core.events import UserUtteranceReverted + def __init__(self) -> None: + super().__init__("utter_default", silent_fail=True) - # only utter the template if it is available - evts = await super(ActionDefaultFallback, self).run( - output_channel, nlg, tracker, domain - ) + async def run( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + ) -> List[Event]: + # only utter the response if it is available + evts = await super().run(output_channel, nlg, tracker, domain) return evts + [UserUtteranceReverted()] @@ -335,10 +434,14 @@ class ActionDeactivateForm(Action): def name(self) -> Text: return ACTION_DEACTIVATE_FORM_NAME - async def run(self, output_channel, nlg, tracker, domain): - from rasa.core.events import Form, SlotSet - - return [Form(None), SlotSet(REQUESTED_SLOT, None)] + async def run( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + ) -> List[Event]: + return [ActiveLoop(None), SlotSet(REQUESTED_SLOT, 
None)] class RemoteAction(Action): @@ -364,7 +467,7 @@ def _action_call_format( } @staticmethod - def action_response_format_spec(): + def action_response_format_spec() -> Dict[Text, Any]: """Expected response schema for an Action endpoint. Used for validation of the response returned from the @@ -383,7 +486,7 @@ def action_response_format_spec(): }, } - def _validate_action_result(self, result): + def _validate_action_result(self, result: Dict[Text, Any]) -> bool: from jsonschema import validate from jsonschema import ValidationError @@ -410,33 +513,37 @@ async def _utter_responses( bot_messages = [] for response in responses: - if "template" in response: - kwargs = response.copy() - del kwargs["template"] + template = response.pop("template", None) + if template: draft = await nlg.generate( - response["template"], tracker, output_channel.name(), **kwargs + template, tracker, output_channel.name(), **response ) if not draft: continue - - del response["template"] + draft["template_name"] = template else: draft = {} - if "buttons" in response: - if "buttons" not in draft: - draft["buttons"] = [] - draft["buttons"].extend(response["buttons"]) - del response["buttons"] + buttons = response.pop("buttons", []) or [] + if buttons: + draft.setdefault("buttons", []) + draft["buttons"].extend(buttons) + # Avoid overwriting `draft` values with empty values + response = {k: v for k, v in response.items() if v} draft.update(response) - bot_messages.append(create_bot_utterance(draft)) + return bot_messages - async def run(self, output_channel, nlg, tracker, domain) -> List[Event]: + async def run( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + ) -> List[Event]: json_body = self._action_call_format(tracker, domain) - if not self.action_endpoint: logger.error( "The model predicted the custom action '{}', " @@ -456,6 +563,7 @@ async def run(self, output_channel, nlg, tracker, domain) -> List[Event]: response = await self.action_endpoint.request( json=json_body, method="post", timeout=DEFAULT_REQUEST_TIMEOUT ) + self._validate_action_result(response) events_json = response.get("events", []) @@ -509,13 +617,13 @@ class ActionExecutionRejection(Exception): """Raising this exception will allow other policies to predict a different action""" - def __init__(self, action_name, message=None): + def __init__(self, action_name: Text, message: Optional[Text] = None) -> None: self.action_name = action_name self.message = message or "Custom action '{}' rejected to run".format( action_name ) - def __str__(self): + def __str__(self) -> Text: return self.message @@ -617,15 +725,24 @@ async def run( tracker: "DialogueStateTracker", domain: "Domain", ) -> List[Event]: - intent_to_affirm = tracker.latest_message.intent.get("name") - affirmation_message = "Did you mean '{}'?".format(intent_to_affirm) + intent_to_affirm = tracker.latest_message.intent.get(INTENT_NAME_KEY) + + intent_ranking = tracker.latest_message.intent.get(INTENT_RANKING_KEY, []) + if ( + intent_to_affirm == DEFAULT_NLU_FALLBACK_INTENT_NAME + and len(intent_ranking) > 1 + ): + intent_to_affirm = intent_ranking[1][INTENT_NAME_KEY] + + affirmation_message = f"Did you mean '{intent_to_affirm}'?" 
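A minimal standalone sketch of the intent-selection rule introduced above: if NLU predicted the fallback intent, the runner-up from the intent ranking is affirmed instead. The constants and parse data below are illustrative, not taken from a real tracker:

```python
from typing import Any, Dict, List, Optional

DEFAULT_NLU_FALLBACK_INTENT_NAME = "nlu_fallback"


def intent_to_affirm(latest_message: Dict[str, Any]) -> Optional[str]:
    """Pick the intent the bot should ask the user to confirm."""
    intent = latest_message.get("intent", {}).get("name")
    ranking: List[Dict[str, Any]] = latest_message.get("intent_ranking", [])

    # If NLU fell back, ask about the second-best intent instead.
    if intent == DEFAULT_NLU_FALLBACK_INTENT_NAME and len(ranking) > 1:
        return ranking[1].get("name")
    return intent


message = {
    "intent": {"name": "nlu_fallback", "confidence": 0.4},
    "intent_ranking": [
        {"name": "nlu_fallback", "confidence": 0.4},
        {"name": "greet", "confidence": 0.35},
    ],
}
assert intent_to_affirm(message) == "greet"
```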
message = { "text": affirmation_message, "buttons": [ - {"title": "Yes", "payload": "/{}".format(intent_to_affirm)}, - {"title": "No", "payload": "/{}".format(USER_INTENT_OUT_OF_SCOPE)}, + {"title": "Yes", "payload": f"/{intent_to_affirm}"}, + {"title": "No", "payload": f"/{USER_INTENT_OUT_OF_SCOPE}"}, ], + "template_name": self.name(), } return [create_bot_utterance(message)] @@ -637,7 +754,5 @@ class ActionDefaultAskRephrase(ActionUtterTemplate): def name(self) -> Text: return ACTION_DEFAULT_ASK_REPHRASE_NAME - def __init__(self): - super(ActionDefaultAskRephrase, self).__init__( - "utter_ask_rephrase", silent_fail=True - ) + def __init__(self) -> None: + super().__init__("utter_ask_rephrase", silent_fail=True) diff --git a/rasa/core/actions/forms.py b/rasa/core/actions/forms.py new file mode 100644 index 000000000000..67bfdff83c88 --- /dev/null +++ b/rasa/core/actions/forms.py @@ -0,0 +1,625 @@ +from enum import Enum +from typing import Text, List, Optional, Union, Any, Dict, Tuple +import logging + +from rasa.core.actions import action +from rasa.core.actions.loops import LoopAction +from rasa.core.channels import OutputChannel +from rasa.core.constants import REQUESTED_SLOT, UTTER_PREFIX +from rasa.core.domain import Domain + +from rasa.core.actions.action import ( + ActionExecutionRejection, + RemoteAction, + ACTION_LISTEN_NAME, +) +from rasa.core.events import Event, SlotSet, ActionExecuted +from rasa.core.nlg import NaturalLanguageGenerator +from rasa.core.trackers import DialogueStateTracker +from rasa.utils.endpoints import EndpointConfig + +logger = logging.getLogger(__name__) + + +class SlotMapping(Enum): + FROM_ENTITY = 0 + FROM_INTENT = 1 + FROM_TRIGGER_INTENT = 2 + FROM_TEXT = 3 + + def __str__(self) -> Text: + return self.name.lower() + + +class FormAction(LoopAction): + def __init__( + self, form_name: Text, action_endpoint: Optional[EndpointConfig] + ) -> None: + self._form_name = form_name + self.action_endpoint = action_endpoint + self._domain: Optional[Domain] = None + + def name(self) -> Text: + return self._form_name + + def required_slots(self, domain: Domain) -> List[Text]: + """A list of required slots that the form has to fill. + + Returns: + A list of slot names. + """ + return list(domain.slot_mapping_for_form(self.name()).keys()) + + def from_entity( + self, + entity: Text, + intent: Optional[Union[Text, List[Text]]] = None, + not_intent: Optional[Union[Text, List[Text]]] = None, + role: Optional[Text] = None, + group: Optional[Text] = None, + ) -> Dict[Text, Any]: + """A dictionary for slot mapping to extract slot value. + + From: + - an extracted entity + - conditioned on + - intent if it is not None + - not_intent if it is not None, + meaning user intent should not be this intent + - role if it is not None + - group if it is not None + """ + + intent, not_intent = self._list_intents(intent, not_intent) + + return { + "type": str(SlotMapping.FROM_ENTITY), + "entity": entity, + "intent": intent, + "not_intent": not_intent, + "role": role, + "group": group, + } + + def get_mappings_for_slot( + self, slot_to_fill: Text, domain: Domain + ) -> List[Dict[Text, Any]]: + """Get mappings for requested slot. 
+ + If None, map requested slot to an entity with the same name + """ + + requested_slot_mappings = self._to_list( + domain.slot_mapping_for_form(self.name()).get( + slot_to_fill, self.from_entity(slot_to_fill) + ) + ) + # check provided slot mappings + for requested_slot_mapping in requested_slot_mappings: + if ( + not isinstance(requested_slot_mapping, dict) + or requested_slot_mapping.get("type") is None + ): + raise TypeError("Provided incompatible slot mapping") + + return requested_slot_mappings + + @staticmethod + def intent_is_desired( + requested_slot_mapping: Dict[Text, Any], tracker: "DialogueStateTracker" + ) -> bool: + """Check whether user intent matches intent conditions""" + + mapping_intents = requested_slot_mapping.get("intent", []) + mapping_not_intents = requested_slot_mapping.get("not_intent", []) + + intent = tracker.latest_message.intent.get("name") + + intent_not_blocked = not mapping_intents and intent not in mapping_not_intents + + return intent_not_blocked or intent in mapping_intents + + def entity_is_desired( + self, + requested_slot_mapping: Dict[Text, Any], + slot: Text, + entity_type_of_slot_to_fill: Optional[Text], + tracker: "DialogueStateTracker", + ) -> bool: + """Check whether slot should be filled by an entity in the input or not. + + Args: + requested_slot_mapping: Slot mapping. + slot: The slot to be filled. + entity_type_of_slot_to_fill: Entity type of slot to fill. + tracker: The tracker. + + Returns: + True, if slot should be filled, false otherwise. + """ + + # slot name is equal to the entity type + slot_equals_entity = slot == requested_slot_mapping.get("entity") + + # use the custom slot mapping 'from_entity' defined by the user to check + # whether we can fill a slot with an entity (only if a role or a group label + # is set) + if ( + requested_slot_mapping.get("role") is None + and requested_slot_mapping.get("group") is None + ) or entity_type_of_slot_to_fill != requested_slot_mapping.get("entity"): + slot_fulfils_entity_mapping = False + else: + matching_values = self.get_entity_value( + requested_slot_mapping.get("entity"), + tracker, + requested_slot_mapping.get("role"), + requested_slot_mapping.get("group"), + ) + slot_fulfils_entity_mapping = matching_values is not None + + return slot_equals_entity or slot_fulfils_entity_mapping + + @staticmethod + def get_entity_value( + name: Text, + tracker: "DialogueStateTracker", + role: Optional[Text] = None, + group: Optional[Text] = None, + ) -> Any: + """Extract entities for given name and optional role and group. + + Args: + name: entity type (name) of interest + tracker: the tracker + role: optional entity role of interest + group: optional entity group of interest + + Returns: + Value of entity. + """ + # list is used to cover the case of list slot type + value = list( + tracker.get_latest_entity_values(name, entity_group=group, entity_role=role) + ) + if len(value) == 0: + value = None + elif len(value) == 1: + value = value[0] + return value + + def extract_other_slots( + self, tracker: DialogueStateTracker, domain: Domain + ) -> Dict[Text, Any]: + """Extract the values of the other slots + if they are set by corresponding entities from the user input + else return `None`. 
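The entity lookup in `get_entity_value` above collapses the extracted values depending on how many matches there are, which is what lets list slots and scalar slots share one code path. A small self-contained sketch of that 0/1/many rule (no tracker involved, just a list of extracted values):

```python
from typing import Any, List, Optional, Union


def collapse_entity_values(values: List[Any]) -> Optional[Union[Any, List[Any]]]:
    """Mirror of the multiplicity rule used by `get_entity_value`."""
    if not values:
        return None       # nothing extracted -> slot stays unfilled
    if len(values) == 1:
        return values[0]  # single match -> scalar slot value
    return values         # several matches -> value for a list slot


assert collapse_entity_values([]) is None
assert collapse_entity_values(["berlin"]) == "berlin"
assert collapse_entity_values(["berlin", "munich"]) == ["berlin", "munich"]
```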
+ """ + slot_to_fill = tracker.get_slot(REQUESTED_SLOT) + + entity_type_of_slot_to_fill = self._get_entity_type_of_slot_to_fill( + slot_to_fill, domain + ) + + slot_values = {} + for slot in self.required_slots(domain): + # look for other slots + if slot != slot_to_fill: + # list is used to cover the case of list slot type + other_slot_mappings = self.get_mappings_for_slot(slot, domain) + + for other_slot_mapping in other_slot_mappings: + # check whether the slot should be filled by an entity in the input + should_fill_entity_slot = ( + other_slot_mapping["type"] == str(SlotMapping.FROM_ENTITY) + and self.intent_is_desired(other_slot_mapping, tracker) + and self.entity_is_desired( + other_slot_mapping, + slot, + entity_type_of_slot_to_fill, + tracker, + ) + ) + # check whether the slot should be + # filled from trigger intent mapping + should_fill_trigger_slot = ( + tracker.active_loop.get("name") != self.name() + and other_slot_mapping["type"] + == str(SlotMapping.FROM_TRIGGER_INTENT) + and self.intent_is_desired(other_slot_mapping, tracker) + ) + if should_fill_entity_slot: + value = self.get_entity_value( + other_slot_mapping["entity"], + tracker, + other_slot_mapping.get("role"), + other_slot_mapping.get("group"), + ) + elif should_fill_trigger_slot: + value = other_slot_mapping.get("value") + else: + value = None + + if value is not None: + logger.debug(f"Extracted '{value}' for extra slot '{slot}'.") + slot_values[slot] = value + # this slot is done, check next + break + + return slot_values + + def extract_requested_slot( + self, tracker: "DialogueStateTracker", domain: Domain + ) -> Dict[Text, Any]: + """Extract the value of requested slot from a user input + else return `None`. + """ + slot_to_fill = tracker.get_slot(REQUESTED_SLOT) + logger.debug(f"Trying to extract requested slot '{slot_to_fill}' ...") + + # get mapping for requested slot + requested_slot_mappings = self.get_mappings_for_slot(slot_to_fill, domain) + + for requested_slot_mapping in requested_slot_mappings: + logger.debug(f"Got mapping '{requested_slot_mapping}'") + + if self.intent_is_desired(requested_slot_mapping, tracker): + mapping_type = requested_slot_mapping["type"] + + if mapping_type == str(SlotMapping.FROM_ENTITY): + value = self.get_entity_value( + requested_slot_mapping.get("entity"), + tracker, + requested_slot_mapping.get("role"), + requested_slot_mapping.get("group"), + ) + elif mapping_type == str(SlotMapping.FROM_INTENT): + value = requested_slot_mapping.get("value") + elif mapping_type == str(SlotMapping.FROM_TRIGGER_INTENT): + # from_trigger_intent is only used on form activation + continue + elif mapping_type == str(SlotMapping.FROM_TEXT): + value = tracker.latest_message.text + else: + raise ValueError("Provided slot mapping type is not supported") + + if value is not None: + logger.debug( + f"Successfully extracted '{value}' for requested slot " + f"'{slot_to_fill}'" + ) + return {slot_to_fill: value} + + logger.debug(f"Failed to extract requested slot '{slot_to_fill}'") + return {} + + async def validate_slots( + self, + slot_dict: Dict[Text, Any], + tracker: "DialogueStateTracker", + domain: Domain, + output_channel: OutputChannel, + nlg: NaturalLanguageGenerator, + ) -> List[Event]: + """Validate the extracted slots. + + If a custom action is available for validating the slots, we call it to validate + them. Otherwise there is no validation. + + Args: + slot_dict: Extracted slots which are candidates to fill the slots required + by the form. + tracker: The current conversation tracker. 
+ domain: The current model domain. + output_channel: The output channel which can be used to send messages + to the user. + nlg: `NaturalLanguageGenerator` to use for response generation. + + Returns: + The validation events including potential bot messages and `SlotSet` events + for the validated slots. + """ + + events = [SlotSet(slot_name, value) for slot_name, value in slot_dict.items()] + + validate_name = f"validate_{self.name()}" + + if validate_name not in domain.action_names: + return events + + _tracker = self._temporary_tracker(tracker, events, domain) + _action = RemoteAction(validate_name, self.action_endpoint) + validate_events = await _action.run(output_channel, nlg, _tracker, domain) + + validated_slot_names = [ + event.key for event in validate_events if isinstance(event, SlotSet) + ] + + # If the custom action doesn't return a SlotSet event for an extracted slot + # candidate we assume that it was valid. The custom action has to return a + # SlotSet(slot_name, None) event to mark a Slot as invalid. + return validate_events + [ + event for event in events if event.key not in validated_slot_names + ] + + def _temporary_tracker( + self, + current_tracker: DialogueStateTracker, + additional_events: List[Event], + domain: Domain, + ) -> DialogueStateTracker: + return DialogueStateTracker.from_events( + current_tracker.sender_id, + current_tracker.events_after_latest_restart() + # Insert form execution event so that it's clearly distinguishable which + # events were newly added. + + [ActionExecuted(self.name())] + additional_events, + slots=domain.slots, + ) + + async def validate( + self, + tracker: "DialogueStateTracker", + domain: Domain, + output_channel: OutputChannel, + nlg: NaturalLanguageGenerator, + ) -> List[Event]: + """Extract and validate value of requested slot. + + If nothing was extracted reject execution of the form action. 
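The merge in `validate_slots` above gives the `validate_<form_name>` custom action the last word: any slot it sets (including to `None`) wins, and extracted candidates it stays silent about are assumed valid. A standalone sketch of that merge using plain (slot, value) tuples instead of `SlotSet` events:

```python
from typing import Any, List, Tuple

SlotEvent = Tuple[str, Any]  # (slot name, value) stand-in for a SlotSet event


def merge_validation(
    extracted: List[SlotEvent], validated: List[SlotEvent]
) -> List[SlotEvent]:
    """Custom-action results win; untouched candidates are kept as-is."""
    validated_names = {name for name, _ in validated}
    return validated + [ev for ev in extracted if ev[0] not in validated_names]


extracted = [("cuisine", "itlian"), ("num_people", 4)]
# the validation action corrects one slot and leaves the other untouched
validated = [("cuisine", "italian")]
assert merge_validation(extracted, validated) == [
    ("cuisine", "italian"),
    ("num_people", 4),
]
```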
+ Subclass this method to add custom validation and rejection logic + """ + + # extract other slots that were not requested + # but set by corresponding entity or trigger intent mapping + slot_values = self.extract_other_slots(tracker, domain) + + # extract requested slot + slot_to_fill = tracker.get_slot(REQUESTED_SLOT) + if slot_to_fill: + slot_values.update(self.extract_requested_slot(tracker, domain)) + + if not slot_values: + # reject to execute the form action + # if some slot was requested but nothing was extracted + # it will allow other policies to predict another action + raise ActionExecutionRejection( + self.name(), + f"Failed to extract slot {slot_to_fill} with action {self.name()}", + ) + logger.debug(f"Validating extracted slots: {slot_values}") + return await self.validate_slots( + slot_values, tracker, domain, output_channel, nlg + ) + + async def request_next_slot( + self, + tracker: "DialogueStateTracker", + domain: Domain, + output_channel: OutputChannel, + nlg: NaturalLanguageGenerator, + events_so_far: List[Event], + ) -> List[Event]: + """Request the next slot and utter template if needed, else return `None`.""" + request_slot_events = [] + + # If this is not `None` it means that the custom action specified a next slot + # to request + slot_to_request = next( + ( + event.value + for event in events_so_far + if isinstance(event, SlotSet) and event.key == REQUESTED_SLOT + ), + None, + ) + temp_tracker = self._temporary_tracker(tracker, events_so_far, domain) + + if not slot_to_request: + slot_to_request = self._find_next_slot_to_request(temp_tracker, domain) + request_slot_events.append(SlotSet(REQUESTED_SLOT, slot_to_request)) + + if slot_to_request: + bot_message_events = await self._ask_for_slot( + domain, nlg, output_channel, slot_to_request, temp_tracker + ) + return request_slot_events + bot_message_events + + # no more required slots to fill + return [SlotSet(REQUESTED_SLOT, None)] + + def _find_next_slot_to_request( + self, tracker: DialogueStateTracker, domain: Domain + ) -> Optional[Text]: + return next( + ( + slot + for slot in self.required_slots(domain) + if self._should_request_slot(tracker, slot) + ), + None, + ) + + def _name_of_utterance(self, domain: Domain, slot_name: Text) -> Text: + search_path = [ + f"action_ask_{self._form_name}_{slot_name}", + f"{UTTER_PREFIX}ask_{self._form_name}_{slot_name}", + f"action_ask_{slot_name}", + ] + + found_actions = ( + action_name + for action_name in search_path + if action_name in domain.action_names + ) + + return next(found_actions, f"{UTTER_PREFIX}ask_{slot_name}") + + async def _ask_for_slot( + self, + domain: Domain, + nlg: NaturalLanguageGenerator, + output_channel: OutputChannel, + slot_name: Text, + tracker: DialogueStateTracker, + ) -> List[Event]: + logger.debug(f"Request next slot '{slot_name}'") + + action_to_ask_for_next_slot = action.action_from_name( + self._name_of_utterance(domain, slot_name), None, domain.user_actions + ) + events_to_ask_for_next_slot = await action_to_ask_for_next_slot.run( + output_channel, nlg, tracker, domain + ) + return events_to_ask_for_next_slot + + # helpers + @staticmethod + def _to_list(x: Optional[Any]) -> List[Any]: + """Convert object to a list if it is not a list, `None` converted to empty list. 
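The `_name_of_utterance` lookup above lets a form ask for a slot via a form-specific custom action, a form-specific response, or a generic custom action, falling back to `utter_ask_<slot>`. A sketch of that priority order as a plain function (the domain is reduced to a list of known action names here):

```python
from typing import List

UTTER_PREFIX = "utter_"


def name_of_ask_action(form: str, slot: str, known_actions: List[str]) -> str:
    """First match wins; falls back to the generic utterance name."""
    search_path = [
        f"action_ask_{form}_{slot}",         # form-specific custom action
        f"{UTTER_PREFIX}ask_{form}_{slot}",  # form-specific response
        f"action_ask_{slot}",                # generic custom action
    ]
    return next(
        (name for name in search_path if name in known_actions),
        f"{UTTER_PREFIX}ask_{slot}",         # generic response as fallback
    )


assert name_of_ask_action("booking", "cuisine", []) == "utter_ask_cuisine"
assert (
    name_of_ask_action("booking", "cuisine", ["utter_ask_booking_cuisine"])
    == "utter_ask_booking_cuisine"
)
```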
+ """ + if x is None: + x = [] + elif not isinstance(x, list): + x = [x] + + return x + + def _list_intents( + self, + intent: Optional[Union[Text, List[Text]]] = None, + not_intent: Optional[Union[Text, List[Text]]] = None, + ) -> Tuple[List[Text], List[Text]]: + """Check provided intent and not_intent""" + if intent and not_intent: + raise ValueError( + f"Providing both intent '{intent}' and not_intent '{not_intent}' " + f"is not supported." + ) + + return self._to_list(intent), self._to_list(not_intent) + + async def _validate_if_required( + self, + tracker: "DialogueStateTracker", + domain: Domain, + output_channel: OutputChannel, + nlg: NaturalLanguageGenerator, + ) -> List[Event]: + """Return a list of events from `self.validate(...)`. + + Validation is required if: + - the form is active + - the form is called after `action_listen` + - form validation was not cancelled + """ + # no active_loop means that it is called during activation + need_validation = not tracker.active_loop or ( + tracker.latest_action_name == ACTION_LISTEN_NAME + and tracker.active_loop.get("validate", True) + ) + if need_validation: + logger.debug(f"Validating user input '{tracker.latest_message}'.") + return await self.validate(tracker, domain, output_channel, nlg) + + logger.debug("Skipping validation.") + return [] + + @staticmethod + def _should_request_slot(tracker: "DialogueStateTracker", slot_name: Text) -> bool: + """Check whether form action should request given slot""" + + return tracker.get_slot(slot_name) is None + + async def activate( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + ) -> List[Event]: + """Activate form if the form is called for the first time. + + If activating, validate any required slots that were filled before + form activation and return `Form` event with the name of the form, as well + as any `SlotSet` events from validation of pre-filled slots. + + Args: + output_channel: The output channel which can be used to send messages + to the user. + nlg: `NaturalLanguageGenerator` to use for response generation. + tracker: Current conversation tracker of the user. + domain: Current model domain. + + Returns: + Events from the activation. 
+ """ + + logger.debug(f"Activated the form '{self.name()}'.") + # collect values of required slots filled before activation + prefilled_slots = {} + + for slot_name in self.required_slots(domain): + if not self._should_request_slot(tracker, slot_name): + prefilled_slots[slot_name] = tracker.get_slot(slot_name) + + if not prefilled_slots: + logger.debug("No pre-filled required slots to validate.") + return [] + + logger.debug(f"Validating pre-filled required slots: {prefilled_slots}") + return await self.validate_slots( + prefilled_slots, tracker, domain, output_channel, nlg + ) + + async def do( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + events_so_far: List[Event], + ) -> List[Event]: + events = await self._validate_if_required(tracker, domain, output_channel, nlg) + + events += await self.request_next_slot( + tracker, domain, output_channel, nlg, events_so_far + events + ) + + return events + + async def is_done( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + events_so_far: List[Event], + ) -> bool: + return SlotSet(REQUESTED_SLOT, None) in events_so_far + + async def deactivate(self, *args: Any, **kwargs: Any) -> List[Event]: + logger.debug(f"Deactivating the form '{self.name()}'") + return [] + + def _get_entity_type_of_slot_to_fill( + self, slot_to_fill: Text, domain: "Domain" + ) -> Optional[Text]: + if not slot_to_fill: + return None + + mappings = self.get_mappings_for_slot(slot_to_fill, domain) + mappings = [ + m for m in mappings if m.get("type") == str(SlotMapping.FROM_ENTITY) + ] + + if not mappings: + return None + + entity_type = mappings[0].get("entity") + + for i in range(1, len(mappings)): + if entity_type != mappings[i].get("entity"): + return None + + return entity_type diff --git a/rasa/core/actions/loops.py b/rasa/core/actions/loops.py new file mode 100644 index 000000000000..cacb90fd238d --- /dev/null +++ b/rasa/core/actions/loops.py @@ -0,0 +1,94 @@ +from abc import ABC +from typing import List, TYPE_CHECKING + +from rasa.core.actions import Action +from rasa.core.events import Event, ActiveLoop + +if TYPE_CHECKING: + from rasa.core.channels import OutputChannel + from rasa.core.domain import Domain + from rasa.core.nlg import NaturalLanguageGenerator + from rasa.core.trackers import DialogueStateTracker + + +class LoopAction(Action, ABC): + async def run( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + ) -> List[Event]: + events = [] + + if not await self.is_activated(output_channel, nlg, tracker, domain): + events += self._default_activation_events() + events += await self.activate(output_channel, nlg, tracker, domain) + + if not await self.is_done(output_channel, nlg, tracker, domain, events): + events += await self.do(output_channel, nlg, tracker, domain, events) + + if await self.is_done(output_channel, nlg, tracker, domain, events): + events += self._default_deactivation_events() + events += await self.deactivate( + output_channel, nlg, tracker, domain, events + ) + + return events + + async def is_activated( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + ) -> bool: + return tracker.active_loop.get("name") == self.name() + + # default implementation checks if form active + def _default_activation_events(self) -> 
List[Event]: + return [ActiveLoop(self.name())] + + async def activate( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + ) -> List[Event]: + # can be overwritten + return [] + + async def do( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + events_so_far: List[Event], + ) -> List[Event]: + raise NotImplementedError() + + async def is_done( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + events_so_far: List[Event], + ) -> bool: + raise NotImplementedError() + + def _default_deactivation_events(self) -> List[Event]: + return [ActiveLoop(None)] + + async def deactivate( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + events_so_far: List[Event], + ) -> List[Event]: + # can be overwritten + return [] diff --git a/rasa/core/actions/two_stage_fallback.py b/rasa/core/actions/two_stage_fallback.py new file mode 100644 index 000000000000..0a7db875ed27 --- /dev/null +++ b/rasa/core/actions/two_stage_fallback.py @@ -0,0 +1,201 @@ +import copy +import time +from typing import List, Text, Optional + +from rasa.constants import DEFAULT_NLU_FALLBACK_INTENT_NAME +from rasa.core.actions import action +from rasa.core.actions.action import ( + ACTION_DEFAULT_ASK_AFFIRMATION_NAME, + ACTION_LISTEN_NAME, + ACTION_DEFAULT_FALLBACK_NAME, + ACTION_DEFAULT_ASK_REPHRASE_NAME, +) +from rasa.core.actions.loops import LoopAction +from rasa.core.channels import OutputChannel +from rasa.core.constants import USER_INTENT_OUT_OF_SCOPE +from rasa.core.domain import Domain +from rasa.core.events import ( + Event, + UserUtteranceReverted, + ActionExecuted, + UserUttered, + ActiveLoop, +) +from rasa.core.nlg import NaturalLanguageGenerator +from rasa.core.trackers import DialogueStateTracker, EventVerbosity +from rasa.utils.endpoints import EndpointConfig + +ACTION_TWO_STAGE_FALLBACK_NAME = "two_stage_fallback" + + +class TwoStageFallbackAction(LoopAction): + def __init__(self, action_endpoint: Optional[EndpointConfig] = None) -> None: + self._action_endpoint = action_endpoint + + def name(self) -> Text: + return ACTION_TWO_STAGE_FALLBACK_NAME + + async def do( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + events_so_far: List[Event], + ) -> List[Event]: + if _user_should_affirm(tracker, events_so_far): + return await self._ask_affirm(output_channel, nlg, tracker, domain) + + return await self._ask_rephrase(output_channel, nlg, tracker, domain) + + async def _ask_affirm( + self, + output_channel: OutputChannel, + nlg: NaturalLanguageGenerator, + tracker: DialogueStateTracker, + domain: Domain, + ) -> List[Event]: + affirm_action = action.action_from_name( + ACTION_DEFAULT_ASK_AFFIRMATION_NAME, + self._action_endpoint, + domain.user_actions, + ) + + return await affirm_action.run(output_channel, nlg, tracker, domain) + + async def _ask_rephrase( + self, + output_channel: OutputChannel, + nlg: NaturalLanguageGenerator, + tracker: DialogueStateTracker, + domain: Domain, + ) -> List[Event]: + rephrase = action.action_from_name( + ACTION_DEFAULT_ASK_REPHRASE_NAME, self._action_endpoint, domain.user_actions + ) + + return await rephrase.run(output_channel, nlg, tracker, domain) + + async def is_done( + self, 
+ output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + events_so_far: List[Event], + ) -> bool: + _user_clarified = _last_intent_name(tracker) not in [ + DEFAULT_NLU_FALLBACK_INTENT_NAME, + USER_INTENT_OUT_OF_SCOPE, + ] + return ( + _user_clarified + or _two_fallbacks_in_a_row(tracker) + or _second_affirmation_failed(tracker) + ) + + async def deactivate( + self, + output_channel: "OutputChannel", + nlg: "NaturalLanguageGenerator", + tracker: "DialogueStateTracker", + domain: "Domain", + events_so_far: List[Event], + ) -> List[Event]: + if _two_fallbacks_in_a_row(tracker) or _second_affirmation_failed(tracker): + return await self._give_up(output_channel, nlg, tracker, domain) + + return await self._revert_fallback_events( + output_channel, nlg, tracker, domain, events_so_far + ) + _message_clarification(tracker) + + async def _revert_fallback_events( + self, + output_channel: OutputChannel, + nlg: NaturalLanguageGenerator, + tracker: DialogueStateTracker, + domain: Domain, + events_so_far: List[Event], + ) -> List[Event]: + revert_events = [UserUtteranceReverted(), UserUtteranceReverted()] + + temp_tracker = DialogueStateTracker.from_events( + tracker.sender_id, tracker.applied_events() + events_so_far + revert_events + ) + + while temp_tracker.latest_message and not await self.is_done( + output_channel, nlg, temp_tracker, domain, [] + ): + temp_tracker.update(revert_events[-1]) + revert_events.append(UserUtteranceReverted()) + + return revert_events + + async def _give_up( + self, + output_channel: OutputChannel, + nlg: NaturalLanguageGenerator, + tracker: DialogueStateTracker, + domain: Domain, + ) -> List[Event]: + fallback = action.action_from_name( + ACTION_DEFAULT_FALLBACK_NAME, self._action_endpoint, domain.user_actions + ) + + return await fallback.run(output_channel, nlg, tracker, domain) + + +def _last_intent_name(tracker: DialogueStateTracker) -> Optional[Text]: + last_message = tracker.latest_message + if not last_message: + return None + + return last_message.intent.get("name") + + +def _two_fallbacks_in_a_row(tracker: DialogueStateTracker) -> bool: + return _last_n_intent_names(tracker, 2) == [ + DEFAULT_NLU_FALLBACK_INTENT_NAME, + DEFAULT_NLU_FALLBACK_INTENT_NAME, + ] + + +def _last_n_intent_names( + tracker: DialogueStateTracker, number_of_last_intent_names: int +) -> List[Text]: + intent_names = [] + for i in range(number_of_last_intent_names): + message = tracker.get_last_event_for( + UserUttered, skip=i, event_verbosity=EventVerbosity.AFTER_RESTART + ) + if isinstance(message, UserUttered): + intent_names.append(message.intent.get("name")) + + return intent_names + + +def _user_should_affirm( + tracker: DialogueStateTracker, events_so_far: List[Event] +) -> bool: + fallback_was_just_activated = any( + isinstance(event, ActiveLoop) for event in events_so_far + ) + if fallback_was_just_activated: + return True + + return _last_intent_name(tracker) == DEFAULT_NLU_FALLBACK_INTENT_NAME + + +def _second_affirmation_failed(tracker: DialogueStateTracker) -> bool: + return _last_n_intent_names(tracker, 3) == [ + USER_INTENT_OUT_OF_SCOPE, + DEFAULT_NLU_FALLBACK_INTENT_NAME, + USER_INTENT_OUT_OF_SCOPE, + ] + + +def _message_clarification(tracker: DialogueStateTracker) -> List[Event]: + clarification = copy.deepcopy(tracker.latest_message) + clarification.parse_data["intent"]["confidence"] = 1.0 + clarification.timestamp = time.time() + return [ActionExecuted(ACTION_LISTEN_NAME), clarification] diff --git 
a/rasa/core/agent.py b/rasa/core/agent.py index f4410b7bf6df..f1772db7e18b 100644 --- a/rasa/core/agent.py +++ b/rasa/core/agent.py @@ -11,7 +11,13 @@ import rasa import rasa.utils.io -from rasa.constants import DEFAULT_DOMAIN_PATH, LEGACY_DOCS_BASE_URL +import rasa.core.utils +from rasa.constants import ( + DEFAULT_DOMAIN_PATH, + LEGACY_DOCS_BASE_URL, + ENV_SANIC_BACKLOG, + DEFAULT_CORE_SUBDIRECTORY_NAME, +) from rasa.core import constants, jobs, training from rasa.core.channels.channel import InputChannel, OutputChannel, UserMessage from rasa.core.constants import DEFAULT_REQUEST_TIMEOUT @@ -24,7 +30,11 @@ from rasa.core.policies.memoization import MemoizationPolicy from rasa.core.policies.policy import Policy from rasa.core.processor import MessageProcessor -from rasa.core.tracker_store import InMemoryTrackerStore, TrackerStore +from rasa.core.tracker_store import ( + InMemoryTrackerStore, + TrackerStore, + FailSafeTrackerStore, +) from rasa.core.trackers import DialogueStateTracker from rasa.exceptions import ModelNotFound from rasa.importers.importer import TrainingDataImporter @@ -35,7 +45,7 @@ get_model, ) from rasa.nlu.utils import is_url -from rasa.utils.common import update_sanic_log_level, set_log_level +from rasa.utils.common import raise_warning, update_sanic_log_level from rasa.utils.endpoints import EndpointConfig logger = logging.getLogger(__name__) @@ -61,41 +71,76 @@ async def load_from_server(agent: "Agent", model_server: EndpointConfig) -> "Age return agent -def _load_and_set_updated_model( - agent: "Agent", model_directory: Text, fingerprint: Text -): - """Load the persisted model into memory and set the model on the agent.""" - - logger.debug("Found new model with fingerprint {}. Loading...".format(fingerprint)) +def _load_interpreter( + agent: "Agent", nlu_path: Optional[Text] +) -> NaturalLanguageInterpreter: + """Load the NLU interpreter at `nlu_path`. - core_path, nlu_path = get_model_subdirectories(model_directory) + Args: + agent: Instance of `Agent` to inspect for an interpreter if `nlu_path` is + `None`. + nlu_path: NLU model path. + Returns: + The NLU interpreter. + """ if nlu_path: from rasa.core.interpreter import RasaNLUInterpreter - interpreter = RasaNLUInterpreter(model_directory=nlu_path) - else: - interpreter = ( - agent.interpreter if agent.interpreter is not None else RegexInterpreter() - ) + return RasaNLUInterpreter(model_directory=nlu_path) + + return agent.interpreter or RegexInterpreter() + +def _load_domain_and_policy_ensemble( + core_path: Optional[Text], +) -> Tuple[Optional[Domain], Optional[PolicyEnsemble]]: + """Load the domain and policy ensemble from the model at `core_path`. + + Args: + core_path: Core model path. + + Returns: + An instance of `Domain` and `PolicyEnsemble` if `core_path` is not `None`. + """ + policy_ensemble = None domain = None + if core_path: + policy_ensemble = PolicyEnsemble.load(core_path) domain_path = os.path.join(os.path.abspath(core_path), DEFAULT_DOMAIN_PATH) domain = Domain.load(domain_path) + return domain, policy_ensemble + + +def _load_and_set_updated_model( + agent: "Agent", model_directory: Text, fingerprint: Text +) -> None: + """Load the persisted model into memory and set the model on the agent. + + Args: + agent: Instance of `Agent` to update with the new model. + model_directory: Rasa model directory. + fingerprint: Fingerprint of the supplied model at `model_directory`. + """ + logger.debug(f"Found new model with fingerprint {fingerprint}. 
Loading...") + + core_path, nlu_path = get_model_subdirectories(model_directory) + try: - policy_ensemble = None - if core_path: - policy_ensemble = PolicyEnsemble.load(core_path) + interpreter = _load_interpreter(agent, nlu_path) + domain, policy_ensemble = _load_domain_and_policy_ensemble(core_path) + agent.update_model( domain, policy_ensemble, fingerprint, interpreter, model_directory ) + logger.debug("Finished updating agent to new model.") - except Exception: + except Exception as e: logger.exception( - "Failed to load policy and update agent. " - "The previous model will stay loaded instead." + f"Failed to update model. The previous model will stay loaded instead. " + f"Error: {e}" ) @@ -114,7 +159,7 @@ async def _update_model_from_server( model_directory, new_model_fingerprint = model_directory_and_fingerprint _load_and_set_updated_model(agent, model_directory, new_model_fingerprint) else: - logger.debug("No new model found at URL {}".format(model_server.url)) + logger.debug(f"No new model found at URL {model_server.url}") async def _pull_model_and_fingerprint( @@ -128,11 +173,10 @@ async def _pull_model_and_fingerprint( headers = {"If-None-Match": fingerprint} - logger.debug("Requesting model from server {}...".format(model_server.url)) + logger.debug(f"Requesting model from server {model_server.url}...") async with model_server.session() as session: try: - set_log_level() params = model_server.combine_parameters() async with session.request( "GET", @@ -263,15 +307,15 @@ async def load_agent( ) else: - logger.warning("No valid configuration given to load agent.") + raise_warning("No valid configuration given to load agent.") return None except Exception as e: - logger.error("Could not load model due to {}.".format(e)) + logger.error(f"Could not load model due to {e}.") raise -class Agent(object): +class Agent: """The Agent class provides a convenient interface for the most important Rasa functionality. @@ -291,6 +335,7 @@ def __init__( model_directory: Optional[Text] = None, model_server: Optional[EndpointConfig] = None, remote_storage: Optional[Text] = None, + path_to_model_archive: Optional[Text] = None, ): # Initializing variables with the passed parameters. 
self.domain = self._create_domain(domain) @@ -299,6 +344,7 @@ def __init__( if self.domain is not None: self.domain.add_requested_slot() self.domain.add_knowledge_base_slots() + self.domain.add_categorical_slot_default_value() PolicyEnsemble.check_domain_ensemble_compatibility( self.policy_ensemble, self.domain @@ -315,6 +361,7 @@ def __init__( self.model_directory = model_directory self.model_server = model_server self.remote_storage = remote_storage + self.path_to_model_archive = path_to_model_archive def update_model( self, @@ -324,7 +371,7 @@ def update_model( interpreter: Optional[NaturalLanguageInterpreter] = None, model_directory: Optional[Text] = None, ) -> None: - self.domain = domain + self.domain = self._create_domain(domain) self.policy_ensemble = policy_ensemble if interpreter: @@ -350,13 +397,14 @@ def load( action_endpoint: Optional[EndpointConfig] = None, model_server: Optional[EndpointConfig] = None, remote_storage: Optional[Text] = None, + path_to_model_archive: Optional[Text] = None, ) -> "Agent": """Load a persisted model from the passed path.""" try: if not model_path: raise ModelNotFound("No path specified.") elif not os.path.exists(model_path): - raise ModelNotFound("No file or directory at '{}'.".format(model_path)) + raise ModelNotFound(f"No file or directory at '{model_path}'.") elif os.path.isfile(model_path): model_path = get_model(model_path) except ModelNotFound: @@ -394,19 +442,20 @@ def load( model_directory=model_path, model_server=model_server, remote_storage=remote_storage, + path_to_model_archive=path_to_model_archive, ) - def is_core_ready(self): + def is_core_ready(self) -> bool: """Check if all necessary components and policies are ready to use the agent. """ - return self.is_ready() and self.policy_ensemble + return self.is_ready() and self.policy_ensemble is not None - def is_ready(self): + def is_ready(self) -> bool: """Check if all necessary components are instantiated to use agent. Policies might not be available, if this is an NLU only agent.""" - return self.tracker_store and self.interpreter + return self.tracker_store is not None and self.interpreter is not None async def parse_message_using_nlu_interpreter( self, message_data: Text, tracker: DialogueStateTracker = None @@ -444,18 +493,15 @@ async def handle_message( self, message: UserMessage, message_preprocessor: Optional[Callable[[Text], Text]] = None, - **kwargs + **kwargs, ) -> Optional[List[Dict[Text, Any]]]: """Handle a single message.""" if not isinstance(message, UserMessage): - logger.warning( + # DEPRECATION EXCEPTION - remove in 2.1 + raise Exception( "Passing a text to `agent.handle_message(...)` is " - "deprecated. Rather use `agent.handle_text(...)`." - ) - # noinspection PyTypeChecker - return await self.handle_text( - message, message_preprocessor=message_preprocessor, **kwargs + "not supported anymore. Rather use `agent.handle_text(...)`." 
) def noop(_): @@ -471,18 +517,20 @@ def noop(_): return await processor.handle_message(message) # noinspection PyUnusedLocal - def predict_next(self, sender_id: Text, **kwargs: Any) -> Optional[Dict[Text, Any]]: + async def predict_next( + self, sender_id: Text, **kwargs: Any + ) -> Optional[Dict[Text, Any]]: """Handle a single message.""" processor = self.create_processor() - return processor.predict_next(sender_id) + return await processor.predict_next(sender_id) # noinspection PyUnusedLocal async def log_message( self, message: UserMessage, message_preprocessor: Optional[Callable[[Text], Text]] = None, - **kwargs: Any + **kwargs: Any, ) -> DialogueStateTracker: """Append a message to a dialogue - does not predict actions.""" @@ -504,6 +552,20 @@ async def execute_action( sender_id, action, output_channel, self.nlg, policy, confidence ) + async def trigger_intent( + self, + intent_name: Text, + entities: List[Dict[Text, Any]], + output_channel: OutputChannel, + tracker: DialogueStateTracker, + ) -> None: + """Trigger a user intent, e.g. triggered by an external event.""" + + processor = self.create_processor() + await processor.trigger_external_user_uttered( + intent_name, entities, tracker, output_channel + ) + async def handle_text( self, text_message: Union[Text, Dict[Text, Any]], @@ -526,7 +588,7 @@ async def handle_text( >>> from rasa.core.agent import Agent >>> from rasa.core.interpreter import RasaNLUInterpreter - >>> agent = Agent.load("examples/restaurantbot/models/current") + >>> agent = Agent.load("examples/moodbot/models") >>> await agent.handle_text("hello") [u'how can I help you?'] @@ -546,7 +608,7 @@ def toggle_memoization(self, activate: bool) -> None: the prediction of that policy. When set to ``False`` the Memoization policies present in the policy ensemble will not make any predictions. Hence, the prediction result from the ensemble always needs to come - from a different policy (e.g. ``KerasPolicy``). Useful to test + from a different policy (e.g. ``TEDPolicy``). 
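Since `predict_next` is now a coroutine alongside `handle_text`, callers have to await both. A hedged usage sketch (the model path and sender id are placeholders, and a trained model must exist at that path for this to run):

```python
import asyncio

from rasa.core.agent import Agent


async def main() -> None:
    agent = Agent.load("models")                # placeholder path to a trained model
    replies = await agent.handle_text("hello")  # full message handling
    print(replies)

    # `predict_next` must now be awaited as well.
    prediction = await agent.predict_next("sender-1")
    print(prediction)


asyncio.run(main())
```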
Useful to test prediction capabilities of an ensemble when ignoring memorized turns from the training data.""" @@ -559,32 +621,28 @@ def toggle_memoization(self, activate: bool) -> None: if type(p) == MemoizationPolicy: p.toggle(activate) - def continue_training( - self, trackers: List[DialogueStateTracker], **kwargs: Any - ) -> None: - - if not self.is_core_ready(): - raise AgentNotReady("Can't continue training without a policy ensemble.") - - self.policy_ensemble.continue_training(trackers, self.domain, **kwargs) - self._set_fingerprint() - - def _max_history(self): + def _max_history(self) -> int: """Find maximum max_history.""" max_histories = [ policy.featurizer.max_history for policy in self.policy_ensemble.policies - if hasattr(policy.featurizer, "max_history") + if policy.featurizer + and hasattr(policy.featurizer, "max_history") + and policy.featurizer.max_history is not None ] return max(max_histories or [0]) - def _are_all_featurizers_using_a_max_history(self): + def _are_all_featurizers_using_a_max_history(self) -> bool: """Check if all featurizers are MaxHistoryTrackerFeaturizer.""" - def has_max_history_featurizer(policy): - return policy.featurizer and hasattr(policy.featurizer, "max_history") + def has_max_history_featurizer(policy: Policy) -> bool: + return ( + policy.featurizer + and hasattr(policy.featurizer, "max_history") + and policy.featurizer.max_history is not None + ) for p in self.policy_ensemble.policies: if p.featurizer and not has_max_history_featurizer(p): @@ -600,7 +658,7 @@ async def load_data( tracker_limit: Optional[int] = None, use_story_concatenation: bool = True, debug_plots: bool = False, - exclusion_percentage: int = None, + exclusion_percentage: Optional[int] = None, ) -> List[DialogueStateTracker]: """Load training data from a resource.""" @@ -615,14 +673,13 @@ async def load_data( unique_last_num_states = max_history elif unique_last_num_states < max_history: # possibility of data loss - logger.warning( - "unique_last_num_states={} but " - "maximum max_history={}." - "Possibility of data loss. " - "It is recommended to set " - "unique_last_num_states to " - "at least maximum max_history." - "".format(unique_last_num_states, max_history) + raise_warning( + f"unique_last_num_states={unique_last_num_states} but " + f"maximum max_history={max_history}. " + f"Possibility of data loss. " + f"It is recommended to set " + f"unique_last_num_states to " + f"at least maximum max_history." ) return await training.load_data( @@ -682,42 +739,12 @@ def train( "to `agent.train(data)`." ) - logger.debug("Agent trainer got kwargs: {}".format(kwargs)) + logger.debug(f"Agent trainer got kwargs: {kwargs}") - self.policy_ensemble.train(training_trackers, self.domain, **kwargs) - self._set_fingerprint() - - def handle_channels( - self, - channels: List[InputChannel], - http_port: int = constants.DEFAULT_SERVER_PORT, - route: Text = "/webhooks/", - cors=None, - ) -> Sanic: - """Start a webserver attaching the input channels and handling msgs.""" - - from rasa.core import run - - logger.warning( - "DEPRECATION warning: Using `handle_channels` is deprecated. " - "Please use `rasa.run(...)` or see " - "`rasa.core.run.configure_app(...)` if you want to implement " - "this on a more detailed level." 
+ self.policy_ensemble.train( + training_trackers, self.domain, interpreter=self.interpreter, **kwargs ) - - app = run.configure_app(channels, cors, None, enable_api=False, route=route) - - app.agent = self - - update_sanic_log_level() - - app.run(host="0.0.0.0", port=http_port) - - # this might seem unnecessary (as run does not return until the server - # is killed) - but we use it for tests where we mock `.run` to directly - # return and need the app to inspect if we created a properly - # configured server - return app + self._set_fingerprint() def _set_fingerprint(self, fingerprint: Optional[Text] = None) -> None: @@ -753,18 +780,18 @@ def _clear_model_directory(model_path: Text) -> None: "overwritten.".format(model_path) ) - def persist(self, model_path: Text, dump_flattened_stories: bool = False) -> None: + def persist(self, model_path: Text) -> None: """Persists this agent into a directory for later loading and usage.""" if not self.is_core_ready(): raise AgentNotReady("Can't persist without a policy ensemble.") - if not model_path.endswith("core"): - model_path = os.path.join(model_path, "core") + if not model_path.endswith(DEFAULT_CORE_SUBDIRECTORY_NAME): + model_path = os.path.join(model_path, DEFAULT_CORE_SUBDIRECTORY_NAME) self._clear_model_directory(model_path) - self.policy_ensemble.persist(model_path, dump_flattened_stories) + self.policy_ensemble.persist(model_path) self.domain.persist(os.path.join(model_path, DEFAULT_DOMAIN_PATH)) self.domain.persist_specification(model_path) @@ -780,7 +807,7 @@ async def visualize( fontsize: int = 12, ) -> None: from rasa.core.training.visualization import visualize_stories - from rasa.core.training.dsl import StoryFileReader + from rasa.core.training import loading """Visualize the loaded training data from the resource.""" @@ -788,7 +815,7 @@ async def visualize( # largest value from any policy max_history = max_history or self._max_history() - story_steps = await StoryFileReader.read_from_folder(resource_name, self.domain) + story_steps = await loading.load_data_from_resource(resource_name, self.domain) await visualize_stories( story_steps, self.domain, @@ -823,7 +850,7 @@ def create_processor( ) @staticmethod - def _create_domain(domain: Union[Domain, Text]) -> Domain: + def _create_domain(domain: Union[Domain, Text, None]) -> Domain: if isinstance(domain, str): domain = Domain.load(domain) @@ -846,9 +873,11 @@ def create_tracker_store( ) -> TrackerStore: if store is not None: store.domain = domain - return store + tracker_store = store else: - return InMemoryTrackerStore(domain) + tracker_store = InMemoryTrackerStore(domain) + + return FailSafeTrackerStore(tracker_store) @staticmethod def _create_lock_store(store: Optional[LockStore]) -> LockStore: @@ -892,7 +921,7 @@ def load_local_model( model_archive = get_latest_model(model_path) if model_archive is None: - logger.warning("Could not load local model in '{}'".format(model_path)) + raise_warning(f"Could not load local model in '{model_path}'.") return Agent() working_directory = tempfile.mkdtemp() @@ -907,6 +936,7 @@ def load_local_model( action_endpoint=action_endpoint, model_server=model_server, remote_storage=remote_storage, + path_to_model_archive=model_archive, ) @staticmethod diff --git a/rasa/core/brokers/broker.py b/rasa/core/brokers/broker.py new file mode 100644 index 000000000000..8dbc333f8e70 --- /dev/null +++ b/rasa/core/brokers/broker.py @@ -0,0 +1,91 @@ +import logging +from typing import Any, Dict, Text, Optional, Union + +from rasa.utils import common +from 
rasa.utils.endpoints import EndpointConfig + +logger = logging.getLogger(__name__) + + +class EventBroker: + """Base class for any event broker implementation.""" + + @staticmethod + def create( + obj: Union["EventBroker", EndpointConfig, None], + ) -> Optional["EventBroker"]: + """Factory to create an event broker.""" + if isinstance(obj, EventBroker): + return obj + + return _create_from_endpoint_config(obj) + + @classmethod + def from_endpoint_config(cls, broker_config: EndpointConfig) -> "EventBroker": + raise NotImplementedError( + "Event broker must implement the `from_endpoint_config` method." + ) + + def publish(self, event: Dict[Text, Any]) -> None: + """Publishes a json-formatted Rasa Core event into an event queue.""" + raise NotImplementedError("Event broker must implement the `publish` method.") + + def is_ready(self) -> bool: + """Determine whether or not the event broker is ready. + + Returns: + `True` by default, but this may be overridden by subclasses. + """ + return True + + def close(self) -> None: + """Close the connection to an event broker.""" + # default implementation does nothing + pass + + +def _create_from_endpoint_config( + endpoint_config: Optional[EndpointConfig], +) -> Optional["EventBroker"]: + """Instantiate an event broker based on its configuration.""" + + if endpoint_config is None: + broker = None + elif endpoint_config.type is None or endpoint_config.type.lower() == "pika": + from rasa.core.brokers.pika import PikaEventBroker + + # default broker if no type is set + broker = PikaEventBroker.from_endpoint_config(endpoint_config) + elif endpoint_config.type.lower() == "sql": + from rasa.core.brokers.sql import SQLEventBroker + + broker = SQLEventBroker.from_endpoint_config(endpoint_config) + elif endpoint_config.type.lower() == "file": + from rasa.core.brokers.file import FileEventBroker + + broker = FileEventBroker.from_endpoint_config(endpoint_config) + elif endpoint_config.type.lower() == "kafka": + from rasa.core.brokers.kafka import KafkaEventBroker + + broker = KafkaEventBroker.from_endpoint_config(endpoint_config) + else: + broker = _load_from_module_name_in_endpoint_config(endpoint_config) + + if broker: + logger.debug(f"Instantiated event broker to '{broker.__class__.__name__}'.") + return broker + + +def _load_from_module_name_in_endpoint_config( + broker_config: EndpointConfig, +) -> Optional["EventBroker"]: + """Instantiate an event broker based on its class name.""" + try: + event_broker_class = common.class_from_module_path(broker_config.type) + return event_broker_class.from_endpoint_config(broker_config) + except (AttributeError, ImportError) as e: + logger.warning( + f"The `EventBroker` type '{broker_config.type}' could not be found. " + f"Not using any event broker. Error: {e}" + ) + return None diff --git a/rasa/core/brokers/event_channel.py b/rasa/core/brokers/event_channel.py deleted file mode 100644 index 32f39591b3f7..000000000000 --- a/rasa/core/brokers/event_channel.py +++ /dev/null @@ -1,19 +0,0 @@ -import logging -from typing import Any, Dict, Text, Optional - -from rasa.utils.endpoints import EndpointConfig - -logger = logging.getLogger(__name__) - - -class EventChannel(object): - @classmethod - def from_endpoint_config(cls, broker_config: EndpointConfig) -> "EventChannel": - raise NotImplementedError( - "Event broker must implement the `from_endpoint_config` method." 
- ) - - def publish(self, event: Dict[Text, Any]) -> None: - """Publishes a json-formatted Rasa Core event into an event queue.""" - - raise NotImplementedError("Event broker must implement the `publish` method.") diff --git a/rasa/core/brokers/file_producer.py b/rasa/core/brokers/file.py similarity index 85% rename from rasa/core/brokers/file_producer.py rename to rasa/core/brokers/file.py index 8a94a9de2eb4..ded31402c5a9 100644 --- a/rasa/core/brokers/file_producer.py +++ b/rasa/core/brokers/file.py @@ -3,7 +3,7 @@ import typing from typing import Optional, Text, Dict -from rasa.core.brokers.event_channel import EventChannel +from rasa.core.brokers.broker import EventBroker if typing.TYPE_CHECKING: from rasa.utils.endpoints import EndpointConfig @@ -11,7 +11,7 @@ logger = logging.getLogger(__name__) -class FileProducer(EventChannel): +class FileEventBroker(EventBroker): """Log events to a file in json format. There will be one event per line and each event is stored as json.""" @@ -25,14 +25,14 @@ def __init__(self, path: Optional[Text] = None) -> None: @classmethod def from_endpoint_config( cls, broker_config: Optional["EndpointConfig"] - ) -> Optional["FileProducer"]: + ) -> Optional["FileEventBroker"]: if broker_config is None: return None # noinspection PyArgumentList return cls(**broker_config.kwargs) - def _event_logger(self): + def _event_logger(self) -> logging.Logger: """Instantiate the file logger.""" logger_file = self.path @@ -44,7 +44,7 @@ def _event_logger(self): query_logger.propagate = False query_logger.addHandler(handler) - logger.info("Logging events to '{}'.".format(logger_file)) + logger.info(f"Logging events to '{logger_file}'.") return query_logger diff --git a/rasa/core/brokers/kafka.py b/rasa/core/brokers/kafka.py index 25f8369ae510..fa2340fab00d 100644 --- a/rasa/core/brokers/kafka.py +++ b/rasa/core/brokers/kafka.py @@ -2,12 +2,15 @@ import logging from typing import Optional -from rasa.core.brokers.event_channel import EventChannel +from rasa.constants import DOCS_URL_EVENT_BROKERS +from rasa.core.brokers.broker import EventBroker +from rasa.utils.common import raise_warning +from rasa.utils.io import DEFAULT_ENCODING logger = logging.getLogger(__name__) -class KafkaProducer(EventChannel): +class KafkaEventBroker(EventBroker): def __init__( self, host, @@ -20,7 +23,7 @@ def __init__( topic="rasa_core_events", security_protocol="SASL_PLAINTEXT", loglevel=logging.ERROR, - ): + ) -> None: self.producer = None self.host = host @@ -36,24 +39,24 @@ def __init__( logging.getLogger("kafka").setLevel(loglevel) @classmethod - def from_endpoint_config(cls, broker_config) -> Optional["KafkaProducer"]: + def from_endpoint_config(cls, broker_config) -> Optional["KafkaEventBroker"]: if broker_config is None: return None return cls(broker_config.url, **broker_config.kwargs) - def publish(self, event): + def publish(self, event) -> None: self._create_producer() self._publish(event) self._close() - def _create_producer(self): + def _create_producer(self) -> None: import kafka if self.security_protocol == "SASL_PLAINTEXT": self.producer = kafka.KafkaProducer( bootstrap_servers=[self.host], - value_serializer=lambda v: json.dumps(v).encode("utf-8"), + value_serializer=lambda v: json.dumps(v).encode(DEFAULT_ENCODING), sasl_plain_username=self.sasl_username, sasl_plain_password=self.sasl_password, sasl_mechanism="PLAIN", @@ -62,7 +65,7 @@ def _create_producer(self): elif self.security_protocol == "SSL": self.producer = kafka.KafkaProducer( bootstrap_servers=[self.host], - 
value_serializer=lambda v: json.dumps(v).encode("utf-8"), + value_serializer=lambda v: json.dumps(v).encode(DEFAULT_ENCODING), ssl_cafile=self.ssl_cafile, ssl_certfile=self.ssl_certfile, ssl_keyfile=self.ssl_keyfile, @@ -70,8 +73,8 @@ def _create_producer(self): security_protocol=self.security_protocol, ) - def _publish(self, event): + def _publish(self, event) -> None: self.producer.send(self.topic, event) - def _close(self): + def _close(self) -> None: self.producer.close() diff --git a/rasa/core/brokers/pika.py b/rasa/core/brokers/pika.py index 16e6b5621d73..0899ef33a372 100644 --- a/rasa/core/brokers/pika.py +++ b/rasa/core/brokers/pika.py @@ -1,25 +1,54 @@ import json import logging -import typing -from typing import Dict, Optional, Text, Union - +import os import time - -from rasa.core.brokers.event_channel import EventChannel +import typing +from collections import deque +from contextlib import contextmanager +from threading import Thread +from typing import ( + Callable, + Deque, + Dict, + Optional, + Text, + Union, + Any, + List, + Tuple, + Generator, +) + +from rasa.constants import ( + DEFAULT_LOG_LEVEL_LIBRARIES, + ENV_LOG_LEVEL_LIBRARIES, + DOCS_URL_PIKA_EVENT_BROKER, +) +from rasa.core.brokers.broker import EventBroker +from rasa.utils.common import raise_warning from rasa.utils.endpoints import EndpointConfig +from rasa.utils.io import DEFAULT_ENCODING if typing.TYPE_CHECKING: - from pika.adapters.blocking_connection import BlockingChannel, BlockingConnection + from pika.adapters.blocking_connection import BlockingChannel + from pika import SelectConnection, BlockingConnection, BasicProperties + from pika.channel import Channel + import pika + from pika.connection import Parameters, Connection logger = logging.getLogger(__name__) +RABBITMQ_EXCHANGE = "rasa-exchange" +DEFAULT_QUEUE_NAME = "rasa_core_events" + def initialise_pika_connection( host: Text, username: Text, password: Text, + port: Union[Text, int] = 5672, connection_attempts: int = 20, - retry_delay_in_seconds: Union[int, float] = 5, + retry_delay_in_seconds: float = 5, ) -> "BlockingConnection": """Create a Pika `BlockingConnection`. @@ -27,17 +56,71 @@ def initialise_pika_connection( host: Pika host username: username for authentication with Pika host password: password for authentication with Pika host + port: port of the Pika host connection_attempts: number of channel attempts before giving up retry_delay_in_seconds: delay in seconds between channel attempts Returns: - Pika `BlockingConnection` with provided parameters + `pika.BlockingConnection` with provided parameters + """ + import pika + + with _pika_log_level(logging.CRITICAL): + parameters = _get_pika_parameters( + host, username, password, port, connection_attempts, retry_delay_in_seconds + ) + return pika.BlockingConnection(parameters) + + +@contextmanager +def _pika_log_level(temporary_log_level: int) -> Generator[None, None, None]: + """Change the log level of the `pika` library. + + The log level will remain unchanged if the current log level is 10 (`DEBUG`) or + lower. + + Args: + temporary_log_level: Temporary log level for pika. Will be reverted to + previous log level when context manager exits. 
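The temporary log-level pattern used by `_pika_log_level` above generalises beyond pika; a stand-alone sketch with a hypothetical `quiet_logger` helper (not part of this diff) shows the same idea.

```python
# Hypothetical helper mirroring the `_pika_log_level` pattern: silence a noisy
# library logger for the duration of a block, unless we are debugging globally.
import logging
from contextlib import contextmanager
from typing import Generator


@contextmanager
def quiet_logger(name: str, level: int = logging.CRITICAL) -> Generator[None, None, None]:
    lib_logger = logging.getLogger(name)
    previous_level = lib_logger.level
    if logging.root.level > logging.DEBUG:  # leave output alone in debug mode
        lib_logger.setLevel(level)
    try:
        yield
    finally:
        lib_logger.setLevel(previous_level)


with quiet_logger("pika"):
    pass  # pika connection setup would happen here
```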
""" + pika_logger = logging.getLogger("pika") + old_log_level = pika_logger.level + is_debug_mode = logging.root.level <= logging.DEBUG + + if not is_debug_mode: + pika_logger.setLevel(temporary_log_level) + + yield + + pika_logger.setLevel(old_log_level) + + +def _get_pika_parameters( + host: Text, + username: Text, + password: Text, + port: Union[Text, int] = 5672, + connection_attempts: int = 20, + retry_delay_in_seconds: float = 5, +) -> "Parameters": + """Create Pika `Parameters`. + Args: + host: Pika host + username: username for authentication with Pika host + password: password for authentication with Pika host + port: port of the Pika host + connection_attempts: number of channel attempts before giving up + retry_delay_in_seconds: delay in seconds between channel attempts + + Returns: + `pika.ConnectionParameters` which can be used to create a new connection to a + broker. + """ import pika if host.startswith("amqp"): - # user supplied a amqp url containing all the info + # user supplied an AMQP URL containing all the info parameters = pika.URLParameters(host) parameters.connection_attempts = connection_attempts parameters.retry_delay = retry_delay_in_seconds @@ -47,14 +130,42 @@ def initialise_pika_connection( # host seems to be just the host, so we use our parameters parameters = pika.ConnectionParameters( host, + port=port, credentials=pika.PlainCredentials(username, password), connection_attempts=connection_attempts, # Wait between retries since # it can take some time until # RabbitMQ comes up. retry_delay=retry_delay_in_seconds, + ssl_options=create_rabbitmq_ssl_options(host), ) - return pika.BlockingConnection(parameters) + + return parameters + + +def initialise_pika_select_connection( + parameters: "Parameters", + on_open_callback: Callable[["SelectConnection"], None], + on_open_error_callback: Callable[["SelectConnection", Text], None], +) -> "SelectConnection": + """Create a non-blocking Pika `SelectConnection`. + + Args: + parameters: Parameters which should be used to connect. + on_open_callback: Callback which is called when the connection was established. + on_open_error_callback: Callback which is called when connecting to the broker + failed. + + Returns: + A callback-based connection to the RabbitMQ event broker. + """ + import pika + + return pika.SelectConnection( + parameters, + on_open_callback=on_open_callback, + on_open_error_callback=on_open_error_callback, + ) def initialise_pika_channel( @@ -62,25 +173,26 @@ def initialise_pika_channel( queue: Text, username: Text, password: Text, + port: Union[Text, int] = 5672, connection_attempts: int = 20, - retry_delay_in_seconds: Union[int, float] = 5, + retry_delay_in_seconds: float = 5, ) -> "BlockingChannel": """Initialise a Pika channel with a durable queue. Args: - host: Pika host - queue: Pika queue to declare - username: username for authentication with Pika host - password: password for authentication with Pika host - connection_attempts: number of channel attempts before giving up - retry_delay_in_seconds: delay in seconds between channel attempts + host: Pika host. + queue: Pika queue to declare. + username: Username for authentication with Pika host. + password: Password for authentication with Pika host. + port: port of the Pika host. + connection_attempts: Number of channel attempts before giving up. + retry_delay_in_seconds: Delay in seconds between channel attempts. Returns: - Pika `BlockingChannel` with declared queue + Pika `BlockingChannel` with declared queue. 
""" - connection = initialise_pika_connection( - host, username, password, connection_attempts, retry_delay_in_seconds + host, username, password, port, connection_attempts, retry_delay_in_seconds ) return _declare_pika_channel_with_queue(connection, queue) @@ -90,28 +202,47 @@ def _declare_pika_channel_with_queue( connection: "BlockingConnection", queue: Text ) -> "BlockingChannel": """Declare a durable queue on Pika channel.""" - channel = connection.channel() channel.queue_declare(queue, durable=True) return channel -def close_pika_channel(channel: "BlockingChannel") -> None: - """Attempt to close Pika channel.""" +def close_pika_channel( + channel: "Channel", + attempts: int = 1000, + time_between_attempts_in_seconds: float = 0.001, +) -> None: + """Attempt to close Pika channel and wait until it is closed. + Args: + channel: Pika `Channel` to close. + attempts: How many times to try to confirm that the channel has indeed been + closed. + time_between_attempts_in_seconds: Wait time between attempts to confirm closed + state. + """ from pika.exceptions import AMQPError try: channel.close() - logger.debug("Successfully closed Pika channel.") + logger.debug("Successfully initiated closing of Pika channel.") except AMQPError: - logger.exception("Failed to close Pika channel.") + logger.exception("Failed to initiate closing of Pika channel.") + while attempts: + if channel.is_closed: + logger.debug("Successfully closed Pika channel.") + return None -def close_pika_connection(connection: "BlockingConnection") -> None: - """Attempt to close Pika connection.""" + time.sleep(time_between_attempts_in_seconds) + attempts -= 1 + logger.exception("Failed to close Pika channel.") + + +def close_pika_connection(connection: "Connection") -> None: + """Attempt to close Pika connection.""" from pika.exceptions import AMQPError try: @@ -121,86 +252,355 @@ def close_pika_connection(connection: "BlockingConnection") -> None: logger.exception("Failed to close Pika connection with host.") -class PikaProducer(EventChannel): +class PikaEventBroker(EventBroker): + """Pika-based event broker for publishing messages to RabbitMQ.""" + def __init__( self, host: Text, username: Text, password: Text, - queue: Text = "rasa_core_events", - loglevel: Union[Text, int] = logging.WARNING, + port: Union[int, Text] = 5672, + queues: Union[List[Text], Tuple[Text], Text, None] = None, + should_keep_unpublished_messages: bool = True, + raise_on_failure: bool = False, + log_level: Union[Text, int] = os.environ.get( + ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES + ), + **kwargs: Any, ): - logging.getLogger("pika").setLevel(loglevel) + """Initialise RabbitMQ event broker. + + Args: + host: Pika host. + username: Username for authentication with Pika host. + password: Password for authentication with Pika host. + port: port of the Pika host. + queues: Pika queues to declare and publish to. + should_keep_unpublished_messages: Whether or not the event broker should + maintain a queue of unpublished messages to be published later in + case of errors. + raise_on_failure: Whether to raise an exception if publishing fails. If + `False`, keep retrying. + log_level: Logging level. 
+ """ + logging.getLogger("pika").setLevel(log_level) - self.queue = queue self.host = host self.username = username self.password = password - self.channel = None # delay opening channel until first event + self.port = port + self.channel: Optional["Channel"] = None + self.queues = self._get_queues_from_args(queues, kwargs) + self.should_keep_unpublished_messages = should_keep_unpublished_messages + self.raise_on_failure = raise_on_failure + + # List to store unpublished messages which hopefully will be published later + self._unpublished_messages: Deque[Text] = deque() + self._run_pika() def __del__(self) -> None: if self.channel: close_pika_channel(self.channel) close_pika_connection(self.channel.connection) - def _open_channel( - self, - connection_attempts: int = 20, - retry_delay_in_seconds: Union[int, float] = 5, - ) -> "BlockingChannel": - return initialise_pika_channel( - self.host, - self.queue, - self.username, - self.password, - connection_attempts, - retry_delay_in_seconds, + def close(self) -> None: + """Close the pika channel and connection.""" + self.__del__() + + @property + def rasa_environment(self) -> Optional[Text]: + """Get value of the `RASA_ENVIRONMENT` environment variable.""" + return os.environ.get("RASA_ENVIRONMENT") + + @staticmethod + def _get_queues_from_args( + queues_arg: Union[List[Text], Tuple[Text], Text, None], kwargs: Any + ) -> Union[List[Text], Tuple[Text]]: + """Get queues for this event broker. + + The preferred argument defining the RabbitMQ queues the `PikaEventBroker` should + publish to is `queues` (as of Rasa Open Source version 1.8.2). This function + ensures backwards compatibility with the old `queue` argument. This method + can be removed in the future, and `self.queues` should just receive the value of + the `queues` kwarg in the constructor. + + Args: + queues_arg: Value of the supplied `queues` argument. + kwargs: Additional kwargs supplied to the `PikaEventBroker` constructor. + If `queues_arg` is not supplied, the `queue` kwarg will be used instead. + + Returns: + Queues this event broker publishes to. + + Raises: + `ValueError` if no valid `queue` or `queues` argument was found. + """ + queue_arg = kwargs.pop("queue", None) + + if queue_arg: + raise_warning( + "Your Pika event broker config contains the deprecated `queue` key. " + "Please use the `queues` key instead.", + FutureWarning, + docs=DOCS_URL_PIKA_EVENT_BROKER, + ) + + if queues_arg and isinstance(queues_arg, (list, tuple)): + return queues_arg + + if queues_arg and isinstance(queues_arg, str): + logger.debug( + f"Found a string value under the `queues` key of the Pika event broker " + f"config. Please supply a list of queues under this key, even if it is " + f"just a single one. See {DOCS_URL_PIKA_EVENT_BROKER}" + ) + return [queues_arg] + + if queue_arg and isinstance(queue_arg, str): + return [queue_arg] + + if queue_arg: + return queue_arg # pytype: disable=bad-return-type + + raise_warning( + f"No `queues` or `queue` argument provided. It is suggested to " + f"explicitly specify a queue as described in " + f"{DOCS_URL_PIKA_EVENT_BROKER}. " + f"Using the default queue '{DEFAULT_QUEUE_NAME}' for now." ) + return [DEFAULT_QUEUE_NAME] + @classmethod def from_endpoint_config( cls, broker_config: Optional["EndpointConfig"] - ) -> Optional["PikaProducer"]: + ) -> Optional["PikaEventBroker"]: + """Initialise `PikaEventBroker` from `EndpointConfig`. + + Args: + broker_config: `EndpointConfig` to read. 
+ + Returns: + `PikaEventBroker` if `broker_config` was supplied, else `None`. + """ if broker_config is None: return None return cls(broker_config.url, **broker_config.kwargs) - def publish(self, event: Dict, retries=60, retry_delay_in_seconds: int = 5) -> None: - """Publish `event` into Pika queue. + def _run_pika(self) -> None: + parameters = _get_pika_parameters( + self.host, self.username, self.password, self.port + ) + self._pika_connection = initialise_pika_select_connection( + parameters, self._on_open_connection, self._on_open_connection_error + ) + # Run Pika io loop in extra thread so it's not blocking + self._run_pika_io_loop_in_thread() + + def _on_open_connection(self, connection: "SelectConnection") -> None: + logger.debug(f"RabbitMQ connection to '{self.host}' was established.") + connection.channel(on_open_callback=self._on_channel_open) + + def _on_open_connection_error(self, _, error: Text) -> None: + logger.warning( + f"Connecting to '{self.host}' failed with error '{error}'. Trying again." + ) + + def _on_channel_open(self, channel: "Channel") -> None: + logger.debug("RabbitMQ channel was opened. Declaring fanout exchange.") + + # declare exchange of type 'fanout' in order to publish to multiple queues + # (https://www.rabbitmq.com/tutorials/amqp-concepts.html#exchange-fanout) + channel.exchange_declare(RABBITMQ_EXCHANGE, exchange_type="fanout") + + for queue in self.queues: + channel.queue_declare(queue=queue, durable=True) + channel.queue_bind(exchange=RABBITMQ_EXCHANGE, queue=queue) + + self.channel = channel + + while self._unpublished_messages: + # Send unpublished messages + message = self._unpublished_messages.popleft() + self._publish(message) + logger.debug( + f"Published message from queue of unpublished messages. " + f"Remaining unpublished messages: {len(self._unpublished_messages)}." + ) + + def _run_pika_io_loop_in_thread(self) -> None: + thread = Thread(target=self._run_pika_io_loop, daemon=True) + thread.start() + + def _run_pika_io_loop(self) -> None: + # noinspection PyUnresolvedReferences + self._pika_connection.ioloop.start() + + def is_ready( + self, attempts: int = 1000, wait_time_between_attempts_in_seconds: float = 0.01 + ) -> bool: + """Spin until the pika channel is open. + + It typically takes 50 ms or so for the pika channel to open. We'll wait up + to 10 seconds just in case. + + Args: + attempts: Number of retries. + wait_time_between_attempts_in_seconds: Wait time between retries. - Perform `retries` publish attempts with `retry_delay_in_seconds` between them. + Returns: + `True` if the channel is available, `False` otherwise. """ + while attempts: + if self.channel: + return True + time.sleep(wait_time_between_attempts_in_seconds) + attempts -= 1 + return False + + def publish( + self, + event: Dict[Text, Any], + retries: int = 60, + retry_delay_in_seconds: int = 5, + headers: Optional[Dict[Text, Text]] = None, + ) -> None: + """Publish `event` into Pika queue. + + Args: + event: Serialised event to be published. + retries: Number of retries if publishing fails + retry_delay_in_seconds: Delay in seconds between retries. + headers: Message headers to append to the published message (key-value + dictionary). The headers can be retrieved in the consumer from the + `headers` attribute of the message's `BasicProperties`. 
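For illustration, publishing an event with headers and the kind of `BasicProperties` the broker attaches to it; values are placeholders and `broker` refers to the `PikaEventBroker` sketched earlier.

```python
# Illustrative only: headers passed to `publish` end up on the message's
# BasicProperties, alongside delivery_mode=2 (persistent) and an optional app_id.
from pika.spec import BasicProperties

event = {"event": "user", "text": "hello", "sender_id": "user_1"}
broker.publish(event, headers={"tenant": "acme"})  # `broker` from the sketch above

# Roughly what `_get_message_properties` builds for that call when the
# RASA_ENVIRONMENT variable is set to "production":
properties = BasicProperties(
    app_id="production", delivery_mode=2, headers={"tenant": "acme"}
)
```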
+ """ body = json.dumps(event) while retries: - # noinspection PyBroadException try: - self._publish(body) + self._publish(body, headers) return except Exception as e: logger.error( - "Could not open Pika channel at host '{}'. Failed with error: " - "{}".format(self.host, e) + f"Could not open Pika channel at host '{self.host}'. " + f"Failed with error: {e}" ) self.channel = None + if self.raise_on_failure: + raise e retries -= 1 time.sleep(retry_delay_in_seconds) - logger.error( - "Failed to publish Pika event to queue '{}' on host " - "'{}':\n{}".format(self.queue, self.host, body) - ) + logger.error(f"Failed to publish Pika event on host '{self.host}':\n{body}") + + def _get_message_properties( + self, headers: Optional[Dict[Text, Text]] = None + ) -> "BasicProperties": + """Create RabbitMQ message `BasicProperties`. - def _publish(self, body: Text) -> None: - if not self.channel: - self.channel = self._open_channel(connection_attempts=1) + The `app_id` property is set to the value of `self.rasa_environment` if + present, and the message delivery mode is set to 2 (persistent). In + addition, the `headers` property is set if supplied. - self.channel.basic_publish("", self.queue, body) + Args: + headers: Message headers to add to the message properties of the + published message (key-value dictionary). The headers can be retrieved in + the consumer from the `headers` attribute of the message's + `BasicProperties`. + + Returns: + `pika.spec.BasicProperties` with the `RASA_ENVIRONMENT` environment variable + as the properties' `app_id` value, `delivery_mode`=2 and `headers` as the + properties' headers. + """ + from pika.spec import BasicProperties + + # make message persistent + kwargs = {"delivery_mode": 2} + + if self.rasa_environment: + kwargs["app_id"] = self.rasa_environment + + if headers: + kwargs["headers"] = headers + + return BasicProperties(**kwargs) + + def _basic_publish( + self, body: Text, headers: Optional[Dict[Text, Text]] = None + ) -> None: + self.channel.basic_publish( + exchange=RABBITMQ_EXCHANGE, + routing_key="", + body=body.encode(DEFAULT_ENCODING), + properties=self._get_message_properties(headers), + ) logger.debug( - "Published Pika events to queue '{}' on host " - "'{}':\n{}".format(self.queue, self.host, body) + f"Published Pika events to exchange '{RABBITMQ_EXCHANGE}' on host " + f"'{self.host}':\n{body}" ) + + def _publish(self, body: Text, headers: Optional[Dict[Text, Text]] = None) -> None: + if self._pika_connection.is_closed: + # Try to reset connection + self._run_pika() + self._basic_publish(body, headers) + elif not self.channel and self.should_keep_unpublished_messages: + logger.warning( + f"RabbitMQ channel has not been assigned. Adding message to " + f"list of unpublished messages and trying to publish them " + f"later. Current number of unpublished messages is " + f"{len(self._unpublished_messages)}." + ) + self._unpublished_messages.append(body) + else: + self._basic_publish(body, headers) + + +def create_rabbitmq_ssl_options( + rabbitmq_host: Optional[Text] = None, +) -> Optional["pika.SSLOptions"]: + """Create RabbitMQ SSL options. 
+ + Requires the following environment variables to be set: + + RABBITMQ_SSL_CLIENT_CERTIFICATE - path to the SSL client certificate (required) + RABBITMQ_SSL_CLIENT_KEY - path to the SSL client key (required) + RABBITMQ_SSL_CA_FILE - path to the SSL CA file for verification (optional) + RABBITMQ_SSL_KEY_PASSWORD - SSL private key password (optional) + + Details on how to enable RabbitMQ TLS support can be found here: + https://www.rabbitmq.com/ssl.html#enabling-tls + + Args: + rabbitmq_host: RabbitMQ hostname + + Returns: + Pika SSL context of type `pika.SSLOptions` if + the RABBITMQ_SSL_CLIENT_CERTIFICATE and RABBITMQ_SSL_CLIENT_KEY + environment variables are valid paths, else `None`. + """ + client_certificate_path = os.environ.get("RABBITMQ_SSL_CLIENT_CERTIFICATE") + client_key_path = os.environ.get("RABBITMQ_SSL_CLIENT_KEY") + + if client_certificate_path and client_key_path: + import pika + import rasa.server + + logger.debug(f"Configuring SSL context for RabbitMQ host '{rabbitmq_host}'.") + + ca_file_path = os.environ.get("RABBITMQ_SSL_CA_FILE") + key_password = os.environ.get("RABBITMQ_SSL_KEY_PASSWORD") + + ssl_context = rasa.server.create_ssl_context( + client_certificate_path, client_key_path, ca_file_path, key_password + ) + return pika.SSLOptions(ssl_context, rabbitmq_host) + else: + return None diff --git a/rasa/core/brokers/sql.py b/rasa/core/brokers/sql.py index e0c993c73ab1..4a2fc3ae4419 100644 --- a/rasa/core/brokers/sql.py +++ b/rasa/core/brokers/sql.py @@ -1,15 +1,17 @@ +import contextlib import json import logging from typing import Any, Dict, Optional, Text -from rasa.core.brokers.event_channel import EventChannel +from rasa.constants import DOCS_URL_EVENT_BROKERS +from rasa.core.brokers.broker import EventBroker +from rasa.utils.common import raise_warning from rasa.utils.endpoints import EndpointConfig -import contextlib logger = logging.getLogger(__name__) -class SQLProducer(EventChannel): +class SQLEventBroker(EventBroker): """Save events into an SQL database. All events will be stored in a table called `events`. 
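The TLS support added in `create_rabbitmq_ssl_options` above is driven purely by environment variables; a sketch of wiring them up, with all paths and the password as placeholders.

```python
# Sketch: enabling TLS for the Pika event broker purely via the environment
# variables read by `create_rabbitmq_ssl_options`; every value below is a placeholder.
import os

os.environ["RABBITMQ_SSL_CLIENT_CERTIFICATE"] = "/etc/rasa/certs/client.pem"
os.environ["RABBITMQ_SSL_CLIENT_KEY"] = "/etc/rasa/certs/client.key"
os.environ["RABBITMQ_SSL_CA_FILE"] = "/etc/rasa/certs/ca.pem"      # optional
os.environ["RABBITMQ_SSL_KEY_PASSWORD"] = "changeme"               # optional
```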
@@ -44,14 +46,14 @@ def __init__( dialect, host, port, db, username, password ) - logger.debug("SQLProducer: Connecting to database: '{}'.".format(engine_url)) + logger.debug(f"SQLEventBroker: Connecting to database: '{engine_url}'.") self.engine = sqlalchemy.create_engine(engine_url) self.Base.metadata.create_all(self.engine) self.sessionmaker = sqlalchemy.orm.sessionmaker(bind=self.engine) @classmethod - def from_endpoint_config(cls, broker_config: EndpointConfig) -> "EventChannel": + def from_endpoint_config(cls, broker_config: EndpointConfig) -> "SQLEventBroker": return cls(host=broker_config.url, **broker_config.kwargs) @contextlib.contextmanager diff --git a/rasa/core/brokers/utils.py b/rasa/core/brokers/utils.py deleted file mode 100644 index a1a9b5d05acf..000000000000 --- a/rasa/core/brokers/utils.py +++ /dev/null @@ -1,54 +0,0 @@ -import logging -import typing -from typing import Optional - -import rasa.utils.common as rasa_utils -from rasa.utils.endpoints import EndpointConfig - -if typing.TYPE_CHECKING: - from rasa.core.brokers.event_channel import EventChannel - -logger = logging.getLogger(__name__) - - -def from_endpoint_config( - broker_config: Optional[EndpointConfig] -) -> Optional["EventChannel"]: - """Instantiate an event channel based on its configuration.""" - - if broker_config is None: - return None - elif broker_config.type == "pika" or broker_config.type is None: - from rasa.core.brokers.pika import PikaProducer - - return PikaProducer.from_endpoint_config(broker_config) - elif broker_config.type.lower() == "sql": - from rasa.core.brokers.sql import SQLProducer - - return SQLProducer.from_endpoint_config(broker_config) - elif broker_config.type == "file": - from rasa.core.brokers.file_producer import FileProducer - - return FileProducer.from_endpoint_config(broker_config) - elif broker_config.type == "kafka": - from rasa.core.brokers.kafka import KafkaProducer - - return KafkaProducer.from_endpoint_config(broker_config) - else: - return load_event_channel_from_module_string(broker_config) - - -def load_event_channel_from_module_string( - broker_config: EndpointConfig -) -> Optional["EventChannel"]: - """Instantiate an event channel based on its class name.""" - - try: - event_channel = rasa_utils.class_from_module_path(broker_config.type) - return event_channel.from_endpoint_config(broker_config) - except (AttributeError, ImportError) as e: - logger.warning( - "EventChannel type '{}' not found. " - "Not using any event channel. 
Error: {}".format(broker_config.type, e) - ) - return None diff --git a/rasa/core/channels/__init__.py b/rasa/core/channels/__init__.py index f31643966607..8ae53db2539d 100644 --- a/rasa/core/channels/__init__.py +++ b/rasa/core/channels/__init__.py @@ -1,11 +1,10 @@ -from typing import Text, Dict, List +from typing import Text, Dict, List, Type from rasa.core.channels.channel import ( InputChannel, OutputChannel, UserMessage, CollectingOutputChannel, - RestInput, ) # this prevents IDE's from optimizing the imports - we need to import the @@ -20,13 +19,15 @@ from rasa.core.channels.facebook import FacebookInput # nopep8 from rasa.core.channels.mattermost import MattermostInput # nopep8 from rasa.core.channels.rasa_chat import RasaChatInput # nopep8 +from rasa.core.channels.rest import RestInput # nopep8 from rasa.core.channels.rocketchat import RocketChatInput # nopep8 from rasa.core.channels.slack import SlackInput # nopep8 from rasa.core.channels.telegram import TelegramInput # nopep8 from rasa.core.channels.twilio import TwilioInput # nopep8 from rasa.core.channels.webexteams import WebexTeamsInput # nopep8 +from rasa.core.channels.hangouts import HangoutsInput # nopep8 -input_channel_classes = [ +input_channel_classes: List[Type[InputChannel]] = [ CmdlineInput, FacebookInput, SlackInput, @@ -40,9 +41,10 @@ RestInput, SocketIOInput, WebexTeamsInput, -] # type: List[InputChannel] + HangoutsInput, +] -# Mapping from a input channel name to its class to allow name based lookup. -BUILTIN_CHANNELS = { +# Mapping from an input channel name to its class to allow name based lookup. +BUILTIN_CHANNELS: Dict[Text, Type[InputChannel]] = { c.name(): c for c in input_channel_classes -} # type: Dict[Text, InputChannel] +} diff --git a/rasa/core/channels/botframework.py b/rasa/core/channels/botframework.py index f8ce1ae7e114..590497bfa40a 100644 --- a/rasa/core/channels/botframework.py +++ b/rasa/core/channels/botframework.py @@ -1,14 +1,13 @@ -# -*- coding: utf-8 -*- - import datetime import json import logging import requests from sanic import Blueprint, response from sanic.request import Request -from typing import Text, Dict, Any, List, Iterable +from typing import Text, Dict, Any, List, Iterable, Callable, Awaitable, Optional from rasa.core.channels.channel import UserMessage, OutputChannel, InputChannel +from sanic.response import HTTPResponse logger = logging.getLogger(__name__) @@ -25,7 +24,7 @@ class BotFramework(OutputChannel): headers = None @classmethod - def name(cls): + def name(cls) -> Text: return "botframework" def __init__( @@ -37,15 +36,19 @@ def __init__( service_url: Text, ) -> None: + service_url = ( + f"{service_url}/" if not service_url.endswith("/") else service_url + ) + self.app_id = app_id self.app_password = app_password self.conversation = conversation - self.global_uri = "{}v3/".format(service_url) + self.global_uri = f"{service_url}v3/" self.bot = bot - async def _get_headers(self): + async def _get_headers(self) -> Optional[Dict[Text, Any]]: if BotFramework.token_expiration_date < datetime.datetime.now(): - uri = "{}/{}".format(MICROSOFT_OAUTH2_URL, MICROSOFT_OAUTH2_PATH) + uri = f"{MICROSOFT_OAUTH2_URL}/{MICROSOFT_OAUTH2_PATH}" grant_type = "client_credentials" scope = "https://api.botframework.com/.default" payload = { @@ -106,7 +109,7 @@ async def send(self, message_data: Dict[Text, Any]) -> None: async def send_text_message( self, recipient_id: Text, text: Text, **kwargs: Any ) -> None: - for message_part in text.split("\n\n"): + for message_part in 
text.strip().split("\n\n"): text_message = {"text": message_part} message = self.prepare_message(recipient_id, text_message) await self.send(message) @@ -128,7 +131,7 @@ async def send_text_with_buttons( recipient_id: Text, text: Text, buttons: List[Dict[Text, Any]], - **kwargs: Any + **kwargs: Any, ) -> None: hero_content = { "contentType": "application/vnd.microsoft.card.hero", @@ -165,15 +168,17 @@ class BotFrameworkInput(InputChannel): """Bot Framework input channel implementation.""" @classmethod - def name(cls): + def name(cls) -> Text: return "botframework" @classmethod - def from_credentials(cls, credentials): + def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> InputChannel: if not credentials: cls.raise_missing_credentials_exception() + # pytype: disable=attribute-error return cls(credentials.get("app_id"), credentials.get("app_password")) + # pytype: enable=attribute-error def __init__(self, app_id: Text, app_password: Text) -> None: """Create a Bot Framework input channel. @@ -186,18 +191,40 @@ def __init__(self, app_id: Text, app_password: Text) -> None: self.app_id = app_id self.app_password = app_password - def blueprint(self, on_new_message): + @staticmethod + def add_attachments_to_metadata( + postdata: Dict[Text, Any], metadata: Optional[Dict[Text, Any]] + ) -> Optional[Dict[Text, Any]]: + """Merge the values of `postdata['attachments']` with `metadata`.""" + + if postdata.get("attachments"): + attachments = {"attachments": postdata["attachments"]} + if metadata: + metadata.update(attachments) + else: + metadata = attachments + + return metadata + + def blueprint( + self, on_new_message: Callable[[UserMessage], Awaitable[Any]] + ) -> Blueprint: botframework_webhook = Blueprint("botframework_webhook", __name__) # noinspection PyUnusedLocal @botframework_webhook.route("/", methods=["GET"]) - async def health(request: Request): + async def health(request: Request) -> HTTPResponse: return response.json({"status": "ok"}) @botframework_webhook.route("/webhook", methods=["POST"]) - async def webhook(request: Request): + async def webhook(request: Request) -> HTTPResponse: postdata = request.json + metadata = self.get_metadata(request) + + metadata_with_attachments = self.add_attachments_to_metadata( + postdata, metadata + ) try: if postdata["type"] == "message": @@ -210,16 +237,18 @@ async def webhook(request: Request): ) user_msg = UserMessage( - postdata["text"], - out_channel, - postdata["from"]["id"], + text=postdata.get("text", ""), + output_channel=out_channel, + sender_id=postdata["from"]["id"], input_channel=self.name(), + metadata=metadata_with_attachments, ) + await on_new_message(user_msg) else: logger.info("Not received message type") except Exception as e: - logger.error("Exception when trying to handle message.{0}".format(e)) + logger.error(f"Exception when trying to handle message.{e}") logger.debug(e, exc_info=True) pass diff --git a/rasa/core/channels/callback.py b/rasa/core/channels/callback.py index cbc4b75eb1ab..b2b28c10e513 100644 --- a/rasa/core/channels/callback.py +++ b/rasa/core/channels/callback.py @@ -1,25 +1,33 @@ import logging +from typing import Text, Dict, Optional, Callable, Awaitable, Any + from sanic import Blueprint, response from sanic.request import Request -from rasa.core.channels.channel import CollectingOutputChannel, UserMessage, RestInput +from rasa.core.channels.channel import ( + CollectingOutputChannel, + UserMessage, + InputChannel, +) +from rasa.core.channels.rest import RestInput from rasa.utils.endpoints import 
EndpointConfig, ClientResponseError +from sanic.response import HTTPResponse logger = logging.getLogger(__name__) class CallbackOutput(CollectingOutputChannel): @classmethod - def name(cls): + def name(cls) -> Text: return "callback" def __init__(self, endpoint: EndpointConfig) -> None: self.callback_endpoint = endpoint - super(CallbackOutput, self).__init__() + super().__init__() - async def _persist_message(self, message): - await super(CallbackOutput, self)._persist_message(message) + async def _persist_message(self, message: Dict[Text, Any]) -> None: + await super()._persist_message(message) try: await self.callback_endpoint.request( @@ -40,25 +48,27 @@ class CallbackInput(RestInput): are sent asynchronously by calling a configured external REST endpoint.""" @classmethod - def name(cls): + def name(cls) -> Text: return "callback" @classmethod - def from_credentials(cls, credentials): + def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> InputChannel: return cls(EndpointConfig.from_dict(credentials)) - def __init__(self, endpoint): + def __init__(self, endpoint: EndpointConfig) -> None: self.callback_endpoint = endpoint - def blueprint(self, on_new_message): + def blueprint( + self, on_new_message: Callable[[UserMessage], Awaitable[Any]] + ) -> Blueprint: callback_webhook = Blueprint("callback_webhook", __name__) @callback_webhook.route("/", methods=["GET"]) - async def health(request: Request): + async def health(_: Request): return response.json({"status": "ok"}) @callback_webhook.route("/webhook", methods=["POST"]) - async def webhook(request: Request): + async def webhook(request: Request) -> HTTPResponse: sender_id = await self._extract_sender(request) text = self._extract_message(request) diff --git a/rasa/core/channels/channel.py b/rasa/core/channels/channel.py index 483b94e23ec5..6cfb56c57e81 100644 --- a/rasa/core/channels/channel.py +++ b/rasa/core/channels/channel.py @@ -1,14 +1,20 @@ -import asyncio -import inspect import json import logging import uuid -from asyncio import Queue, CancelledError -from sanic import Sanic, Blueprint, response +from sanic import Sanic, Blueprint from sanic.request import Request -from typing import Text, List, Dict, Any, Optional, Callable, Iterable, Awaitable +from typing import ( + Text, + List, + Dict, + Any, + Optional, + Callable, + Iterable, + Awaitable, + NoReturn, +) -import rasa.utils.endpoints from rasa.cli import utils as cli_utils from rasa.constants import DOCS_BASE_URL from rasa.core import utils @@ -21,7 +27,7 @@ logger = logging.getLogger(__name__) -class UserMessage(object): +class UserMessage: """Represents an incoming message. Includes the channel the responses should be sent to.""" @@ -90,21 +96,21 @@ async def handler(*args, **kwargs): app.input_channels = input_channels -class InputChannel(object): +class InputChannel: @classmethod - def name(cls): + def name(cls) -> Text: """Every input channel needs a name to identify it.""" return cls.__name__ @classmethod - def from_credentials(cls, credentials): + def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> "InputChannel": return cls() - def url_prefix(self): + def url_prefix(self) -> Text: return self.name() def blueprint( - self, on_new_message: Callable[[UserMessage], Awaitable[None]] + self, on_new_message: Callable[[UserMessage], Awaitable[Any]] ) -> Blueprint: """Defines a Sanic blueprint. 
@@ -113,7 +119,7 @@ def blueprint( raise NotImplementedError("Component listener needs to provide blueprint.") @classmethod - def raise_missing_credentials_exception(cls): + def raise_missing_credentials_exception(cls) -> NoReturn: raise Exception( "To use the {} input channel, you need to " "pass a credentials file using '--credentials'. " @@ -139,15 +145,30 @@ def get_output_channel(self) -> Optional["OutputChannel"]: """ pass + def get_metadata(self, request: Request) -> Optional[Dict[Text, Any]]: + """Extracts additional information from the incoming request. -class OutputChannel(object): + Implementing this function is not required. However, it can be used to extract + metadata from the request. The return value is passed on to the + ``UserMessage`` object and stored in the conversation tracker. + + Args: + request: incoming request with the message of the user + + Returns: + Metadata which was extracted from the request. + """ + pass + + +class OutputChannel: """Output channel base class. Provides sane implementation of the send methods for text only output channels.""" @classmethod - def name(cls): + def name(cls) -> Text: """Every output channel needs a name to identify it.""" return cls.__name__ @@ -159,7 +180,7 @@ async def send_response(self, recipient_id: Text, message: Dict[Text, Any]) -> N recipient_id, message.pop("text"), message.pop("quick_replies"), - **message + **message, ) elif message.get("buttons"): await self.send_text_with_buttons( @@ -197,45 +218,43 @@ async def send_image_url( ) -> None: """Sends an image. Default will just post the url as a string.""" - await self.send_text_message(recipient_id, "Image: {}".format(image), **kwargs) + await self.send_text_message(recipient_id, f"Image: {image}") async def send_attachment( self, recipient_id: Text, attachment: Text, **kwargs: Any ) -> None: """Sends an attachment. Default will just post as a string.""" - await self.send_text_message( - recipient_id, "Attachment: {}".format(attachment), **kwargs - ) + await self.send_text_message(recipient_id, f"Attachment: {attachment}") async def send_text_with_buttons( self, recipient_id: Text, text: Text, buttons: List[Dict[Text, Any]], - **kwargs: Any + **kwargs: Any, ) -> None: """Sends buttons to the output. Default implementation will just post the buttons as a string.""" - await self.send_text_message(recipient_id, text, **kwargs) + await self.send_text_message(recipient_id, text) for idx, button in enumerate(buttons): button_msg = cli_utils.button_to_string(button, idx) - await self.send_text_message(recipient_id, button_msg, **kwargs) + await self.send_text_message(recipient_id, button_msg) async def send_quick_replies( self, recipient_id: Text, text: Text, quick_replies: List[Dict[Text, Any]], - **kwargs: Any + **kwargs: Any, ) -> None: """Sends quick replies to the output. 
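The new `get_metadata` hook on `InputChannel` gives custom channels a place to pull extra request data into the conversation tracker; a sketch of a subclass using it, where the channel name and the header it reads are illustrative and the metadata is forwarded by whichever webhook calls the hook (as the botframework and facebook handlers in this diff do).

```python
# Sketch of an input channel overriding the new `get_metadata` hook; the channel
# name and the header it reads are illustrative only.
from typing import Any, Dict, Optional, Text

from sanic.request import Request

from rasa.core.channels.rest import RestInput


class HeaderMetadataInput(RestInput):
    @classmethod
    def name(cls) -> Text:
        return "header_metadata_rest"

    def get_metadata(self, request: Request) -> Optional[Dict[Text, Any]]:
        # Returned metadata is attached to the UserMessage (see the docstring above).
        return {"user_agent": request.headers.get("user-agent")}
```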
Default implementation will just send as buttons.""" - await self.send_text_with_buttons(recipient_id, text, quick_replies, **kwargs) + await self.send_text_with_buttons(recipient_id, text, quick_replies) async def send_elements( self, recipient_id: Text, elements: Iterable[Dict[Text, Any]], **kwargs: Any @@ -244,15 +263,12 @@ async def send_elements( Default implementation will just post the elements as a string.""" - # we can't pass the empty "buttons" key of the message through to send_text_with_buttons() - kwargs.pop("buttons", None) - for element in elements: element_msg = "{title} : {subtitle}".format( title=element.get("title", ""), subtitle=element.get("subtitle", "") ) await self.send_text_with_buttons( - recipient_id, element_msg, element.get("buttons", []), **kwargs + recipient_id, element_msg, element.get("buttons", []) ) async def send_custom_json( @@ -262,7 +278,7 @@ async def send_custom_json( Default implementation will just post the json contents as a string.""" - await self.send_text_message(recipient_id, json.dumps(json_message), **kwargs) + await self.send_text_message(recipient_id, json.dumps(json_message)) class CollectingOutputChannel(OutputChannel): @@ -270,17 +286,22 @@ class CollectingOutputChannel(OutputChannel): (doesn't send them anywhere, just collects them).""" - def __init__(self): + def __init__(self) -> None: self.messages = [] @classmethod - def name(cls): + def name(cls) -> Text: return "collector" @staticmethod def _message( - recipient_id, text=None, image=None, buttons=None, attachment=None, custom=None - ): + recipient_id: Text, + text: Text = None, + image: Text = None, + buttons: List[Dict[Text, Any]] = None, + attachment: Text = None, + custom: Dict[Text, Any] = None, + ) -> Dict: """Create a message object that will be stored.""" obj = { @@ -295,19 +316,19 @@ def _message( # filter out any values that are `None` return utils.remove_none_values(obj) - def latest_output(self): + def latest_output(self) -> Optional[Dict[Text, Any]]: if self.messages: return self.messages[-1] else: return None - async def _persist_message(self, message) -> None: + async def _persist_message(self, message: Dict[Text, Any]) -> None: self.messages.append(message) # pytype: disable=bad-return-type async def send_text_message( self, recipient_id: Text, text: Text, **kwargs: Any ) -> None: - for message_part in text.split("\n\n"): + for message_part in text.strip().split("\n\n"): await self._persist_message(self._message(recipient_id, text=message_part)) async def send_image_url( @@ -329,7 +350,7 @@ async def send_text_with_buttons( recipient_id: Text, text: Text, buttons: List[Dict[Text, Any]], - **kwargs: Any + **kwargs: Any, ) -> None: await self._persist_message( self._message(recipient_id, text=text, buttons=buttons) @@ -339,136 +360,3 @@ async def send_custom_json( self, recipient_id: Text, json_message: Dict[Text, Any], **kwargs: Any ) -> None: await self._persist_message(self._message(recipient_id, custom=json_message)) - - -class QueueOutputChannel(CollectingOutputChannel): - """Output channel that collects send messages in a list - - (doesn't send them anywhere, just collects them).""" - - @classmethod - def name(cls): - return "queue" - - # noinspection PyMissingConstructor - def __init__(self, message_queue: Optional[Queue] = None) -> None: - super(QueueOutputChannel, self).__init__() - self.messages = Queue() if not message_queue else message_queue - - def latest_output(self): - raise NotImplementedError("A queue doesn't allow to peek at messages.") - - async 
def _persist_message(self, message) -> None: - await self.messages.put(message) # pytype: disable=bad-return-type - - -class RestInput(InputChannel): - """A custom http input channel. - - This implementation is the basis for a custom implementation of a chat - frontend. You can customize this to send messages to Rasa Core and - retrieve responses from the agent.""" - - @classmethod - def name(cls): - return "rest" - - @staticmethod - async def on_message_wrapper( - on_new_message: Callable[[UserMessage], Awaitable[None]], - text: Text, - queue: Queue, - sender_id: Text, - input_channel: Text, - ) -> None: - collector = QueueOutputChannel(queue) - - message = UserMessage(text, collector, sender_id, input_channel=input_channel) - await on_new_message(message) - - await queue.put("DONE") # pytype: disable=bad-return-type - - async def _extract_sender(self, req: Request) -> Optional[Text]: - return req.json.get("sender", None) - - # noinspection PyMethodMayBeStatic - def _extract_message(self, req: Request) -> Optional[Text]: - return req.json.get("message", None) - - def _extract_input_channel(self, req: Request) -> Text: - return req.json.get("input_channel") or self.name() - - def stream_response( - self, - on_new_message: Callable[[UserMessage], Awaitable[None]], - text: Text, - sender_id: Text, - input_channel: Text, - ) -> Callable[[Any], Awaitable[None]]: - async def stream(resp: Any) -> None: - q = Queue() - task = asyncio.ensure_future( - self.on_message_wrapper( - on_new_message, text, q, sender_id, input_channel - ) - ) - result = None # declare variable up front to avoid pytype error - while True: - result = await q.get() - if result == "DONE": - break - else: - await resp.write(json.dumps(result) + "\n") - await task - - return stream # pytype: disable=bad-return-type - - def blueprint(self, on_new_message: Callable[[UserMessage], Awaitable[None]]): - custom_webhook = Blueprint( - "custom_webhook_{}".format(type(self).__name__), - inspect.getmodule(self).__name__, - ) - - # noinspection PyUnusedLocal - @custom_webhook.route("/", methods=["GET"]) - async def health(request: Request): - return response.json({"status": "ok"}) - - @custom_webhook.route("/webhook", methods=["POST"]) - async def receive(request: Request): - sender_id = await self._extract_sender(request) - text = self._extract_message(request) - should_use_stream = rasa.utils.endpoints.bool_arg( - request, "stream", default=False - ) - input_channel = self._extract_input_channel(request) - - if should_use_stream: - return response.stream( - self.stream_response( - on_new_message, text, sender_id, input_channel - ), - content_type="text/event-stream", - ) - else: - collector = CollectingOutputChannel() - # noinspection PyBroadException - try: - await on_new_message( - UserMessage( - text, collector, sender_id, input_channel=input_channel - ) - ) - except CancelledError: - logger.error( - "Message handling timed out for " - "user message '{}'.".format(text) - ) - except Exception: - logger.exception( - "An exception occured while handling " - "user message '{}'.".format(text) - ) - return response.json(collector.messages) - - return custom_webhook diff --git a/rasa/core/channels/console.py b/rasa/core/channels/console.py index da2377680ae5..34f6569d9427 100644 --- a/rasa/core/channels/console.py +++ b/rasa/core/channels/console.py @@ -1,51 +1,68 @@ # this builtin is needed so we can overwrite in test -import aiohttp +import asyncio import json import logging -import questionary -from typing import Text, Optional +import 
os +import aiohttp +import questionary from aiohttp import ClientTimeout -from async_generator import async_generator, yield_ from prompt_toolkit.styles import Style +from typing import Any +from typing import Text, Optional, Dict, List from rasa.cli import utils as cli_utils - from rasa.core import utils -from rasa.core.channels.channel import UserMessage -from rasa.core.channels.channel import RestInput +from rasa.core.channels.rest import RestInput from rasa.core.constants import DEFAULT_SERVER_URL from rasa.core.interpreter import INTENT_MESSAGE_PREFIX +from rasa.utils.io import DEFAULT_ENCODING logger = logging.getLogger(__name__) - +STREAM_READING_TIMEOUT_ENV = "RASA_SHELL_STREAM_READING_TIMEOUT_IN_SECONDS" DEFAULT_STREAM_READING_TIMEOUT_IN_SECONDS = 10 -def print_bot_output( - message, color=cli_utils.bcolors.OKBLUE +def print_buttons( + message: Dict[Text, Any], + is_latest_message: bool = False, + color=cli_utils.bcolors.OKBLUE, ) -> Optional[questionary.Question]: - if ("text" in message) and not ("buttons" in message): - cli_utils.print_color(message.get("text"), color=color) - - if "image" in message: - cli_utils.print_color("Image: " + message.get("image"), color=color) - - if "attachment" in message: - cli_utils.print_color("Attachment: " + message.get("attachment"), color=color) - - if "buttons" in message: + if is_latest_message: choices = cli_utils.button_choices_from_message_data( message, allow_free_text_input=True ) - question = questionary.select( message.get("text"), choices, style=Style([("qmark", "#6d91d3"), ("", "#6d91d3"), ("answer", "#b373d6")]), ) return question + else: + cli_utils.print_color("Buttons:", color=color) + for idx, button in enumerate(message.get("buttons")): + cli_utils.print_color(cli_utils.button_to_string(button, idx), color=color) + + +def print_bot_output( + message: Dict[Text, Any], + is_latest_message: bool = False, + color=cli_utils.bcolors.OKBLUE, +) -> Optional[questionary.Question]: + if "buttons" in message: + question = print_buttons(message, is_latest_message, color) + if question: + return question + + if "text" in message: + cli_utils.print_color(message.get("text"), color=color) + + if "image" in message: + cli_utils.print_color("Image: " + message.get("image"), color=color) + + if "attachment" in message: + cli_utils.print_color("Attachment: " + message.get("attachment"), color=color) if "elements" in message: cli_utils.print_color("Elements:", color=color) @@ -64,12 +81,16 @@ def print_bot_output( cli_utils.print_color(json.dumps(message.get("custom"), indent=2), color=color) -def get_user_input(button_question: questionary.Question) -> Optional[Text]: - if button_question is not None: - response = cli_utils.payload_from_button_question(button_question) +def get_user_input(previous_response: Optional[Dict[str, Any]]) -> Optional[Text]: + button_response = None + if previous_response is not None: + button_response = print_bot_output(previous_response, is_latest_message=True) + + if button_response is not None: + response = cli_utils.payload_from_button_question(button_response) if response == cli_utils.FREE_TEXT_INPUT_PROMPT: # Re-prompt user with a free text input - response = get_user_input(None) + response = get_user_input({}) else: response = questionary.text( "", @@ -79,43 +100,54 @@ def get_user_input(button_question: questionary.Question) -> Optional[Text]: return response.strip() if response is not None else None -async def send_message_receive_block(server_url, auth_token, sender_id, message): +async def 
send_message_receive_block( + server_url, auth_token, sender_id, message +) -> List[Dict[Text, Any]]: payload = {"sender": sender_id, "message": message} - url = "{}/webhooks/rest/webhook?token={}".format(server_url, auth_token) + url = f"{server_url}/webhooks/rest/webhook?token={auth_token}" async with aiohttp.ClientSession() as session: async with session.post(url, json=payload, raise_for_status=True) as resp: return await resp.json() -@async_generator # needed for python 3.5 compatibility -async def send_message_receive_stream(server_url, auth_token, sender_id, message): +async def send_message_receive_stream( + server_url: Text, auth_token: Text, sender_id: Text, message: Text +): payload = {"sender": sender_id, "message": message} - url = "{}/webhooks/rest/webhook?stream=true&token={}".format(server_url, auth_token) + url = f"{server_url}/webhooks/rest/webhook?stream=true&token={auth_token}" # Define timeout to not keep reading in case the server crashed in between - timeout = ClientTimeout(DEFAULT_STREAM_READING_TIMEOUT_IN_SECONDS) - # TODO: check if this properly receives UTF-8 data + timeout = _get_stream_reading_timeout() + async with aiohttp.ClientSession(timeout=timeout) as session: async with session.post(url, json=payload, raise_for_status=True) as resp: async for line in resp.content: if line: - await yield_(json.loads(line.decode("utf-8"))) + yield json.loads(line.decode(DEFAULT_ENCODING)) + + +def _get_stream_reading_timeout() -> ClientTimeout: + timeout_in_seconds = int( + os.environ.get( + STREAM_READING_TIMEOUT_ENV, DEFAULT_STREAM_READING_TIMEOUT_IN_SECONDS + ) + ) + + return ClientTimeout(timeout_in_seconds) async def record_messages( + sender_id, server_url=DEFAULT_SERVER_URL, - auth_token=None, - sender_id=UserMessage.DEFAULT_SENDER_ID, + auth_token="", max_message_limit=None, use_response_stream=True, -): +) -> int: """Read messages from the command line and print bot responses.""" - auth_token = auth_token if auth_token else "" - exit_text = INTENT_MESSAGE_PREFIX + "stop" cli_utils.print_success( @@ -124,9 +156,10 @@ async def record_messages( ) num_messages = 0 - button_question = None + previous_response = None + await asyncio.sleep(0.5) # Wait for server to start while not utils.is_limit_reached(num_messages, max_message_limit): - text = get_user_input(button_question) + text = get_user_input(previous_response) if text == exit_text or text is None: break @@ -135,23 +168,30 @@ async def record_messages( bot_responses = send_message_receive_stream( server_url, auth_token, sender_id, text ) + previous_response = None async for response in bot_responses: - button_question = print_bot_output(response) + if previous_response is not None: + print_bot_output(previous_response) + previous_response = response else: bot_responses = await send_message_receive_block( server_url, auth_token, sender_id, text ) + previous_response = None for response in bot_responses: - button_question = print_bot_output(response) + if previous_response is not None: + print_bot_output(previous_response) + previous_response = response num_messages += 1 + await asyncio.sleep(0) # Yield event loop for others coroutines return num_messages class CmdlineInput(RestInput): @classmethod - def name(cls): + def name(cls) -> Text: return "cmdline" - def url_prefix(self): + def url_prefix(self) -> Text: return RestInput.name() diff --git a/rasa/core/channels/facebook.py b/rasa/core/channels/facebook.py index 5d1f1141ffec..de005eb11e8f 100644 --- a/rasa/core/channels/facebook.py +++ 
b/rasa/core/channels/facebook.py @@ -5,11 +5,13 @@ from fbmessenger.attachments import Image from fbmessenger.elements import Text as FBText from fbmessenger.quick_replies import QuickReplies, QuickReply +from rasa.utils.common import raise_warning from sanic import Blueprint, response from sanic.request import Request -from typing import Text, List, Dict, Any, Callable, Awaitable, Iterable +from typing import Text, List, Dict, Any, Callable, Awaitable, Iterable, Optional from rasa.core.channels.channel import UserMessage, OutputChannel, InputChannel +from sanic.response import HTTPResponse logger = logging.getLogger(__name__) @@ -18,13 +20,13 @@ class Messenger: """Implement a fbmessenger to parse incoming webhooks and send msgs.""" @classmethod - def name(cls): + def name(cls) -> Text: return "facebook" def __init__( self, page_access_token: Text, - on_new_message: Callable[[UserMessage], Awaitable[None]], + on_new_message: Callable[[UserMessage], Awaitable[Any]], ) -> None: self.on_new_message = on_new_message @@ -43,6 +45,33 @@ def _is_audio_message(message: Dict[Text, Any]) -> bool: and message["message"]["attachments"][0]["type"] == "audio" ) + @staticmethod + def _is_image_message(message: Dict[Text, Any]) -> bool: + """Check if the users message is an image.""" + return ( + "message" in message + and "attachments" in message["message"] + and message["message"]["attachments"][0]["type"] == "image" + ) + + @staticmethod + def _is_video_message(message: Dict[Text, Any]) -> bool: + """Check if the users message is a video.""" + return ( + "message" in message + and "attachments" in message["message"] + and message["message"]["attachments"][0]["type"] == "video" + ) + + @staticmethod + def _is_file_message(message: Dict[Text, Any]) -> bool: + """Check if the users message is a file.""" + return ( + "message" in message + and "attachments" in message["message"] + and message["message"]["attachments"][0]["type"] == "file" + ) + @staticmethod def _is_user_message(message: Dict[Text, Any]) -> bool: """Check if the message is a message from the user""" @@ -61,16 +90,18 @@ def _is_quick_reply_message(message: Dict[Text, Any]) -> bool: and message["message"]["quick_reply"].get("payload") ) - async def handle(self, payload): + async def handle(self, payload: Dict, metadata: Optional[Dict[Text, Any]]) -> None: for entry in payload["entry"]: for message in entry["messaging"]: self.last_message = message if message.get("message"): - return await self.message(message) + return await self.message(message, metadata) elif message.get("postback"): - return await self.postback(message) + return await self.postback(message, metadata) - async def message(self, message: Dict[Text, Any]) -> None: + async def message( + self, message: Dict[Text, Any], metadata: Optional[Dict[Text, Any]] + ) -> None: """Handle an incoming event from the fb webhook.""" # quick reply and user message both share 'text' attribute @@ -82,26 +113,41 @@ async def message(self, message: Dict[Text, Any]) -> None: elif self._is_audio_message(message): attachment = message["message"]["attachments"][0] text = attachment["payload"]["url"] + elif self._is_image_message(message): + attachment = message["message"]["attachments"][0] + text = attachment["payload"]["url"] + elif self._is_video_message(message): + attachment = message["message"]["attachments"][0] + text = attachment["payload"]["url"] + elif self._is_file_message(message): + attachment = message["message"]["attachments"][0] + text = attachment["payload"]["url"] else: 
logger.warning( "Received a message from facebook that we can not " - "handle. Message: {}".format(message) + f"handle. Message: {message}" ) return - await self._handle_user_message(text, self.get_user_id()) + await self._handle_user_message(text, self.get_user_id(), metadata) - async def postback(self, message: Dict[Text, Any]) -> None: + async def postback( + self, message: Dict[Text, Any], metadata: Optional[Dict[Text, Any]] + ) -> None: """Handle a postback (e.g. quick reply button).""" text = message["postback"]["payload"] - await self._handle_user_message(text, self.get_user_id()) + await self._handle_user_message(text, self.get_user_id(), metadata) - async def _handle_user_message(self, text: Text, sender_id: Text) -> None: + async def _handle_user_message( + self, text: Text, sender_id: Text, metadata: Optional[Dict[Text, Any]] + ) -> None: """Pass on the text to the dialogue engine for processing.""" out_channel = MessengerBot(self.client) - user_msg = UserMessage(text, out_channel, sender_id, input_channel=self.name()) + user_msg = UserMessage( + text, out_channel, sender_id, input_channel=self.name(), metadata=metadata + ) # noinspection PyBroadException try: @@ -117,13 +163,13 @@ class MessengerBot(OutputChannel): """A bot that uses fb-messenger to communicate.""" @classmethod - def name(cls): + def name(cls) -> Text: return "facebook" def __init__(self, messenger_client: MessengerClient) -> None: self.messenger_client = messenger_client - super(MessengerBot, self).__init__() + super().__init__() def send(self, recipient_id: Text, element: Any) -> None: """Sends a message to the recipient using the messenger client.""" @@ -138,7 +184,7 @@ async def send_text_message( ) -> None: """Send a message through this channel.""" - for message_part in text.split("\n\n"): + for message_part in text.strip().split("\n\n"): self.send(recipient_id, FBText(text=message_part)) async def send_image_url( @@ -153,13 +199,13 @@ async def send_text_with_buttons( recipient_id: Text, text: Text, buttons: List[Dict[Text, Any]], - **kwargs: Any + **kwargs: Any, ) -> None: """Sends buttons to the output.""" # buttons is a list of tuples: [(option_name,payload)] if len(buttons) > 3: - logger.warning( + raise_warning( "Facebook API currently allows only up to 3 buttons. " "If you add more, all will be ignored." ) @@ -187,7 +233,7 @@ async def send_quick_replies( recipient_id: Text, text: Text, quick_replies: List[Dict[Text, Any]], - **kwargs: Any + **kwargs: Any, ) -> None: """Sends quick replies to the output.""" @@ -200,7 +246,8 @@ async def send_elements( """Sends elements to the output.""" for element in elements: - self._add_postback_info(element["buttons"]) + if "buttons" in element: + self._add_postback_info(element["buttons"]) payload = { "attachment": { @@ -252,19 +299,21 @@ class FacebookInput(InputChannel): """Facebook input channel implementation. Based on the HTTPInputChannel.""" @classmethod - def name(cls): + def name(cls) -> Text: return "facebook" @classmethod - def from_credentials(cls, credentials): + def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> InputChannel: if not credentials: cls.raise_missing_credentials_exception() + # pytype: disable=attribute-error return cls( credentials.get("verify"), credentials.get("secret"), credentials.get("page-access-token"), ) + # pytype: enable=attribute-error def __init__(self, fb_verify: Text, fb_secret: Text, fb_access_token: Text) -> None: """Create a facebook input channel. 
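For reference, a sketch of the credentials dictionary `FacebookInput.from_credentials` expects; all values are placeholders and the channel requires the `fbmessenger` dependency.

```python
# Sketch: the keys read by FacebookInput.from_credentials; values are placeholders.
from rasa.core.channels.facebook import FacebookInput

facebook_channel = FacebookInput.from_credentials(
    {
        "verify": "my-verify-token",
        "secret": "my-app-secret",
        "page-access-token": "my-page-access-token",
    }
)
```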
@@ -284,17 +333,19 @@ def __init__(self, fb_verify: Text, fb_secret: Text, fb_access_token: Text) -> N self.fb_secret = fb_secret self.fb_access_token = fb_access_token - def blueprint(self, on_new_message): + def blueprint( + self, on_new_message: Callable[[UserMessage], Awaitable[Any]] + ) -> Blueprint: fb_webhook = Blueprint("fb_webhook", __name__) # noinspection PyUnusedLocal @fb_webhook.route("/", methods=["GET"]) - async def health(request: Request): + async def health(request: Request) -> HTTPResponse: return response.json({"status": "ok"}) @fb_webhook.route("/webhook", methods=["GET"]) - async def token_verification(request: Request): + async def token_verification(request: Request) -> HTTPResponse: if request.args.get("hub.verify_token") == self.fb_verify: return response.text(request.args.get("hub.challenge")) else: @@ -305,7 +356,7 @@ async def token_verification(request: Request): return response.text("failure, invalid token") @fb_webhook.route("/webhook", methods=["POST"]) - async def webhook(request: Request): + async def webhook(request: Request) -> HTTPResponse: signature = request.headers.get("X-Hub-Signature") or "" if not self.validate_hub_signature(self.fb_secret, request.body, signature): logger.warning( @@ -316,13 +367,16 @@ async def webhook(request: Request): messenger = Messenger(self.fb_access_token, on_new_message) - await messenger.handle(request.json) + metadata = self.get_metadata(request) + await messenger.handle(request.json, metadata) return response.text("success") return fb_webhook @staticmethod - def validate_hub_signature(app_secret, request_payload, hub_signature_header): + def validate_hub_signature( + app_secret, request_payload, hub_signature_header + ) -> bool: """Make sure the incoming webhook requests are properly signed. 
Args: diff --git a/rasa/core/channels/hangouts.py b/rasa/core/channels/hangouts.py new file mode 100644 index 000000000000..8377680ff92d --- /dev/null +++ b/rasa/core/channels/hangouts.py @@ -0,0 +1,309 @@ +import logging +from asyncio import CancelledError +from sanic import Blueprint, response +from sanic.request import Request +from typing import Text, List, Dict, Any, Optional, Callable, Iterable, Awaitable, Union + +from sanic.response import HTTPResponse +from sanic.exceptions import abort +from oauth2client import client +from oauth2client.crypt import AppIdentityError + +from rasa.core.channels.channel import InputChannel, OutputChannel, UserMessage + +logger = logging.getLogger(__name__) + +CHANNEL_NAME = "hangouts" +CERT_URI = "https://www.googleapis.com/service_accounts/v1/metadata/x509/chat@system.gserviceaccount.com" + + +class HangoutsOutput(OutputChannel): + @classmethod + def name(cls) -> Text: + return CHANNEL_NAME + + def __init__(self) -> None: + self.messages = {} + + @staticmethod + def _text_card(message: Dict[Text, Any]) -> Dict: + + card = { + "cards": [ + { + "sections": [ + {"widgets": [{"textParagraph": {"text": message["text"]}}]} + ] + } + ] + } + return card + + @staticmethod + def _image_card(image: Text) -> Dict: + card = { + "cards": [{"sections": [{"widgets": [{"image": {"imageUrl": image}}]}]}] + } + return card + + @staticmethod + def _text_button_card(text: Text, buttons: List) -> Union[Dict, None]: + hangouts_buttons = [] + for b in buttons: + try: + b_txt, b_pl = b["title"], b["payload"] + except KeyError: + logger.error( + "Buttons must be a list of dicts with 'title' and 'payload' as keys" + ) + return + + hangouts_buttons.append( + { + "textButton": { + "text": b_txt, + "onClick": {"action": {"actionMethodName": b_pl}}, + } + } + ) + + card = { + "cards": [ + { + "sections": [ + { + "widgets": [ + {"textParagraph": {"text": text}}, + {"buttons": hangouts_buttons}, + ] + } + ] + } + ] + } + return card + + @staticmethod + def _combine_cards(c1: Dict, c2: Dict) -> Dict: + return {"cards": [*c1["cards"], *c2["cards"]]} + + async def _persist_message(self, message: Dict) -> None: + """Google Hangouts only accepts single dict with single key 'text' + for simple text messages. All other responses must be sent as cards. + + In case the bot sends multiple messages, all are transformed to either + cards or text output""" + + # check whether current and previous message will send 'text' or 'card' + if self.messages.get("text"): + msg_state = "text" + elif self.messages.get("cards"): + msg_state = "cards" + else: + msg_state = None + + if message.get("text"): + msg_new = "text" + elif message.get("cards"): + msg_new = "cards" + else: + raise Exception( + "Your message to Hangouts channel must either contain 'text' or 'cards'!" 
+ ) + + # depending on above outcome, convert messages into same type and combine + if msg_new == msg_state == "text": + # two text messages are simply appended + new_text = " ".join([self.messages.get("text", ""), message["text"]]) + new_messages = {"text": new_text} + + elif msg_new == msg_state == "cards": + # two cards are combined into one + new_messages = self._combine_cards(self.messages, message) + + elif msg_state == "cards" and msg_new == "text": + # if any message is card, turn text message into TextParagraph card + # and combine cards + text_card = self._text_card(message) + new_messages = self._combine_cards(self.messages, text_card) + + elif msg_state == "text" and msg_new == "cards": + text_card = self._text_card(self.messages) + new_messages = self._combine_cards(text_card, message) + + elif msg_new == "text": + new_messages = {"text": message.get("text")} + else: + new_messages = message + + self.messages = new_messages + + async def send_text_message( + self, recipient_id: Text, text: Text, **kwargs: Any + ) -> None: + + await self._persist_message({"text": text}) + + async def send_image_url(self, recipient_id: Text, image: Text, **kwargs) -> None: + + await self._persist_message(self._image_card(image)) + + async def send_text_with_buttons( + self, recipient_id: Text, text: Text, buttons: List, **kwargs + ) -> None: + + await self._persist_message(self._text_button_card(text, buttons)) + + async def send_attachment( + self, recipient_id: Text, attachment: Text, **kwargs: Any + ): + + await self.send_text_message(recipient_id, attachment) + + async def send_elements( + self, recipient_id: Text, elements: Iterable[Dict[Text, Any]], **kwargs: Any + ) -> None: + raise NotImplementedError + + async def send_custom_json( + self, recipient_id: Text, json_message: Dict, **kwargs + ) -> None: + """Custom json payload is simply forwarded to Google Hangouts without + any modifications. Use this for more complex cards, which can be created + in actions.py.""" + await self._persist_message(json_message) + + +# Google Hangouts input channel +class HangoutsInput(InputChannel): + """ + Channel that uses Google Hangouts Chat API to communicate. 
+ """ + + @classmethod + def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> InputChannel: + + if credentials: + return cls(credentials.get("project_id")) # pytype: disable=attribute-error + + return cls() + + def __init__( + self, + project_id: Optional[Text] = None, + hangouts_user_added_intent_name: Optional[Text] = "/user_added", + hangouts_room_added_intent_name: Optional[Text] = "/room_added", + hangouts_removed_intent_name: Optional[Text] = "/bot_removed", + ) -> None: + + self.project_id = project_id + self.hangouts_user_added_intent_name = hangouts_user_added_intent_name + self.hangouts_room_added_intent_name = hangouts_room_added_intent_name + self.hangouts_user_added_intent_name = hangouts_removed_intent_name + + @classmethod + def name(cls) -> Text: + return CHANNEL_NAME + + @staticmethod + def _extract_sender(req: Request) -> Text: + + if req.json["type"] == "MESSAGE": + return req.json["message"]["sender"]["displayName"] + + return req.json["user"]["displayName"] + + # noinspection PyMethodMayBeStatic + def _extract_message(self, req: Request) -> Text: + + if req.json["type"] == "MESSAGE": + message = req.json["message"]["text"] + + elif req.json["type"] == "CARD_CLICKED": + message = req.json["action"]["actionMethodName"] + + elif req.json["type"] == "ADDED_TO_SPACE": + if self._extract_room(req) and self.hangouts_room_added_intent_name: + message = self.hangouts_room_added_intent_name + elif not self._extract_room(req) and self.hangouts_user_added_intent_name: + message = self.hangouts_user_added_intent_name + + elif ( + req.json["type"] == "REMOVED_FROM_SPACE" + and self.hangouts_user_added_intent_name + ): + message = self.hangouts_user_added_intent_name + else: + message = "" + + return message + + @staticmethod + def _extract_room(req: Request) -> Union[Text, None]: + + if req.json["space"]["type"] == "ROOM": + return req.json["space"]["displayName"] + + def _extract_input_channel(self) -> Text: + return self.name() + + def _check_token(self, bot_token: Text) -> None: + # see https://developers.google.com/hangouts/chat/how-tos/bots-develop#verifying_bot_authenticity + try: + token = client.verify_id_token( + bot_token, self.project_id, cert_uri=CERT_URI + ) + + if token["iss"] != "chat@system.gserviceaccount.com": + abort(401) + except AppIdentityError: + abort(401) + + def blueprint( + self, on_new_message: Callable[[UserMessage], Awaitable[None]] + ) -> Blueprint: + + custom_webhook = Blueprint("hangouts_webhook", __name__) + + @custom_webhook.route("/", methods=["GET"]) + async def health(request: Request) -> HTTPResponse: + return response.json({"status": "ok"}) + + @custom_webhook.route("/webhook", methods=["POST"]) + async def receive(request: Request) -> HTTPResponse: + + if self.project_id: + token = request.headers.get("Authorization").replace("Bearer ", "") + self._check_token(token) + + sender_id = self._extract_sender(request) + room_name = self._extract_room(request) + text = self._extract_message(request) + if text is None: + return response.text("OK") + input_channel = self._extract_input_channel() + + collector = HangoutsOutput() + + try: + await on_new_message( + UserMessage( + text, + collector, + sender_id, + input_channel=input_channel, + metadata={"room": room_name}, + ) + ) + except CancelledError: + logger.error( + "Message handling timed out for " "user message '{}'.".format(text) + ) + except Exception as e: + logger.exception( + f"An exception occurred while handling user message: {e}, text: {text}" + ) + + return 
response.json(collector.messages) + + return custom_webhook diff --git a/rasa/core/channels/mattermost.py b/rasa/core/channels/mattermost.py index fecb1de47162..6eb51515934c 100644 --- a/rasa/core/channels/mattermost.py +++ b/rasa/core/channels/mattermost.py @@ -1,65 +1,155 @@ +import json + import logging -from mattermostwrapper import MattermostAPI +import requests +from requests import Response from sanic import Blueprint, response from sanic.request import Request -from typing import Text, Dict, Any +from typing import Text, Dict, Any, List, Callable, Awaitable, Optional +from rasa.constants import DOCS_URL_CONNECTORS from rasa.core.channels.channel import UserMessage, OutputChannel, InputChannel +from sanic.response import HTTPResponse + +from rasa.utils.common import raise_warning logger = logging.getLogger(__name__) -class MattermostBot(MattermostAPI, OutputChannel): +class MattermostBot(OutputChannel): """A Mattermost communication channel""" @classmethod - def name(cls): + def name(cls) -> Text: return "mattermost" - def __init__(self, url, team, user, pw, bot_channel): + @classmethod + def token_from_login(cls, url: Text, user: Text, password: Text) -> Optional[Text]: + """Retrieve access token for mattermost user.""" + + data = {"login_id": user, "password": password} + r = requests.post(url + "/users/login", data=json.dumps(data)) + if r.status_code == 200: + return r.headers["Token"] + else: + logger.error(f"Failed to login mattermost user {user}. Response: {r}") + return None + + def __init__( + self, url: Text, token: Text, bot_channel: Text, webhook_url: Optional[Text] + ) -> None: self.url = url - self.team = team - self.user = user - self.pw = pw + self.token = token self.bot_channel = bot_channel + self.webhook_url = webhook_url + + super(MattermostBot, self).__init__() + + def _post_message_to_channel(self, channel_id: Text, message: Text): + return self._post_data_to_channel( + {"channel_id": channel_id, "message": message} + ) - super(MattermostBot, self).__init__(url, team) - super(MattermostBot, self).login(user, pw) + def _post_data_to_channel(self, data) -> Response: + """Send a message to a mattermost channel.""" + + headers = {"Authorization": "Bearer " + self.token} + r = requests.post(self.url + "/posts", headers=headers, data=json.dumps(data)) + if not r.status_code == 200: + logger.error( + f"Failed to send message to mattermost channel " + f"{data.get('channel_id')}. 
Response: {r}" + ) + return r async def send_text_message( self, recipient_id: Text, text: Text, **kwargs: Any ) -> None: - for message_part in text.split("\n\n"): - self.post_channel(self.bot_channel, message_part) + for message_part in text.strip().split("\n\n"): + self._post_message_to_channel(self.bot_channel, message_part) async def send_custom_json( self, recipient_id: Text, json_message: Dict[Text, Any], **kwargs: Any ) -> None: json_message.setdefault("channel_id", self.bot_channel) json_message.setdefault("message", "") - self.post("/posts", json_message) + + self._post_data_to_channel(json_message) + + async def send_image_url( + self, recipient_id: Text, image: Text, **kwargs: Any + ) -> None: + """Sends an image.""" + + self._post_data_to_channel( + { + "channel_id": self.bot_channel, + "props": {"attachments": [{"image_url": image}]}, + } + ) + + async def send_text_with_buttons( + self, + recipient_id: Text, + text: Text, + buttons: List[Dict[Text, Any]], + **kwargs: Any, + ) -> None: + """Sends buttons to the output.""" + + # buttons are a list of objects: [(option_name, payload)] + # See https://docs.mattermost.com/developer/interactive-messages.html#message-buttons + + actions = [ + { + "name": button["title"], + "integration": { + "url": self.webhook_url, + "context": {"action": button["payload"]}, + }, + } + for button in buttons + ] + + props = {"attachments": [{"actions": actions}]} + + self._post_data_to_channel( + {"channel_id": self.bot_channel, "message": text, "props": props} + ) class MattermostInput(InputChannel): """Mattermost input channel implemenation.""" @classmethod - def name(cls): + def name(cls) -> Text: return "mattermost" @classmethod - def from_credentials(cls, credentials): - if not credentials: + def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> InputChannel: + if credentials is None: cls.raise_missing_credentials_exception() - return cls( - credentials.get("url"), - credentials.get("team"), - credentials.get("user"), - credentials.get("pw"), - ) - - def __init__(self, url: Text, team: Text, user: Text, pw: Text) -> None: + # pytype: disable=attribute-error + if credentials.get("pw") is not None or credentials.get("user") is not None: + raise_warning( + "Mattermost recently switched to bot accounts. 'user' and 'pw' " + "should not be used anymore, you should rather convert your " + "account to a bot account and use a token. Password based " + "authentication will be removed in a future Rasa Open Source version.", + FutureWarning, + docs=DOCS_URL_CONNECTORS + "mattermost/", + ) + token = MattermostBot.token_from_login( + credentials.get("url"), credentials.get("user"), credentials.get("pw") + ) + else: + token = credentials.get("token") + + return cls(credentials.get("url"), token, credentials.get("webhook_url")) + # pytype: enable=attribute-error + + def __init__(self, url: Text, token: Text, webhook_url: Text) -> None: """Create a Mattermost input channel. Needs a couple of settings to properly authenticate and validate messages. 
@@ -67,46 +157,95 @@ def __init__(self, url: Text, team: Text, user: Text, pw: Text) -> None: Args: url: Your Mattermost team url including /v4 example https://mysite.example.com/api/v4 - team: Your mattermost team name - user: Your mattermost userid that will post messages - pw: Your mattermost password for your user + token: Your mattermost bot token + webhook_url: The mattermost callback url as specified + in the outgoing webhooks in mattermost example + https://mysite.example.com/webhooks/mattermost/webhook """ self.url = url - self.team = team - self.user = user - self.pw = pw + self.token = token + self.webhook_url = webhook_url + + async def message_with_trigger_word( + self, + on_new_message: Callable[[UserMessage], Awaitable[None]], + output: Dict[Text, Any], + metadata: Optional[Dict], + ) -> None: + # splitting to get rid of the @botmention + # trigger we are using for this + split_message = output["text"].split(" ", 1) + if len(split_message) >= 2: + message = split_message[1] + else: + message = output["text"] + + await self._handle_message( + message, output["user_id"], output["channel_id"], metadata, on_new_message + ) + + async def action_from_button( + self, + on_new_message: Callable[[UserMessage], Awaitable[None]], + output: Dict[Text, Any], + metadata: Optional[Dict], + ) -> None: + # get the action, the buttons triggers + action = output["context"]["action"] + + await self._handle_message( + action, output["user_id"], output["channel_id"], metadata, on_new_message + ) - def blueprint(self, on_new_message): + async def _handle_message( + self, + message: Text, + sender_id: Text, + bot_channel: Text, + metadata: Optional[Dict], + on_new_message: Callable[[UserMessage], Awaitable[None]], + ): + try: + out_channel = MattermostBot( + self.url, self.token, bot_channel, self.webhook_url + ) + user_msg = UserMessage( + message, + out_channel, + sender_id, + input_channel=self.name(), + metadata=metadata, + ) + await on_new_message(user_msg) + except Exception as e: + logger.error(f"Exception when trying to handle message.{e}") + logger.debug(e, exc_info=True) + + def blueprint( + self, on_new_message: Callable[[UserMessage], Awaitable[None]] + ) -> Blueprint: mattermost_webhook = Blueprint("mattermost_webhook", __name__) @mattermost_webhook.route("/", methods=["GET"]) - async def health(request: Request): + async def health(_: Request) -> HTTPResponse: return response.json({"status": "ok"}) @mattermost_webhook.route("/webhook", methods=["POST"]) - async def webhook(request: Request): + async def webhook(request: Request) -> HTTPResponse: output = request.json - if output: - # splitting to get rid of the @botmention - # trigger we are using for this - text = output["text"].split(" ", 1) - text = text[1] - sender_id = output["user_id"] - self.bot_channel = output["channel_id"] - try: - out_channel = MattermostBot( - self.url, self.team, self.user, self.pw, self.bot_channel - ) - user_msg = UserMessage( - text, out_channel, sender_id, input_channel=self.name() - ) - await on_new_message(user_msg) - except Exception as e: - logger.error( - "Exception when trying to handle message.{0}".format(e) - ) - logger.debug(e, exc_info=True) - pass - return response.text("") + + if not output: + return response.text("") + + metadata = self.get_metadata(request) + # handle normal message with trigger_word + if "trigger_word" in output: + await self.message_with_trigger_word(on_new_message, output, metadata) + + # handle context actions from buttons + elif "context" in output: + await 
self.action_from_button(on_new_message, output, metadata) + + return response.text("success") return mattermost_webhook diff --git a/rasa/core/channels/rasa_chat.py b/rasa/core/channels/rasa_chat.py index 660a04121322..12ce77aea68e 100644 --- a/rasa/core/channels/rasa_chat.py +++ b/rasa/core/channels/rasa_chat.py @@ -1,5 +1,5 @@ import json -from typing import Text, Optional, Dict +from typing import Text, Optional, Dict, Any import aiohttp import logging @@ -7,34 +7,39 @@ import jwt from rasa.core import constants -from rasa.core.channels.channel import RestInput +from rasa.core.channels.channel import InputChannel +from rasa.core.channels.rest import RestInput from rasa.core.constants import DEFAULT_REQUEST_TIMEOUT from sanic.request import Request logger = logging.getLogger(__name__) +CONVERSATION_ID_KEY = "conversation_id" +JWT_USERNAME_KEY = "username" +INTERACTIVE_LEARNING_PERMISSION = "clientEvents:create" + class RasaChatInput(RestInput): """Chat input channel for Rasa X""" @classmethod - def name(cls): + def name(cls) -> Text: return "rasa" @classmethod - def from_credentials(cls, credentials): + def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> InputChannel: if not credentials: cls.raise_missing_credentials_exception() - return cls(credentials.get("url")) + return cls(credentials.get("url")) # pytype: disable=attribute-error - def __init__(self, url): + def __init__(self, url: Optional[Text]) -> None: self.base_url = url self.jwt_key = None self.jwt_algorithm = None async def _fetch_public_key(self) -> None: - public_key_url = "{}/version".format(self.base_url) + public_key_url = f"{self.base_url}/version" async with aiohttp.ClientSession() as session: async with session.get( public_key_url, timeout=DEFAULT_REQUEST_TIMEOUT @@ -88,15 +93,39 @@ async def _decode_bearer_token(self, bearer_token: Text) -> Optional[Dict]: logger.exception("Failed to decode bearer token.") async def _extract_sender(self, req: Request) -> Optional[Text]: - """Fetch user from the Rasa X Admin API""" + """Fetch user from the Rasa X Admin API.""" + jwt_payload = None if req.headers.get("Authorization"): - user = await self._decode_bearer_token(req.headers["Authorization"]) - if user: - return user["username"] - - user = await self._decode_bearer_token(req.args.get("token", default=None)) - if user: - return user["username"] - - abort(401) + jwt_payload = await self._decode_bearer_token(req.headers["Authorization"]) + + if not jwt_payload: + jwt_payload = await self._decode_bearer_token(req.args.get("token")) + + if not jwt_payload: + abort(401) + + if CONVERSATION_ID_KEY in req.json: + if self._has_user_permission_to_send_messages_to_conversation( + jwt_payload, req.json + ): + return req.json[CONVERSATION_ID_KEY] + else: + logger.error( + "User '{}' does not have permissions to send messages to " + "conversation '{}'.".format( + jwt_payload[JWT_USERNAME_KEY], req.json[CONVERSATION_ID_KEY] + ) + ) + abort(401) + + return jwt_payload[JWT_USERNAME_KEY] + + @staticmethod + def _has_user_permission_to_send_messages_to_conversation( + jwt_payload: Dict, message: Dict + ) -> bool: + user_scopes = jwt_payload.get("scopes", []) + return INTERACTIVE_LEARNING_PERMISSION in user_scopes or message[ + CONVERSATION_ID_KEY + ] == jwt_payload.get(JWT_USERNAME_KEY) diff --git a/rasa/core/channels/rest.py b/rasa/core/channels/rest.py new file mode 100644 index 000000000000..0077a6c69bfc --- /dev/null +++ b/rasa/core/channels/rest.py @@ -0,0 +1,162 @@ +import asyncio +import inspect +import json +import 
logging +from asyncio import Queue, CancelledError +from sanic import Sanic, Blueprint, response +from sanic.request import Request +from sanic.response import HTTPResponse +from typing import Text, Dict, Any, Optional, Callable, Awaitable, NoReturn + +import rasa.utils.endpoints +from rasa.core.channels.channel import ( + InputChannel, + CollectingOutputChannel, + UserMessage, +) + + +logger = logging.getLogger(__name__) + + +class RestInput(InputChannel): + """A custom http input channel. + + This implementation is the basis for a custom implementation of a chat + frontend. You can customize this to send messages to Rasa and + retrieve responses from the assistant.""" + + @classmethod + def name(cls) -> Text: + return "rest" + + @staticmethod + async def on_message_wrapper( + on_new_message: Callable[[UserMessage], Awaitable[Any]], + text: Text, + queue: Queue, + sender_id: Text, + input_channel: Text, + metadata: Optional[Dict[Text, Any]], + ) -> None: + collector = QueueOutputChannel(queue) + + message = UserMessage( + text, collector, sender_id, input_channel=input_channel, metadata=metadata + ) + await on_new_message(message) + + await queue.put("DONE") # pytype: disable=bad-return-type + + async def _extract_sender(self, req: Request) -> Optional[Text]: + return req.json.get("sender", None) + + # noinspection PyMethodMayBeStatic + def _extract_message(self, req: Request) -> Optional[Text]: + return req.json.get("message", None) + + def _extract_input_channel(self, req: Request) -> Text: + return req.json.get("input_channel") or self.name() + + def stream_response( + self, + on_new_message: Callable[[UserMessage], Awaitable[None]], + text: Text, + sender_id: Text, + input_channel: Text, + metadata: Optional[Dict[Text, Any]], + ) -> Callable[[Any], Awaitable[None]]: + async def stream(resp: Any) -> None: + q = Queue() + task = asyncio.ensure_future( + self.on_message_wrapper( + on_new_message, text, q, sender_id, input_channel, metadata + ) + ) + result = None # declare variable up front to avoid pytype error + while True: + result = await q.get() + if result == "DONE": + break + else: + await resp.write(json.dumps(result) + "\n") + await task + + return stream # pytype: disable=bad-return-type + + def blueprint( + self, on_new_message: Callable[[UserMessage], Awaitable[None]] + ) -> Blueprint: + custom_webhook = Blueprint( + "custom_webhook_{}".format(type(self).__name__), + inspect.getmodule(self).__name__, + ) + + # noinspection PyUnusedLocal + @custom_webhook.route("/", methods=["GET"]) + async def health(request: Request) -> HTTPResponse: + return response.json({"status": "ok"}) + + @custom_webhook.route("/webhook", methods=["POST"]) + async def receive(request: Request) -> HTTPResponse: + sender_id = await self._extract_sender(request) + text = self._extract_message(request) + should_use_stream = rasa.utils.endpoints.bool_arg( + request, "stream", default=False + ) + input_channel = self._extract_input_channel(request) + metadata = self.get_metadata(request) + + if should_use_stream: + return response.stream( + self.stream_response( + on_new_message, text, sender_id, input_channel, metadata + ), + content_type="text/event-stream", + ) + else: + collector = CollectingOutputChannel() + # noinspection PyBroadException + try: + await on_new_message( + UserMessage( + text, + collector, + sender_id, + input_channel=input_channel, + metadata=metadata, + ) + ) + except CancelledError: + logger.error( + f"Message handling timed out for " f"user message '{text}'." 
+ ) + except Exception: + logger.exception( + f"An exception occured while handling " + f"user message '{text}'." + ) + return response.json(collector.messages) + + return custom_webhook + + +class QueueOutputChannel(CollectingOutputChannel): + """Output channel that collects send messages in a list + + (doesn't send them anywhere, just collects them).""" + + @classmethod + def name(cls) -> Text: + return "queue" + + # noinspection PyMissingConstructor + def __init__(self, message_queue: Optional[Queue] = None) -> None: + super().__init__() + self.messages = Queue() if not message_queue else message_queue + + def latest_output(self) -> NoReturn: + raise NotImplementedError("A queue doesn't allow to peek at messages.") + + async def _persist_message(self, message) -> None: + await self.messages.put(message) # pytype: disable=bad-return-type diff --git a/rasa/core/channels/rocketchat.py b/rasa/core/channels/rocketchat.py index 2efb996cce37..2cc221012f94 100644 --- a/rasa/core/channels/rocketchat.py +++ b/rasa/core/channels/rocketchat.py @@ -1,25 +1,26 @@ import logging from sanic import Blueprint, response from sanic.request import Request -from typing import Text, Dict, Any, List, Iterable +from typing import Text, Dict, Any, List, Iterable, Optional, Callable, Awaitable from rasa.core.channels.channel import UserMessage, OutputChannel, InputChannel +from sanic.response import HTTPResponse logger = logging.getLogger(__name__) class RocketChatBot(OutputChannel): @classmethod - def name(cls): + def name(cls) -> Text: return "rocketchat" - def __init__(self, user, password, server_url): + def __init__(self, user, password, server_url) -> None: from rocketchat_API.rocketchat import RocketChat self.rocket = RocketChat(user, password, server_url=server_url) @staticmethod - def _convert_to_rocket_buttons(buttons): + def _convert_to_rocket_buttons(buttons: List[Dict]) -> List[Dict]: return [ { "text": b["title"], @@ -35,7 +36,7 @@ async def send_text_message( ) -> None: """Send message to output channel""" - for message_part in text.split("\n\n"): + for message_part in text.strip().split("\n\n"): self.rocket.chat_post_message(message_part, room_id=recipient_id) async def send_image_url( @@ -59,7 +60,7 @@ async def send_text_with_buttons( recipient_id: Text, text: Text, buttons: List[Dict[Text, Any]], - **kwargs: Any + **kwargs: Any, ) -> None: # implementation is based on # https://github.com/RocketChat/Rocket.Chat/pull/11473 @@ -85,7 +86,8 @@ async def send_custom_json( if json_message.get("channel"): if json_message.get("room_id"): logger.warning( - "Only one of `channel` or `room_id` can be passed to a RocketChat message post. Defaulting to `channel`." + "Only one of `channel` or `room_id` can be passed to a RocketChat " + "message post. Defaulting to `channel`." 
) del json_message["room_id"] return self.rocket.chat_post_message(text, **json_message) @@ -98,19 +100,21 @@ class RocketChatInput(InputChannel): """RocketChat input channel implementation.""" @classmethod - def name(cls): + def name(cls) -> Text: return "rocketchat" @classmethod - def from_credentials(cls, credentials): + def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> InputChannel: if not credentials: cls.raise_missing_credentials_exception() + # pytype: disable=attribute-error return cls( credentials.get("user"), credentials.get("password"), credentials.get("server_url"), ) + # pytype: enable=attribute-error def __init__(self, user: Text, password: Text, server_url: Text) -> None: @@ -118,25 +122,39 @@ def __init__(self, user: Text, password: Text, server_url: Text) -> None: self.password = password self.server_url = server_url - async def send_message(self, text, sender_name, recipient_id, on_new_message): + async def send_message( + self, + text: Optional[Text], + sender_name: Optional[Text], + recipient_id: Optional[Text], + on_new_message: Callable[[UserMessage], Awaitable[Any]], + metadata: Optional[Dict], + ): if sender_name != self.user: output_channel = self.get_output_channel() user_msg = UserMessage( - text, output_channel, recipient_id, input_channel=self.name() + text, + output_channel, + recipient_id, + input_channel=self.name(), + metadata=metadata, ) await on_new_message(user_msg) - def blueprint(self, on_new_message): + def blueprint( + self, on_new_message: Callable[[UserMessage], Awaitable[Any]] + ) -> Blueprint: rocketchat_webhook = Blueprint("rocketchat_webhook", __name__) @rocketchat_webhook.route("/", methods=["GET"]) - async def health(request: Request): + async def health(_: Request) -> HTTPResponse: return response.json({"status": "ok"}) @rocketchat_webhook.route("/webhook", methods=["GET", "POST"]) - async def webhook(request: Request): + async def webhook(request: Request) -> HTTPResponse: output = request.json + metadata = self.get_metadata(request) if output: if "visitor" not in output: sender_name = output.get("user_name", None) @@ -148,7 +166,9 @@ async def webhook(request: Request): sender_name = messages_list[0].get("username", None) recipient_id = output.get("_id") - await self.send_message(text, sender_name, recipient_id, on_new_message) + await self.send_message( + text, sender_name, recipient_id, on_new_message, metadata + ) return response.text("") diff --git a/rasa/core/channels/slack.py b/rasa/core/channels/slack.py index f6dd221c18db..88913c014620 100644 --- a/rasa/core/channels/slack.py +++ b/rasa/core/channels/slack.py @@ -1,44 +1,56 @@ import json import logging import re +from typing import Any, Awaitable, Callable, Dict, List, Optional, Text + +from rasa.core.channels.channel import InputChannel, OutputChannel, UserMessage +from rasa.utils.common import raise_warning from sanic import Blueprint, response from sanic.request import Request -from slackclient import SlackClient -from typing import Text, Optional, List, Dict, Any - -from rasa.core.channels.channel import InputChannel -from rasa.core.channels.channel import UserMessage, OutputChannel +from sanic.response import HTTPResponse +from slack import WebClient logger = logging.getLogger(__name__) -class SlackBot(SlackClient, OutputChannel): +class SlackBot(OutputChannel): """A Slack communication channel""" @classmethod - def name(cls): + def name(cls) -> Text: return "slack" - def __init__(self, token: Text, slack_channel: Optional[Text] = None) -> None: + def 
__init__( + self, + token: Text, + slack_channel: Optional[Text] = None, + thread_id: Optional[Text] = None, + proxy: Optional[Text] = None, + ) -> None: self.slack_channel = slack_channel - super(SlackBot, self).__init__(token) + self.thread_id = thread_id + self.proxy = proxy + self.client = WebClient(token, run_async=True, proxy=proxy) + super().__init__() @staticmethod - def _get_text_from_slack_buttons(buttons): + def _get_text_from_slack_buttons(buttons: List[Dict]) -> Text: return "".join([b.get("title", "") for b in buttons]) + async def _post_message(self, **kwargs: Any): + if self.thread_id: + await self.client.chat_postMessage(**kwargs, thread_ts=self.thread_id) + else: + await self.client.chat_postMessage(**kwargs) + async def send_text_message( self, recipient_id: Text, text: Text, **kwargs: Any ) -> None: recipient = self.slack_channel or recipient_id - for message_part in text.split("\n\n"): - super(SlackBot, self).api_call( - "chat.postMessage", - channel=recipient, - as_user=True, - text=message_part, - type="mrkdwn", + for message_part in text.strip().split("\n\n"): + await self._post_message( + channel=recipient, as_user=True, text=message_part, type="mrkdwn" ) async def send_image_url( @@ -46,24 +58,17 @@ async def send_image_url( ) -> None: recipient = self.slack_channel or recipient_id image_block = {"type": "image", "image_url": image, "alt_text": image} - return super(SlackBot, self).api_call( - "chat.postMessage", - channel=recipient, - as_user=True, - text=image, - blocks=[image_block], + + await self._post_message( + channel=recipient, as_user=True, text=image, blocks=[image_block] ) async def send_attachment( self, recipient_id: Text, attachment: Dict[Text, Any], **kwargs: Any ) -> None: recipient = self.slack_channel or recipient_id - return super(SlackBot, self).api_call( - "chat.postMessage", - channel=recipient, - as_user=True, - attachments=[attachment], - **kwargs + await self._post_message( + channel=recipient, as_user=True, attachments=[attachment], **kwargs ) async def send_text_with_buttons( @@ -71,16 +76,16 @@ async def send_text_with_buttons( recipient_id: Text, text: Text, buttons: List[Dict[Text, Any]], - **kwargs: Any + **kwargs: Any, ) -> None: recipient = self.slack_channel or recipient_id text_block = {"type": "section", "text": {"type": "plain_text", "text": text}} if len(buttons) > 5: - logger.warning( + raise_warning( "Slack API currently allows only up to 5 buttons. " - "If you add more, all will be ignored." + "Since you added more than 5, slack will ignore all of them." ) return await self.send_text_message(recipient, text, **kwargs) @@ -93,8 +98,8 @@ async def send_text_with_buttons( "value": button["payload"], } ) - super(SlackBot, self).api_call( - "chat.postMessage", + + await self._post_message( channel=recipient, as_user=True, text=text, @@ -106,35 +111,42 @@ async def send_custom_json( ) -> None: json_message.setdefault("channel", self.slack_channel or recipient_id) json_message.setdefault("as_user", True) - return super(SlackBot, self).api_call("chat.postMessage", **json_message) + await self._post_message(**json_message) class SlackInput(InputChannel): """Slack input channel implementation. 
Based on the HTTPInputChannel.""" @classmethod - def name(cls): + def name(cls) -> Text: return "slack" @classmethod - def from_credentials(cls, credentials): + def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> InputChannel: if not credentials: cls.raise_missing_credentials_exception() + + # pytype: disable=attribute-error return cls( credentials.get("slack_token"), credentials.get("slack_channel"), + credentials.get("proxy"), credentials.get("slack_retry_reason_header", "x-slack-retry-reason"), credentials.get("slack_retry_number_header", "x-slack-retry-num"), credentials.get("errors_ignore_retry", None), + credentials.get("use_threads", False), ) + # pytype: enable=attribute-error def __init__( self, slack_token: Text, slack_channel: Optional[Text] = None, + proxy: Optional[Text] = None, slack_retry_reason_header: Optional[Text] = None, slack_retry_number_header: Optional[Text] = None, errors_ignore_retry: Optional[List[Text]] = None, + use_threads: Optional[bool] = False, ) -> None: """Create a Slack input channel. @@ -151,34 +163,53 @@ def __init__( the bot posts, or channel name (e.g. '#bot-test') If not set, messages will be sent back to the "App" DM channel of your bot's name. + proxy: A Proxy Server to route your traffic through slack_retry_reason_header: Slack HTTP header name indicating reason that slack send retry request. slack_retry_number_header: Slack HTTP header name indicating the attempt number errors_ignore_retry: Any error codes given by Slack included in this list will be ignored. Error codes are listed `here <https://api.slack.com/events-api#errors>`_. + use_threads: If set to True, your bot will send responses in Slack as a threaded message. + Responses will appear as a normal Slack message if set to False. """ self.slack_token = slack_token self.slack_channel = slack_channel + self.proxy = proxy self.errors_ignore_retry = errors_ignore_retry or ("http_timeout",) self.retry_reason_header = slack_retry_reason_header self.retry_num_header = slack_retry_number_header + self.use_threads = use_threads @staticmethod - def _is_user_message(slack_event): + def _is_app_mention(slack_event: Dict) -> bool: + try: + return slack_event["event"]["type"] == "app_mention" + except KeyError: + return False + + @staticmethod + def _is_direct_message(slack_event: Dict) -> bool: + try: + return slack_event["event"]["channel_type"] == "im" + except KeyError: + return False + + @staticmethod + def _is_user_message(slack_event: Dict) -> bool: return ( slack_event.get("event") and ( - slack_event.get("event").get("type") == "message" - or slack_event.get("event").get("type") == "app_mention" + slack_event.get("event", {}).get("type") == "message" + or slack_event.get("event", {}).get("type") == "app_mention" ) - and slack_event.get("event").get("text") - and not slack_event.get("event").get("bot_id") + and slack_event.get("event", {}).get("text") + and not slack_event.get("event", {}).get("bot_id") ) @staticmethod - def _sanitize_user_message(text, uids_to_remove): + def _sanitize_user_message(text, uids_to_remove) -> Text: """Remove superfluous/wrong/problematic tokens from a message. 
Probably a good starting point for pre-formatting of user-provided text @@ -193,22 +224,33 @@ def _sanitize_user_message(text, uids_to_remove): Returns: str: parsed and cleaned version of the input text """ + for uid_to_remove in uids_to_remove: # heuristic to format majority cases OK # can be adjusted to taste later if needed, # but is a good first approximation for regex, replacement in [ - (r"<@{}>\s".format(uid_to_remove), ""), - (r"\s<@{}>".format(uid_to_remove), ""), - # a bit arbitrary but probably OK - (r"<@{}>".format(uid_to_remove), " "), + (fr"<@{uid_to_remove}>\s", ""), + (fr"\s<@{uid_to_remove}>", ""), # a bit arbitrary but probably OK + (fr"<@{uid_to_remove}>", " "), ]: text = re.sub(regex, replacement, text) + """Find multiple mailto or http links like <mailto:xyz@rasa.com|xyz@rasa.com> or '<http://url.com|url.com>in text and substitute it with original content + """ + + pattern = r"(\<(?:mailto|http|https):\/\/.*?\|.*?\>)" + match = re.findall(pattern, text) + + if match: + for remove in match: + replacement = remove.split("|")[1] + replacement = replacement.replace(">", "") + text = text.replace(remove, replacement) return text.strip() @staticmethod - def _is_interactive_message(payload): + def _is_interactive_message(payload: Dict) -> bool: """Check wheter the input is a supported interactive input type.""" supported = [ @@ -229,14 +271,12 @@ def _is_interactive_message(payload): elif action_type: logger.warning( "Received input from a Slack interactive component of type " - + "'{}', for which payload parsing is not yet supported.".format( - payload["actions"][0]["type"] - ) + f"'{payload['actions'][0]['type']}', for which payload parsing is not yet supported." ) return False @staticmethod - def _get_interactive_repsonse(action): + def _get_interactive_response(action: Dict) -> Optional[Text]: """Parse the payload for the response value.""" if action["type"] == "button": @@ -258,7 +298,14 @@ def _get_interactive_repsonse(action): elif action["type"] == "datepicker": return action.get("selected_date") - async def process_message(self, request: Request, on_new_message, text, sender_id): + async def process_message( + self, + request: Request, + on_new_message: Callable[[UserMessage], Awaitable[Any]], + text, + sender_id: Optional[Text], + metadata: Optional[Dict], + ) -> Any: """Slack retries to post messages up to 3 times based on failure conditions defined here: https://api.slack.com/events-api#failure_conditions @@ -267,70 +314,155 @@ async def process_message(self, request: Request, on_new_message, text, sender_i retry_count = request.headers.get(self.retry_num_header) if retry_count and retry_reason in self.errors_ignore_retry: logger.warning( - "Received retry #{} request from slack" - " due to {}".format(retry_count, retry_reason) + f"Received retry #{retry_count} request from slack" + f" due to {retry_reason}." 
) return response.text(None, status=201, headers={"X-Slack-No-Retry": 1}) + if metadata is not None: + output_channel = metadata.get("out_channel") + if self.use_threads: + thread_id = metadata.get("thread_id") + else: + thread_id = None + else: + output_channel = None + thread_id = None + try: - out_channel = self.get_output_channel() user_msg = UserMessage( - text, out_channel, sender_id, input_channel=self.name() + text, + self.get_output_channel(output_channel, thread_id), + sender_id, + input_channel=self.name(), + metadata=metadata, ) await on_new_message(user_msg) except Exception as e: - logger.error("Exception when trying to handle message.{0}".format(e)) + logger.error(f"Exception when trying to handle message.{e}") logger.error(str(e), exc_info=True) return response.text("") - def blueprint(self, on_new_message): + def get_metadata(self, request: Request) -> Dict[Text, Any]: + """Extracts the metadata from a slack API event (https://api.slack.com/types/event). + + Args: + request: A `Request` object that contains a slack API event in the body. + + Returns: + Metadata extracted from the sent event payload. This includes the output channel for the response, + and users that have installed the bot. + """ + content_type = request.headers.get("content-type") + + # Slack API sends either a JSON-encoded or a URL-encoded body depending on the content + if content_type == "application/json": + # if JSON-encoded message is received + slack_event = request.json + event = slack_event.get("event", {}) + thread_id = event.get("thread_ts", event.get("ts")) + + return { + "out_channel": event.get("channel"), + "thread_id": thread_id, + "users": slack_event.get("authed_users"), + } + + if content_type == "application/x-www-form-urlencoded": + # if URL-encoded message is received + output = request.form + payload = json.loads(output["payload"][0]) + message = payload.get("message", {}) + thread_id = message.get("thread_ts", message.get("ts")) + return { + "out_channel": payload.get("channel", {}).get("id"), + "thread_id": thread_id, + "users": payload.get("user", {}).get("id"), + } + + return {} + + def blueprint( + self, on_new_message: Callable[[UserMessage], Awaitable[Any]] + ) -> Blueprint: slack_webhook = Blueprint("slack_webhook", __name__) @slack_webhook.route("/", methods=["GET"]) - async def health(request: Request): + async def health(_: Request) -> HTTPResponse: return response.json({"status": "ok"}) @slack_webhook.route("/webhook", methods=["GET", "POST"]) - async def webhook(request: Request): - if request.form: + async def webhook(request: Request) -> HTTPResponse: + content_type = request.headers.get("content-type") + # Slack API sends either a JSON-encoded or a URL-encoded body depending on the content + + if content_type == "application/json": + # if JSON-encoded message is received + output = request.json + event = output.get("event", {}) + user_message = event.get("text", "") + sender_id = event.get("user", "") + metadata = self.get_metadata(request) + + if "challenge" in output: + return response.json(output.get("challenge")) + + elif self._is_user_message(output) and self._is_supported_channel( + output, metadata + ): + return await self.process_message( + request, + on_new_message, + text=self._sanitize_user_message( + user_message, metadata["users"] + ), + sender_id=sender_id, + metadata=metadata, + ) + else: + logger.warning( + f"Received message on unsupported channel: {metadata['out_channel']}" + ) + + elif content_type == "application/x-www-form-urlencoded": + # if 
URL-encoded message is received output = request.form payload = json.loads(output["payload"][0]) if self._is_interactive_message(payload): sender_id = payload["user"]["id"] - text = self._get_interactive_repsonse(payload["actions"][0]) + text = self._get_interactive_response(payload["actions"][0]) if text is not None: + metadata = self.get_metadata(request) return await self.process_message( - request, on_new_message, text=text, sender_id=sender_id + request, on_new_message, text, sender_id, metadata ) - elif payload["actions"][0]["type"] == "button": + if payload["actions"][0]["type"] == "button": # link buttons don't have "value", don't send their clicks to bot return response.text("User clicked link button") return response.text( "The input message could not be processed.", status=500 ) - elif request.json: - output = request.json - if "challenge" in output: - return response.json(output.get("challenge")) + return response.text("Bot message delivered.") - elif self._is_user_message(output): - return await self.process_message( - request, - on_new_message, - text=self._sanitize_user_message( - output["event"]["text"], output["authed_users"] - ), - sender_id=output.get("event").get("user"), - ) + return slack_webhook - return response.text("Bot message delivered") + def _is_supported_channel(self, slack_event: Dict, metadata: Dict) -> bool: + return ( + self._is_direct_message(slack_event) + or self._is_app_mention(slack_event) + or metadata["out_channel"] == self.slack_channel + ) - return slack_webhook + def get_output_channel( + self, channel: Optional[Text] = None, thread_id: Optional[Text] = None + ) -> OutputChannel: + channel = channel or self.slack_channel + return SlackBot(self.slack_token, channel, thread_id, self.proxy) - def get_output_channel(self) -> OutputChannel: - return SlackBot(self.slack_token, self.slack_channel) + def set_output_channel(self, channel: Text) -> None: + self.slack_channel = channel diff --git a/rasa/core/channels/socketio.py b/rasa/core/channels/socketio.py index 1c9e33276d6a..ef67491eb459 100644 --- a/rasa/core/channels/socketio.py +++ b/rasa/core/channels/socketio.py @@ -1,12 +1,13 @@ import logging import uuid +from typing import Any, Awaitable, Callable, Dict, Iterable, List, Optional, Text + +from rasa.core.channels.channel import InputChannel, OutputChannel, UserMessage +from rasa.utils.common import raise_warning from sanic import Blueprint, response from sanic.request import Request +from sanic.response import HTTPResponse from socketio import AsyncServer -from typing import Optional, Text, Any, List, Dict, Iterable - -from rasa.core.channels.channel import InputChannel -from rasa.core.channels.channel import UserMessage, OutputChannel logger = logging.getLogger(__name__) @@ -15,21 +16,20 @@ class SocketBlueprint(Blueprint): def __init__(self, sio: AsyncServer, socketio_path, *args, **kwargs): self.sio = sio self.socketio_path = socketio_path - super(SocketBlueprint, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) - def register(self, app, options): + def register(self, app, options) -> None: self.sio.attach(app, self.socketio_path) - super(SocketBlueprint, self).register(app, options) + super().register(app, options) class SocketIOOutput(OutputChannel): @classmethod - def name(cls): + def name(cls) -> Text: return "socketio" - def __init__(self, sio, sid, bot_message_evt): + def __init__(self, sio: AsyncServer, bot_message_evt: Text) -> None: self.sio = sio - self.sid = sid self.bot_message_evt = bot_message_evt async def 
_send_message(self, socket_id: Text, response: Any) -> None: @@ -42,7 +42,8 @@ async def send_text_message( ) -> None: """Send a message through this channel.""" - await self._send_message(self.sid, {"text": text}) + for message_part in text.strip().split("\n\n"): + await self._send_message(recipient_id, {"text": message_part}) async def send_image_url( self, recipient_id: Text, image: Text, **kwargs: Any @@ -50,21 +51,26 @@ async def send_image_url( """Sends an image to the output""" message = {"attachment": {"type": "image", "payload": {"src": image}}} - await self._send_message(self.sid, message) + await self._send_message(recipient_id, message) async def send_text_with_buttons( self, recipient_id: Text, text: Text, buttons: List[Dict[Text, Any]], - **kwargs: Any + **kwargs: Any, ) -> None: """Sends buttons to the output.""" - message = {"text": text, "quick_replies": []} + # split text and create a message for each text fragment + # the `or` makes sure there is at least one message we can attach the quick + # replies to + message_parts = text.strip().split("\n\n") or [text] + messages = [{"text": message, "quick_replies": []} for message in message_parts] + # attach all buttons to the last text fragment for button in buttons: - message["quick_replies"].append( + messages[-1]["quick_replies"].append( { "content_type": "text", "title": button["title"], @@ -72,7 +78,8 @@ async def send_text_with_buttons( } ) - await self._send_message(self.sid, message) + for message in messages: + await self._send_message(recipient_id, message) async def send_elements( self, recipient_id: Text, elements: Iterable[Dict[Text, Any]], **kwargs: Any @@ -87,14 +94,14 @@ async def send_elements( } } - await self._send_message(self.sid, message) + await self._send_message(recipient_id, message) async def send_custom_json( self, recipient_id: Text, json_message: Dict[Text, Any], **kwargs: Any ) -> None: """Sends custom json to the output""" - json_message.setdefault("room", self.sid) + json_message.setdefault("room", recipient_id) await self.sio.emit(self.bot_message_evt, **json_message) @@ -102,18 +109,18 @@ async def send_attachment( self, recipient_id: Text, attachment: Dict[Text, Any], **kwargs: Any ) -> None: """Sends an attachment to the user.""" - await self._send_message(self.sid, {"attachment": attachment}) + await self._send_message(recipient_id, {"attachment": attachment}) class SocketIOInput(InputChannel): """A socket.io input channel.""" @classmethod - def name(cls): + def name(cls) -> Text: return "socketio" @classmethod - def from_credentials(cls, credentials): + def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> InputChannel: credentials = credentials or {} return cls( credentials.get("user_message_evt", "user_uttered"), @@ -136,8 +143,23 @@ def __init__( self.user_message_evt = user_message_evt self.namespace = namespace self.socketio_path = socketio_path + self.sio = None + + def get_output_channel(self) -> Optional["OutputChannel"]: + if self.sio is None: + raise_warning( + "SocketIO output channel cannot be recreated. " + "This is expected behavior when using multiple Sanic " + "workers or multiple Rasa Open Source instances. " + "Please use a different channel for external events in these " + "scenarios." 
+ ) + return + return SocketIOOutput(self.sio, self.bot_message_evt) - def blueprint(self, on_new_message): + def blueprint( + self, on_new_message: Callable[[UserMessage], Awaitable[Any]] + ) -> Blueprint: # Workaround so that socketio works with requests from other origins. # https://github.com/miguelgrinberg/python-socketio/issues/205#issuecomment-493769183 sio = AsyncServer(async_mode="sanic", cors_allowed_origins=[]) @@ -145,35 +167,40 @@ def blueprint(self, on_new_message): sio, self.socketio_path, "socketio_webhook", __name__ ) + # make sio object static to use in get_output_channel + self.sio = sio + @socketio_webhook.route("/", methods=["GET"]) - async def health(request: Request): + async def health(_: Request) -> HTTPResponse: return response.json({"status": "ok"}) @sio.on("connect", namespace=self.namespace) - async def connect(sid, environ): - logger.debug("User {} connected to socketIO endpoint.".format(sid)) + async def connect(sid: Text, _) -> None: + logger.debug(f"User {sid} connected to socketIO endpoint.") @sio.on("disconnect", namespace=self.namespace) - async def disconnect(sid): - logger.debug("User {} disconnected from socketIO endpoint.".format(sid)) + async def disconnect(sid: Text) -> None: + logger.debug(f"User {sid} disconnected from socketIO endpoint.") @sio.on("session_request", namespace=self.namespace) - async def session_request(sid, data): + async def session_request(sid: Text, data: Optional[Dict]): if data is None: data = {} if "session_id" not in data or data["session_id"] is None: data["session_id"] = uuid.uuid4().hex + if self.session_persistence: + sio.enter_room(sid, data["session_id"]) await sio.emit("session_confirm", data["session_id"], room=sid) - logger.debug("User {} connected to socketIO endpoint.".format(sid)) + logger.debug(f"User {sid} connected to socketIO endpoint.") @sio.on(self.user_message_evt, namespace=self.namespace) - async def handle_message(sid, data): - output_channel = SocketIOOutput(sio, sid, self.bot_message_evt) + async def handle_message(sid: Text, data: Dict) -> Any: + output_channel = SocketIOOutput(sio, self.bot_message_evt) if self.session_persistence: if not data.get("session_id"): - logger.warning( - "A message without a valid sender_id " + raise_warning( + "A message without a valid session_id " "was received. This message will be " "ignored. 
Make sure to set a proper " "session id using the " diff --git a/rasa/core/channels/telegram.py b/rasa/core/channels/telegram.py index 035dcd7c6d2c..605d304b92b5 100644 --- a/rasa/core/channels/telegram.py +++ b/rasa/core/channels/telegram.py @@ -1,6 +1,8 @@ import logging +from copy import deepcopy from sanic import Blueprint, response from sanic.request import Request +from sanic.response import HTTPResponse from telegram import ( Bot, InlineKeyboardButton, @@ -9,7 +11,7 @@ KeyboardButton, ReplyKeyboardMarkup, ) -from typing import Dict, Text, Any, List, Optional +from typing import Dict, Text, Any, List, Optional, Callable, Awaitable from rasa.core.channels.channel import InputChannel, UserMessage, OutputChannel from rasa.core.constants import INTENT_MESSAGE_PREFIX, USER_INTENT_RESTART @@ -21,16 +23,16 @@ class TelegramOutput(Bot, OutputChannel): """Output channel for Telegram""" @classmethod - def name(cls): + def name(cls) -> Text: return "telegram" - def __init__(self, access_token): - super(TelegramOutput, self).__init__(access_token) + def __init__(self, access_token: Optional[Text]) -> None: + super().__init__(access_token) async def send_text_message( self, recipient_id: Text, text: Text, **kwargs: Any ) -> None: - for message_part in text.split("\n\n"): + for message_part in text.strip().split("\n\n"): self.send_message(recipient_id, message_part) async def send_image_url( @@ -44,7 +46,7 @@ async def send_text_with_buttons( text: Text, buttons: List[Dict[Text, Any]], button_type: Optional[Text] = "inline", - **kwargs: Any + **kwargs: Any, ) -> None: """Sends a message with keyboard. @@ -94,6 +96,8 @@ async def send_text_with_buttons( async def send_custom_json( self, recipient_id: Text, json_message: Dict[Text, Any], **kwargs: Any ) -> None: + json_message = deepcopy(json_message) + recipient_id = json_message.pop("chat_id", recipient_id) send_functions = { @@ -134,48 +138,58 @@ class TelegramInput(InputChannel): """Telegram input channel""" @classmethod - def name(cls): + def name(cls) -> Text: return "telegram" @classmethod - def from_credentials(cls, credentials): + def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> InputChannel: if not credentials: cls.raise_missing_credentials_exception() + # pytype: disable=attribute-error return cls( credentials.get("access_token"), credentials.get("verify"), credentials.get("webhook_url"), ) + # pytype: enable=attribute-error - def __init__(self, access_token, verify, webhook_url, debug_mode=True): + def __init__( + self, + access_token: Optional[Text], + verify: Optional[Text], + webhook_url: Optional[Text], + debug_mode: bool = True, + ) -> None: self.access_token = access_token self.verify = verify self.webhook_url = webhook_url self.debug_mode = debug_mode @staticmethod - def _is_location(message): - return message.location + def _is_location(message) -> bool: + return message.location is not None @staticmethod - def _is_user_message(message): - return message.text + def _is_user_message(message) -> bool: + return message.text is not None @staticmethod - def _is_button(update): - return update.callback_query + def _is_button(message) -> bool: + return message.callback_query is not None - def blueprint(self, on_new_message): + def blueprint( + self, on_new_message: Callable[[UserMessage], Awaitable[Any]] + ) -> Blueprint: telegram_webhook = Blueprint("telegram_webhook", __name__) out_channel = self.get_output_channel() @telegram_webhook.route("/", methods=["GET"]) - async def health(request: Request): + async def 
health(_: Request) -> HTTPResponse: return response.json({"status": "ok"}) @telegram_webhook.route("/set_webhook", methods=["GET", "POST"]) - async def set_webhook(request: Request): + async def set_webhook(_: Request) -> HTTPResponse: s = out_channel.setWebhook(self.webhook_url) if s: logger.info("Webhook Setup Successful") @@ -185,7 +199,7 @@ async def set_webhook(request: Request): return response.text("Invalid webhook") @telegram_webhook.route("/webhook", methods=["GET", "POST"]) - async def message(request: Request): + async def message(request: Request) -> Any: if request.method == "POST": if not out_channel.get_me()["username"] == self.verify: @@ -207,11 +221,16 @@ async def message(request: Request): else: return response.text("success") sender_id = msg.chat.id + metadata = self.get_metadata(request) try: if text == (INTENT_MESSAGE_PREFIX + USER_INTENT_RESTART): await on_new_message( UserMessage( - text, out_channel, sender_id, input_channel=self.name() + text, + out_channel, + sender_id, + input_channel=self.name(), + metadata=metadata, ) ) await on_new_message( @@ -220,18 +239,21 @@ async def message(request: Request): out_channel, sender_id, input_channel=self.name(), + metadata=metadata, ) ) else: await on_new_message( UserMessage( - text, out_channel, sender_id, input_channel=self.name() + text, + out_channel, + sender_id, + input_channel=self.name(), + metadata=metadata, ) ) except Exception as e: - logger.error( - "Exception when trying to handle message.{0}".format(e) - ) + logger.error(f"Exception when trying to handle message.{e}") logger.debug(e, exc_info=True) if self.debug_mode: raise diff --git a/rasa/core/channels/twilio.py b/rasa/core/channels/twilio.py index bdbd3d3c89fa..1f1715c4a71d 100644 --- a/rasa/core/channels/twilio.py +++ b/rasa/core/channels/twilio.py @@ -1,10 +1,10 @@ -# -*- coding: utf-8 -*- import logging from sanic import Blueprint, response from sanic.request import Request +from sanic.response import HTTPResponse from twilio.base.exceptions import TwilioRestException from twilio.rest import Client -from typing import Dict, Text, Any +from typing import Dict, Text, Any, Callable, Awaitable, Optional from rasa.core.channels.channel import InputChannel from rasa.core.channels.channel import UserMessage, OutputChannel @@ -16,11 +16,16 @@ class TwilioOutput(Client, OutputChannel): """Output channel for Twilio""" @classmethod - def name(cls): + def name(cls) -> Text: return "twilio" - def __init__(self, account_sid, auth_token, twilio_number): - super(TwilioOutput, self).__init__(account_sid, auth_token) + def __init__( + self, + account_sid: Optional[Text], + auth_token: Optional[Text], + twilio_number: Optional[Text], + ) -> None: + super().__init__(account_sid, auth_token) self.twilio_number = twilio_number self.send_retry = 0 self.max_retry = 5 @@ -47,10 +52,22 @@ async def send_text_message( """Sends text message""" message_data = {"to": recipient_id, "from_": self.twilio_number} - for message_part in text.split("\n\n"): + for message_part in text.strip().split("\n\n"): message_data.update({"body": message_part}) await self._send_message(message_data) + async def send_image_url( + self, recipient_id: Text, image: Text, **kwargs: Any + ) -> None: + """Sends an image.""" + + message_data = { + "to": recipient_id, + "from_": self.twilio_number, + "media_url": [image], + } + await self._send_message(message_data) + async def send_custom_json( self, recipient_id: Text, json_message: Dict[Text, Any], **kwargs: Any ) -> None: @@ -60,7 +77,7 @@ async def 
send_custom_json( if not json_message.get("media_url"): json_message.setdefault("body", "") if not json_message.get("messaging_service_sid"): - json_message.setdefault("from", self.twilio_number) + json_message.setdefault("from_", self.twilio_number) await self._send_message(json_message) @@ -69,53 +86,66 @@ class TwilioInput(InputChannel): """Twilio input channel""" @classmethod - def name(cls): + def name(cls) -> Text: return "twilio" @classmethod - def from_credentials(cls, credentials): + def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> InputChannel: if not credentials: cls.raise_missing_credentials_exception() + # pytype: disable=attribute-error return cls( credentials.get("account_sid"), credentials.get("auth_token"), credentials.get("twilio_number"), ) - - def __init__(self, account_sid, auth_token, twilio_number, debug_mode=True): + # pytype: enable=attribute-error + + def __init__( + self, + account_sid: Optional[Text], + auth_token: Optional[Text], + twilio_number: Optional[Text], + debug_mode: bool = True, + ) -> None: self.account_sid = account_sid self.auth_token = auth_token self.twilio_number = twilio_number self.debug_mode = debug_mode - def blueprint(self, on_new_message): + def blueprint( + self, on_new_message: Callable[[UserMessage], Awaitable[Any]] + ) -> Blueprint: twilio_webhook = Blueprint("twilio_webhook", __name__) @twilio_webhook.route("/", methods=["GET"]) - async def health(request: Request): + async def health(_: Request) -> HTTPResponse: return response.json({"status": "ok"}) @twilio_webhook.route("/webhook", methods=["POST"]) - async def message(request: Request): + async def message(request: Request) -> HTTPResponse: sender = request.form.get("From", None) text = request.form.get("Body", None) out_channel = self.get_output_channel() if sender is not None and message is not None: + metadata = self.get_metadata(request) try: # @ signs get corrupted in SMSes by some carriers text = text.replace("¡", "@") await on_new_message( UserMessage( - text, out_channel, sender, input_channel=self.name() + text, + out_channel, + sender, + input_channel=self.name(), + metadata=metadata, ) ) except Exception as e: - logger.error( - "Exception when trying to handle message.{0}".format(e) - ) + logger.error(f"Exception when trying to handle message.{e}") logger.debug(e, exc_info=True) if self.debug_mode: raise diff --git a/rasa/core/channels/webexteams.py b/rasa/core/channels/webexteams.py index 0dcaa7dc2317..d5d689c17129 100644 --- a/rasa/core/channels/webexteams.py +++ b/rasa/core/channels/webexteams.py @@ -1,7 +1,9 @@ import logging from sanic import Blueprint, response from sanic.request import Request -from typing import Text, Optional, Dict, Any +from typing import Text, Optional, Dict, Any, Callable, Awaitable + +from sanic.response import HTTPResponse from webexteamssdk import WebexTeamsAPI, Webhook from rasa.core.channels.channel import InputChannel @@ -14,10 +16,10 @@ class WebexTeamsBot(OutputChannel): """A Cisco WebexTeams communication channel.""" @classmethod - def name(cls): + def name(cls) -> Text: return "webexteams" - def __init__(self, access_token, room): + def __init__(self, access_token: Optional[Text], room: Optional[Text]) -> None: self.room = room self.api = WebexTeamsAPI(access_token) @@ -25,7 +27,7 @@ async def send_text_message( self, recipient_id: Text, text: Text, **kwargs: Any ) -> None: recipient = self.room or recipient_id - for message_part in text.split("\n\n"): + for message_part in text.strip().split("\n\n"): 
self.api.messages.create(roomId=recipient, text=message_part) async def send_image_url( @@ -37,7 +39,7 @@ async def send_image_url( async def send_custom_json( self, recipient_id: Text, json_message: Dict[Text, Any], **kwargs: Any ) -> None: - json_message.setdefault("roomID", recipient_id) + json_message.setdefault("roomId", recipient_id) return self.api.messages.create(**json_message) @@ -45,15 +47,17 @@ class WebexTeamsInput(InputChannel): """WebexTeams input channel. Based on the HTTPInputChannel.""" @classmethod - def name(cls): + def name(cls) -> Text: return "webexteams" @classmethod - def from_credentials(cls, credentials): + def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> InputChannel: if not credentials: cls.raise_missing_credentials_exception() + # pytype: disable=attribute-error return cls(credentials.get("access_token"), credentials.get("room")) + # pytype: enable=attribute-error def __init__(self, access_token: Text, room: Optional[Text] = None) -> None: """Create a Cisco Webex Teams input channel. @@ -69,27 +73,39 @@ def __init__(self, access_token: Text, room: Optional[Text] = None) -> None: self.room = room self.api = WebexTeamsAPI(access_token) - async def process_message(self, on_new_message, text, sender_id): + async def process_message( + self, + on_new_message: Callable[[UserMessage], Awaitable[Any]], + text: Optional[Text], + sender_id: Optional[Text], + metadata: Optional[Dict], + ) -> Any: try: out_channel = self.get_output_channel() user_msg = UserMessage( - text, out_channel, sender_id, input_channel=self.name() + text, + out_channel, + sender_id, + input_channel=self.name(), + metadata=metadata, ) await on_new_message(user_msg) except Exception as e: - logger.error("Exception when trying to handle message.{0}".format(e)) + logger.error(f"Exception when trying to handle message.{e}") logger.error(str(e), exc_info=True) - def blueprint(self, on_new_message): + def blueprint( + self, on_new_message: Callable[[UserMessage], Awaitable[Any]] + ) -> Blueprint: webexteams_webhook = Blueprint("webexteams_webhook", __name__) @webexteams_webhook.route("/", methods=["GET"]) - async def health(request: Request): + async def health(_: Request) -> HTTPResponse: return response.json({"status": "ok"}) @webexteams_webhook.route("/webhook", methods=["POST"]) - async def webhook(request: Request): + async def webhook(request: Request) -> HTTPResponse: """Respond to inbound webhook HTTP POST from Webex Teams.""" logger.debug("Received webex webhook call") @@ -110,8 +126,9 @@ async def webhook(request: Request): return response.text("OK") else: + metadata = self.get_metadata(request) await self.process_message( - on_new_message, text=message.text, sender_id=message.personId + on_new_message, message.text, message.roomId, metadata ) return response.text("") diff --git a/rasa/core/constants.py b/rasa/core/constants.py index 104b79fa0aa0..c1228489e968 100644 --- a/rasa/core/constants.py +++ b/rasa/core/constants.py @@ -1,3 +1,5 @@ +from rasa.constants import DEFAULT_NLU_FALLBACK_INTENT_NAME + DEFAULT_SERVER_PORT = 5005 DEFAULT_SERVER_FORMAT = "{}://localhost:{}" @@ -8,10 +10,10 @@ DEFAULT_CORE_FALLBACK_THRESHOLD = 0.0 -DEFAULT_FALLBACK_ACTION = "action_default_fallback" - DEFAULT_REQUEST_TIMEOUT = 60 * 5 # 5 minutes +DEFAULT_RESPONSE_TIMEOUT = 60 * 60 # 1 hour + DEFAULT_LOCK_LIFETIME = 60 # in seconds REQUESTED_SLOT = "requested_slot" @@ -24,17 +26,32 @@ # start of special user message section INTENT_MESSAGE_PREFIX = "/" +EXTERNAL_MESSAGE_PREFIX = "EXTERNAL: " 
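Editorial aside, not part of the patch: a minimal sketch of how the new EXTERNAL_MESSAGE_PREFIX and IS_EXTERNAL constants are combined by the UserUttered.create_external helper added to rasa/core/events/__init__.py further down in this diff. The intent name is a made-up placeholder.

from rasa.core.events import UserUttered

event = UserUttered.create_external("ext_reminder")   # hypothetical external intent name
assert event.text == "EXTERNAL: ext_reminder"         # EXTERNAL_MESSAGE_PREFIX + intent name
assert event.metadata == {"is_external": True}        # keyed by IS_EXTERNAL
assert event.entities == []                           # entity_list defaults to an empty list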
USER_INTENT_RESTART = "restart" +USER_INTENT_SESSION_START = "session_start" + USER_INTENT_BACK = "back" USER_INTENT_OUT_OF_SCOPE = "out_of_scope" +DEFAULT_INTENTS = [ + USER_INTENT_RESTART, + USER_INTENT_BACK, + USER_INTENT_OUT_OF_SCOPE, + USER_INTENT_SESSION_START, + DEFAULT_NLU_FALLBACK_INTENT_NAME, +] + ACTION_NAME_SENDER_ID_CONNECTOR_STR = "__sender_id:" BEARER_TOKEN_PREFIX = "Bearer " +# Key to access data in the event metadata +# It specifies if an event was caused by an external entity (e.g. a sensor). +IS_EXTERNAL = "is_external" + # the lowest priority intended to be used by machine learning policies DEFAULT_POLICY_PRIORITY = 1 # the priority intended to be used by mapping policies @@ -50,3 +67,17 @@ FORM_POLICY_PRIORITY = 5 UTTER_PREFIX = "utter_" RESPOND_PREFIX = "respond_" + +DIALOGUE = "dialogue" +DEFAULT_CATEGORICAL_SLOT_VALUE = "__other__" + +# RabbitMQ message property header added to events published using `rasa export` +RASA_EXPORT_PROCESS_ID_HEADER_NAME = "rasa-export-process-id" + +# Name of the environment variable defining the PostgreSQL schema to access. See +# https://www.postgresql.org/docs/9.1/ddl-schemas.html for more details. +POSTGRESQL_SCHEMA = "POSTGRESQL_SCHEMA" + +# Names of the environment variables defining PostgreSQL pool size and max overflow +POSTGRESQL_POOL_SIZE = "SQL_POOL_SIZE" +POSTGRESQL_MAX_OVERFLOW = "SQL_MAX_OVERFLOW" diff --git a/rasa/core/conversation.py b/rasa/core/conversation.py index 22ae474ed00d..48e115a9645b 100644 --- a/rasa/core/conversation.py +++ b/rasa/core/conversation.py @@ -1,21 +1,41 @@ -import typing -from typing import List, Text +from typing import Dict, List, Text, Any -if typing.TYPE_CHECKING: - from rasa.core.events import Event +from rasa.core.events import Event -class Dialogue(object): +class Dialogue: """A dialogue comprises a list of Turn objects""" def __init__(self, name: Text, events: List["Event"]) -> None: - # This function initialises the dialogue with - # the dialogue name and the event list. + """This function initialises the dialogue with the dialogue name and the event + list.""" self.name = name self.events = events def __str__(self) -> Text: - # This function returns the dialogue and turns. + """This function returns the dialogue and turns.""" return "Dialogue with name '{}' and turns:\n{}".format( - self.name, "\n\n".join(["\t{}".format(t) for t in self.events]) + self.name, "\n\n".join([f"\t{t}" for t in self.events]) + ) + + def as_dict(self) -> Dict: + """This function returns the dialogue as a dictionary to assist in + serialization.""" + return {"events": [event.as_dict() for event in self.events], "name": self.name} + + @classmethod + def from_parameters(cls, parameters: Dict[Text, Any]) -> "Dialogue": + """Create `Dialogue` from parameters. + + Args: + parameters: Serialised dialogue, should contain keys 'name' and 'events'. + + Returns: + Deserialised `Dialogue`. 
+ + """ + + return cls( + parameters.get("name"), + [Event.from_parameters(evt) for evt in parameters.get("events")], ) diff --git a/rasa/core/domain.py b/rasa/core/domain.py index 73de46f3da4f..e6a05572df9e 100644 --- a/rasa/core/domain.py +++ b/rasa/core/domain.py @@ -1,35 +1,74 @@ import collections +import copy import json import logging import os import typing -from typing import Any, Dict, List, Optional, Text, Tuple, Union, Set +from pathlib import Path +from typing import Any, Dict, List, NamedTuple, Optional, Set, Text, Tuple, Union + +from ruamel.yaml import YAMLError import rasa.core.constants -import rasa.utils.common as common_utils +from rasa.nlu.constants import INTENT_NAME_KEY +from rasa.utils.common import ( + raise_warning, + lazy_property, + sort_list_of_dicts_by_first_key, +) import rasa.utils.io -from rasa.cli.utils import bcolors -from rasa.constants import DOMAIN_SCHEMA_FILE +from rasa.cli.utils import bcolors, wrap_with_color +from rasa.constants import ( + DEFAULT_CARRY_OVER_SLOTS_TO_NEW_SESSION, + DOMAIN_SCHEMA_FILE, + DOCS_URL_DOMAINS, + DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES, +) from rasa.core import utils from rasa.core.actions import action # pytype: disable=pyi-error from rasa.core.actions.action import Action # pytype: disable=pyi-error from rasa.core.constants import ( - REQUESTED_SLOT, DEFAULT_KNOWLEDGE_BASE_ACTION, - SLOT_LISTED_ITEMS, - SLOT_LAST_OBJECT_TYPE, + REQUESTED_SLOT, SLOT_LAST_OBJECT, + SLOT_LAST_OBJECT_TYPE, + SLOT_LISTED_ITEMS, + DEFAULT_INTENTS, ) from rasa.core.events import SlotSet, UserUttered -from rasa.core.slots import Slot, UnfeaturizedSlot +from rasa.core.slots import Slot, UnfeaturizedSlot, CategoricalSlot from rasa.utils.endpoints import EndpointConfig -from rasa.utils.validation import validate_yaml_schema, InvalidYamlFileError +from rasa.utils.validation import InvalidYamlFileError, validate_yaml_schema logger = logging.getLogger(__name__) PREV_PREFIX = "prev_" ACTIVE_FORM_PREFIX = "active_form_" +CARRY_OVER_SLOTS_KEY = "carry_over_slots_to_new_session" +SESSION_EXPIRATION_TIME_KEY = "session_expiration_time" +SESSION_CONFIG_KEY = "session_config" +USED_ENTITIES_KEY = "used_entities" +USE_ENTITIES_KEY = "use_entities" +IGNORE_ENTITIES_KEY = "ignore_entities" + +KEY_SLOTS = "slots" +KEY_INTENTS = "intents" +KEY_ENTITIES = "entities" +KEY_RESPONSES = "responses" +KEY_ACTIONS = "actions" +KEY_FORMS = "forms" + +ALL_DOMAIN_KEYS = [ + KEY_SLOTS, + KEY_FORMS, + KEY_ACTIONS, + KEY_ENTITIES, + KEY_INTENTS, + KEY_RESPONSES, +] + + if typing.TYPE_CHECKING: from rasa.core.trackers import DialogueStateTracker @@ -37,19 +76,35 @@ class InvalidDomain(Exception): """Exception that can be raised when domain is not valid.""" - def __init__(self, message): + def __init__(self, message) -> None: self.message = message def __str__(self): # return message in error colours - return bcolors.FAIL + self.message + bcolors.ENDC + return wrap_with_color(self.message, color=bcolors.FAIL) + + +class SessionConfig(NamedTuple): + session_expiration_time: float # in minutes + carry_over_slots: bool + + @staticmethod + def default() -> "SessionConfig": + # TODO: 2.0, reconsider how to apply sessions to old projects + return SessionConfig( + DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES, + DEFAULT_CARRY_OVER_SLOTS_TO_NEW_SESSION, + ) + def are_sessions_enabled(self) -> bool: + return self.session_expiration_time > 0 -class Domain(object): + +class Domain: """The domain specifies the universe in which the bot's policy acts. 
A Domain subclass provides the actions the bot can take, the intents - and entities it can recognise""" + and entities it can recognise.""" @classmethod def empty(cls) -> "Domain": @@ -90,45 +145,76 @@ def from_path(cls, path: Text) -> "Domain": @classmethod def from_file(cls, path: Text) -> "Domain": - return cls.from_yaml(rasa.utils.io.read_file(path)) + return cls.from_yaml(rasa.utils.io.read_file(path), path) @classmethod - def from_yaml(cls, yaml: Text) -> "Domain": + def from_yaml(cls, yaml: Text, original_filename: Text = "") -> "Domain": + from rasa.validator import Validator + try: validate_yaml_schema(yaml, DOMAIN_SCHEMA_FILE) except InvalidYamlFileError as e: raise InvalidDomain(str(e)) data = rasa.utils.io.read_yaml(yaml) + if not Validator.validate_training_data_format_version(data, original_filename): + return Domain.empty() + return cls.from_dict(data) @classmethod def from_dict(cls, data: Dict) -> "Domain": - utter_templates = cls.collect_templates(data.get("templates", {})) - slots = cls.collect_slots(data.get("slots", {})) + utter_templates = cls.collect_templates(data.get(KEY_RESPONSES, {})) + if "templates" in data: + raise_warning( + "Your domain file contains the key: 'templates'. This has been " + "deprecated and renamed to 'responses'. The 'templates' key will " + "no longer work in future versions of Rasa. Please replace " + "'templates' with 'responses'", + FutureWarning, + docs=DOCS_URL_DOMAINS, + ) + utter_templates = cls.collect_templates(data.get("templates", {})) + + slots = cls.collect_slots(data.get(KEY_SLOTS, {})) additional_arguments = data.get("config", {}) - intents = data.get("intents", {}) + session_config = cls._get_session_config(data.get(SESSION_CONFIG_KEY, {})) + intents = data.get(KEY_INTENTS, {}) return cls( intents, - data.get("entities", []), + data.get(KEY_ENTITIES, []), slots, utter_templates, - data.get("actions", []), - data.get("forms", []), - **additional_arguments + data.get(KEY_ACTIONS, []), + data.get(KEY_FORMS, []), + session_config=session_config, + **additional_arguments, ) + @staticmethod + def _get_session_config(session_config: Dict) -> SessionConfig: + session_expiration_time_min = session_config.get(SESSION_EXPIRATION_TIME_KEY) + + # TODO: 2.0 reconsider how to apply sessions to old projects and legacy trackers + if session_expiration_time_min is None: + session_expiration_time_min = DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES + + carry_over_slots = session_config.get( + CARRY_OVER_SLOTS_KEY, DEFAULT_CARRY_OVER_SLOTS_TO_NEW_SESSION + ) + + return SessionConfig(session_expiration_time_min, carry_over_slots) + @classmethod def from_directory(cls, path: Text) -> "Domain": """Loads and merges multiple domain files recursively from a directory tree.""" - from rasa import data domain = Domain.empty() - for root, _, files in os.walk(path): + for root, _, files in os.walk(path, followlinks=True): for file in files: full_path = os.path.join(root, file) - if data.is_domain_file(full_path): + if Domain.is_domain_file(full_path): other = Domain.from_file(full_path) domain = other.merge(domain) @@ -162,35 +248,51 @@ def merge_dicts( def merge_lists(l1: List[Any], l2: List[Any]) -> List[Any]: return sorted(list(set(l1 + l2))) + def merge_lists_of_dicts( + dict_list1: List[Dict], + dict_list2: List[Dict], + override_existing_values: bool = False, + ) -> List[Dict]: + dict1 = {list(i.keys())[0]: i for i in dict_list1} + dict2 = {list(i.keys())[0]: i for i in dict_list2} + merged_dicts = merge_dicts(dict1, dict2, override_existing_values) + 
return list(merged_dicts.values()) + if override: config = domain_dict["config"] for key, val in config.items(): # pytype: disable=attribute-error combined["config"][key] = val - # intents is list of dicts - intents_1 = {list(i.keys())[0]: i for i in combined["intents"]} - intents_2 = {list(i.keys())[0]: i for i in domain_dict["intents"]} - merged_intents = merge_dicts(intents_1, intents_2, override) - combined["intents"] = list(merged_intents.values()) + if override or self.session_config == SessionConfig.default(): + combined[SESSION_CONFIG_KEY] = domain_dict[SESSION_CONFIG_KEY] + + combined[KEY_INTENTS] = merge_lists_of_dicts( + combined[KEY_INTENTS], domain_dict[KEY_INTENTS], override + ) + combined[KEY_FORMS] = merge_lists_of_dicts( + combined[KEY_FORMS], domain_dict[KEY_FORMS], override + ) # remove existing forms from new actions - for form in combined["forms"]: - if form in domain_dict["actions"]: - domain_dict["actions"].remove(form) + for form in combined[KEY_FORMS]: + if form in domain_dict[KEY_ACTIONS]: + domain_dict[KEY_ACTIONS].remove(form) - for key in ["entities", "actions", "forms"]: + for key in [KEY_ENTITIES, KEY_ACTIONS]: combined[key] = merge_lists(combined[key], domain_dict[key]) - for key in ["templates", "slots"]: + for key in [KEY_RESPONSES, KEY_SLOTS]: combined[key] = merge_dicts(combined[key], domain_dict[key], override) return self.__class__.from_dict(combined) @staticmethod - def collect_slots(slot_dict): + def collect_slots(slot_dict: Dict[Text, Any]) -> List[Slot]: # it is super important to sort the slots here!!! # otherwise state ordering is not consistent slots = [] + # make a copy to not alter the input dictionary + slot_dict = copy.deepcopy(slot_dict) for slot_name in sorted(slot_dict): slot_class = Slot.resolve_by_type(slot_dict[slot_name].get("type")) if "type" in slot_dict[slot_name]: @@ -200,67 +302,152 @@ def collect_slots(slot_dict): return slots @staticmethod + def _transform_intent_properties_for_internal_use( + intent: Dict[Text, Any], entities: List + ) -> Dict[Text, Any]: + """Transform intent properties coming from a domain file for internal use. + + In domain files, `use_entities` or `ignore_entities` is used. Internally, there + is a property `used_entities` instead that lists all entities to be used. + + Args: + intent: The intents as provided by a domain file. + entities: All entities as provided by a domain file. + + Returns: + The intents as they should be used internally. + """ + name, properties = list(intent.items())[0] + + properties.setdefault(USE_ENTITIES_KEY, True) + properties.setdefault(IGNORE_ENTITIES_KEY, []) + if not properties[USE_ENTITIES_KEY]: # this covers False, None and [] + properties[USE_ENTITIES_KEY] = [] + + # `use_entities` is either a list of explicitly included entities + # or `True` if all should be included + if properties[USE_ENTITIES_KEY] is True: + included_entities = set(entities) + else: + included_entities = set(properties[USE_ENTITIES_KEY]) + excluded_entities = set(properties[IGNORE_ENTITIES_KEY]) + used_entities = list(included_entities - excluded_entities) + used_entities.sort() + + # Only print warning for ambiguous configurations if entities were included + # explicitly. + explicitly_included = isinstance(properties[USE_ENTITIES_KEY], list) + ambiguous_entities = included_entities.intersection(excluded_entities) + if explicitly_included and ambiguous_entities: + raise_warning( + f"Entities: '{ambiguous_entities}' are explicitly included and" + f" excluded for intent '{name}'." 
+ f"Excluding takes precedence in this case. " + f"Please resolve that ambiguity.", + docs=f"{DOCS_URL_DOMAINS}#ignoring-entities-for-certain-intents", + ) + + properties[USED_ENTITIES_KEY] = used_entities + del properties[USE_ENTITIES_KEY] + del properties[IGNORE_ENTITIES_KEY] + + return intent + + @classmethod def collect_intent_properties( - intents: List[Union[Text, Dict[Text, Any]]] + cls, intents: List[Union[Text, Dict[Text, Any]]], entities: List[Text] ) -> Dict[Text, Dict[Text, Union[bool, List]]]: + """Get intent properties for a domain from what is provided by a domain file. + + Args: + intents: The intents as provided by a domain file. + entities: All entities as provided by a domain file. + + Returns: + The intent properties to be stored in the domain. + """ + # make a copy to not alter the input argument + intents = copy.deepcopy(intents) intent_properties = {} + duplicates = set() + for intent in intents: - if isinstance(intent, dict): - name = list(intent.keys())[0] - for properties in intent.values(): - properties.setdefault("use_entities", True) - properties.setdefault("ignore_entities", []) - if ( - properties["use_entities"] is None - or properties["use_entities"] is False - ): - properties["use_entities"] = [] - else: - name = intent - intent = {intent: {"use_entities": True, "ignore_entities": []}} + intent_name, properties = cls._intent_properties(intent, entities) - if name in intent_properties.keys(): - raise InvalidDomain( - "Intents are not unique! Found two intents with name '{}'. " - "Either rename or remove one of them.".format(name) - ) + if intent_name in intent_properties.keys(): + duplicates.add(intent_name) + + intent_properties.update(properties) + + if duplicates: + raise InvalidDomain( + f"Intents are not unique! Found multiple intents with name(s) {sorted(duplicates)}. " + f"Either rename or remove the duplicate ones." 
+ ) + + cls._add_default_intents(intent_properties, entities) - intent_properties.update(intent) return intent_properties + @classmethod + def _intent_properties( + cls, intent: Union[Text, Dict[Text, Any]], entities: List[Text] + ) -> Tuple[Text, Dict[Text, Any]]: + if not isinstance(intent, dict): + intent_name = intent + intent = {intent_name: {USE_ENTITIES_KEY: True, IGNORE_ENTITIES_KEY: []}} + else: + intent_name = list(intent.keys())[0] + + return ( + intent_name, + cls._transform_intent_properties_for_internal_use(intent, entities), + ) + + @classmethod + def _add_default_intents( + cls, + intent_properties: Dict[Text, Dict[Text, Union[bool, List]]], + entities: List[Text], + ) -> None: + for intent_name in DEFAULT_INTENTS: + if intent_name not in intent_properties: + _, properties = cls._intent_properties(intent_name, entities) + intent_properties.update(properties) + @staticmethod def collect_templates( yml_templates: Dict[Text, List[Any]] ) -> Dict[Text, List[Dict[Text, Any]]]: - """Go through the templates and make sure they are all in dict format - """ + """Go through the templates and make sure they are all in dict format.""" templates = {} for template_key, template_variations in yml_templates.items(): validated_variations = [] if template_variations is None: raise InvalidDomain( - "Utterance '{}' does not have any defined templates.".format( + "Response '{}' does not have any defined variations.".format( template_key ) ) for t in template_variations: - # templates should be a dict with options + # responses should be a dict with options if isinstance(t, str): - logger.warning( - "Deprecated: Templates should not be strings anymore. " - "Utterance template '{}' should contain either '- text: ' or " - "'- custom: ' attribute to be a proper template.".format( - template_key - ) + raise_warning( + f"Responses should not be strings anymore. " + f"Response '{template_key}' should contain " + f"either a '- text: ' or a '- custom: ' " + f"attribute to be a proper response.", + FutureWarning, + docs=DOCS_URL_DOMAINS + "#responses", ) validated_variations.append({"text": t}) elif "text" not in t and "custom" not in t: raise InvalidDomain( - "Utter template '{}' needs to contain either " - "'- text: ' or '- custom: ' attribute to be a proper " - "template.".format(template_key) + f"Response '{template_key}' needs to contain either " + f"'- text: ' or '- custom: ' attribute to be a proper " + f"response." ) else: validated_variations.append(t) @@ -273,59 +460,82 @@ def __init__( intents: Union[Set[Text], List[Union[Text, Dict[Text, Any]]]], entities: List[Text], slots: List[Slot], - templates: Dict[Text, Any], + templates: Dict[Text, List[Dict[Text, Any]]], action_names: List[Text], - form_names: List[Text], + forms: List[Union[Text, Dict]], store_entities_as_slots: bool = True, + session_config: SessionConfig = SessionConfig.default(), ) -> None: - self.intent_properties = self.collect_intent_properties(intents) + self.intent_properties = self.collect_intent_properties(intents, entities) self.entities = entities - self.form_names = form_names + + # Forms used to be a list of form names. 
Now they can also contain + # `SlotMapping`s + if not forms or (forms and isinstance(forms[0], str)): + self.form_names = forms + self.forms: List[Dict] = [{form_name: {}} for form_name in forms] + elif isinstance(forms[0], dict): + self.forms: List[Dict] = forms + self.form_names = [list(f.keys())[0] for f in forms] + self.slots = slots self.templates = templates + self.session_config = session_config + + self._custom_actions = action_names # only includes custom actions and utterance actions - self.user_actions = action_names + self.user_actions = action.combine_with_templates(action_names, templates) + # includes all actions (custom, utterance, default actions and forms) self.action_names = ( - action.combine_user_with_default_actions(action_names) + form_names + action.combine_user_with_default_actions(self.user_actions) + + self.form_names ) - self.store_entities_as_slots = store_entities_as_slots + self.store_entities_as_slots = store_entities_as_slots self._check_domain_sanity() def __hash__(self) -> int: - from rasa.utils.common import sort_list_of_dicts_by_first_key self_as_dict = self.as_dict() - self_as_dict["intents"] = sort_list_of_dicts_by_first_key( - self_as_dict["intents"] + self_as_dict[KEY_INTENTS] = sort_list_of_dicts_by_first_key( + self_as_dict[KEY_INTENTS] ) self_as_string = json.dumps(self_as_dict, sort_keys=True) text_hash = utils.get_text_hash(self_as_string) return int(text_hash, 16) - @common_utils.lazy_property + @lazy_property def user_actions_and_forms(self): - """Returns combination of user actions and forms""" + """Returns combination of user actions and forms.""" return self.user_actions + self.form_names - @common_utils.lazy_property + @lazy_property def num_actions(self): """Returns the number of available actions.""" # noinspection PyTypeChecker return len(self.action_names) - @common_utils.lazy_property + @lazy_property def num_states(self): """Number of used input states for the action prediction.""" return len(self.input_states) + def add_categorical_slot_default_value(self) -> None: + """Add a default value to all categorical slots. + + All unseen values found for the slot will be mapped to this default value + for featurization. + """ + for slot in [s for s in self.slots if type(s) is CategoricalSlot]: + slot.add_default_value() + def add_requested_slot(self) -> None: """Add a slot called `requested_slot` to the list of slots. @@ -335,7 +545,7 @@ def add_requested_slot(self) -> None: if self.form_names and REQUESTED_SLOT not in [s.name for s in self.slots]: self.slots.append(UnfeaturizedSlot(REQUESTED_SLOT)) - def add_knowledge_base_slots(self): + def add_knowledge_base_slots(self) -> None: """ Add slots for the knowledge base action to the list of slots, if the default knowledge base action name is present. 
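Editorial aside, not part of the patch: a sketch of the two forms formats the updated __init__ above accepts, and what the slot_mapping_for_form helper added near the end of this file's diff returns for each. The form, slot and entity names are made-up placeholders, and the from_entity mapping is only illustrative.

from rasa.core.domain import Domain

# Old format: a plain list of form names is normalised to empty slot mappings.
Domain.from_dict({"forms": ["restaurant_form"]}).forms
# -> [{"restaurant_form": {}}]

# New format: a list of dicts can carry slot mappings.
domain = Domain.from_dict(
    {"forms": [{"restaurant_form": {"cuisine": [{"type": "from_entity", "entity": "cuisine"}]}}]}
)
domain.form_names                                # -> ["restaurant_form"]
domain.slot_mapping_for_form("restaurant_form")  # -> {"cuisine": [{"type": "from_entity", "entity": "cuisine"}]}
domain.slot_mapping_for_form("another_form")     # -> {}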
@@ -363,13 +573,20 @@ def add_knowledge_base_slots(self): def action_for_name( self, action_name: Text, action_endpoint: Optional[EndpointConfig] ) -> Optional[Action]: - """Looks up which action corresponds to this action name.""" + """Look up which action corresponds to this action name.""" if action_name not in self.action_names: self._raise_action_not_found_exception(action_name) + should_use_form_action = ( + action_name in self.form_names and self.slot_mapping_for_form(action_name) + ) + return action.action_from_name( - action_name, action_endpoint, self.user_actions_and_forms + action_name, + action_endpoint, + self.user_actions_and_forms, + should_use_form_action, ) def action_for_index( @@ -388,30 +605,29 @@ def action_for_index( return self.action_for_name(self.action_names[index], action_endpoint) - def actions(self, action_endpoint): + def actions(self, action_endpoint) -> List[Optional[Action]]: return [ self.action_for_name(name, action_endpoint) for name in self.action_names ] def index_for_action(self, action_name: Text) -> Optional[int]: - """Looks up which action index corresponds to this action name""" + """Look up which action index corresponds to this action name.""" try: return self.action_names.index(action_name) except ValueError: self._raise_action_not_found_exception(action_name) - def _raise_action_not_found_exception(self, action_name): - action_names = "\n".join(["\t - {}".format(a) for a in self.action_names]) + def _raise_action_not_found_exception(self, action_name) -> typing.NoReturn: + action_names = "\n".join([f"\t - {a}" for a in self.action_names]) raise NameError( - "Cannot access action '{}', " - "as that name is not a registered " - "action for this domain. " - "Available actions are: \n{}" - "".format(action_name, action_names) + f"Cannot access action '{action_name}', " + f"as that name is not a registered " + f"action for this domain. 
" + f"Available actions are: \n{action_names}" ) - def random_template_for(self, utter_action): + def random_template_for(self, utter_action: Text) -> Optional[Dict[Text, Any]]: import numpy as np if utter_action in self.templates: @@ -420,53 +636,53 @@ def random_template_for(self, utter_action): return None # noinspection PyTypeChecker - @common_utils.lazy_property + @lazy_property def slot_states(self) -> List[Text]: """Returns all available slot state strings.""" return [ - "slot_{}_{}".format(s.name, i) + f"slot_{s.name}_{i}" for s in self.slots for i in range(0, s.feature_dimensionality()) ] # noinspection PyTypeChecker - @common_utils.lazy_property + @lazy_property def prev_action_states(self) -> List[Text]: """Returns all available previous action state strings.""" return [PREV_PREFIX + a for a in self.action_names] # noinspection PyTypeChecker - @common_utils.lazy_property + @lazy_property def intent_states(self) -> List[Text]: """Returns all available previous action state strings.""" - return ["intent_{0}".format(i) for i in self.intents] + return [f"intent_{i}" for i in self.intents] # noinspection PyTypeChecker - @common_utils.lazy_property + @lazy_property def entity_states(self) -> List[Text]: """Returns all available previous action state strings.""" - return ["entity_{0}".format(e) for e in self.entities] + return [f"entity_{e}" for e in self.entities] # noinspection PyTypeChecker - @common_utils.lazy_property + @lazy_property def form_states(self) -> List[Text]: - return ["active_form_{0}".format(f) for f in self.form_names] + return [f"active_form_{f}" for f in self.form_names] def index_of_state(self, state_name: Text) -> Optional[int]: - """Provides the index of a state.""" + """Provide the index of a state.""" return self.input_state_map.get(state_name) - @common_utils.lazy_property + @lazy_property def input_state_map(self) -> Dict[Text, int]: - """Provides a mapping from state names to indices.""" + """Provide a mapping from state names to indices.""" return {f: i for i, f in enumerate(self.input_states)} - @common_utils.lazy_property + @lazy_property def input_states(self) -> List[Text]: """Returns all available states.""" @@ -488,66 +704,56 @@ def get_parsing_states(self, tracker: "DialogueStateTracker") -> Dict[Text, floa if not latest_message: return state_dict - intent_name = latest_message.intent.get("name") + intent_name = latest_message.intent.get(INTENT_NAME_KEY) if intent_name: for entity_name in self._get_featurized_entities(latest_message): - key = "entity_{0}".format(entity_name) + key = f"entity_{entity_name}" state_dict[key] = 1.0 # Set all set slots with the featurization of the stored value for key, slot in tracker.slots.items(): if slot is not None: - for i, slot_value in enumerate(slot.as_feature()): - if slot_value != 0: - slot_id = "slot_{}_{}".format(key, i) - state_dict[slot_id] = slot_value + if slot.value == "None" and slot.as_feature(): + # TODO: this is a hack to make a rule know + # that slot or form should not be set + # but only if the slot is featurized + slot_id = f"slot_{key}_None" + state_dict[slot_id] = 1 + else: + for i, slot_value in enumerate(slot.as_feature()): + if slot_value != 0: + slot_id = f"slot_{key}_{i}" + state_dict[slot_id] = slot_value if "intent_ranking" in latest_message.parse_data: for intent in latest_message.parse_data["intent_ranking"]: - if intent.get("name"): - intent_id = "intent_{}".format(intent["name"]) + if intent.get(INTENT_NAME_KEY): + intent_id = "intent_{}".format(intent[INTENT_NAME_KEY]) 
state_dict[intent_id] = intent["confidence"] elif intent_name: - intent_id = "intent_{}".format(latest_message.intent["name"]) + intent_id = "intent_{}".format(latest_message.intent[INTENT_NAME_KEY]) state_dict[intent_id] = latest_message.intent.get("confidence", 1.0) return state_dict def _get_featurized_entities(self, latest_message: UserUttered) -> Set[Text]: - intent_name = latest_message.intent.get("name") + intent_name = latest_message.intent.get(INTENT_NAME_KEY) intent_config = self.intent_config(intent_name) entities = latest_message.entities entity_names = { entity["entity"] for entity in entities if "entity" in entity.keys() } - # `use_entities` is either a list of explicitly included entities - # or `True` if all should be included - include = intent_config.get("use_entities", True) - included_entities = set(entity_names if include is True else include) - excluded_entities = set(intent_config.get("ignore_entities", [])) - wanted_entities = included_entities - excluded_entities - - # Only print warning for ambiguous configurations if entities were included - # explicitly. - explicitly_included = isinstance(include, list) - ambiguous_entities = included_entities.intersection(excluded_entities) - if explicitly_included and ambiguous_entities: - logger.warning( - "Entities: '{}' are explicitly included and excluded for intent '{}'. " - "Excluding takes precedence in this case. " - "Please resolve that ambiguity." - "".format(ambiguous_entities, intent_name) - ) + wanted_entities = set(intent_config.get(USED_ENTITIES_KEY, entity_names)) return entity_names.intersection(wanted_entities) def get_prev_action_states( self, tracker: "DialogueStateTracker" ) -> Dict[Text, float]: - """Turns the previous taken action into a state name.""" + """Turn the previous taken action into a state name.""" latest_action = tracker.latest_action_name if latest_action: @@ -555,29 +761,21 @@ def get_prev_action_states( if prev_action_name in self.input_state_map: return {prev_action_name: 1.0} else: - logger.warning( - "Failed to use action '{}' in history. " - "Please make sure all actions are listed in the " - "domains action list. If you recently removed an " - "action, don't worry about this warning. It " - "should stop appearing after a while. 
" - "".format(latest_action) - ) return {} else: return {} @staticmethod def get_active_form(tracker: "DialogueStateTracker") -> Dict[Text, float]: - """Turns tracker's active form into a state name.""" - form = tracker.active_form.get("name") + """Turn tracker's active form into a state name.""" + form = tracker.active_loop.get("name") if form is not None: return {ACTIVE_FORM_PREFIX + form: 1.0} else: return {} def get_active_states(self, tracker: "DialogueStateTracker") -> Dict[Text, float]: - """Return a bag of active states from the tracker state""" + """Return a bag of active states from the tracker state.""" state_dict = self.get_parsing_states(tracker) state_dict.update(self.get_prev_action_states(tracker)) state_dict.update(self.get_active_form(tracker)) @@ -591,7 +789,7 @@ def states_for_tracker_history( self.get_active_states(tr) for tr in tracker.generate_all_prior_trackers() ] - def slots_for_entities(self, entities): + def slots_for_entities(self, entities: List[Dict[Text, Any]]) -> List[SlotSet]: if self.store_entities_as_slots: slot_events = [] for s in self.slots: @@ -609,13 +807,13 @@ def slots_for_entities(self, entities): return [] def persist_specification(self, model_path: Text) -> None: - """Persists the domain specification to storage.""" + """Persist the domain specification to storage.""" domain_spec_path = os.path.join(model_path, "domain.json") rasa.utils.io.create_directory_for_file(domain_spec_path) metadata = {"states": self.input_states} - utils.dump_obj_as_json_to_file(domain_spec_path, metadata) + rasa.utils.io.dump_obj_as_json_to_file(domain_spec_path, metadata) @classmethod def load_specification(cls, path: Text) -> Dict[Text, Any]: @@ -626,7 +824,7 @@ def load_specification(cls, path: Text) -> Dict[Text, Any]: return specification def compare_with_specification(self, path: Text) -> bool: - """Compares the domain spec of the current and the loaded domain. + """Compare the domain spec of the current and the loaded domain. Throws exception if the loaded domain specification is different to the current domain are different.""" @@ -638,52 +836,95 @@ def compare_with_specification(self, path: Text) -> bool: missing = ",".join(set(states) - set(self.input_states)) additional = ",".join(set(self.input_states) - set(states)) raise InvalidDomain( - "Domain specification has changed. " - "You MUST retrain the policy. " - + "Detected mismatch in domain specification. " - + "The following states have been \n" - "\t - removed: {} \n" - "\t - added: {} ".format(missing, additional) + f"Domain specification has changed. " + f"You MUST retrain the policy. " + f"Detected mismatch in domain specification. 
" + f"The following states have been \n" + f"\t - removed: {missing} \n" + f"\t - added: {additional} " ) else: return True - def _slot_definitions(self): + def _slot_definitions(self) -> Dict[Any, Dict[str, Any]]: return {slot.name: slot.persistence_info() for slot in self.slots} def as_dict(self) -> Dict[Text, Any]: - additional_config = {"store_entities_as_slots": self.store_entities_as_slots} return { - "config": additional_config, - "intents": [{k: v} for k, v in self.intent_properties.items()], - "entities": self.entities, - "slots": self._slot_definitions(), - "templates": self.templates, - "actions": self.user_actions, # class names of the actions - "forms": self.form_names, + "config": {"store_entities_as_slots": self.store_entities_as_slots}, + SESSION_CONFIG_KEY: { + SESSION_EXPIRATION_TIME_KEY: self.session_config.session_expiration_time, + CARRY_OVER_SLOTS_KEY: self.session_config.carry_over_slots, + }, + KEY_INTENTS: self._transform_intents_for_file(), + KEY_ENTITIES: self.entities, + KEY_SLOTS: self._slot_definitions(), + KEY_RESPONSES: self.templates, + KEY_ACTIONS: self._custom_actions, # class names of the actions + KEY_FORMS: self.forms, } - def persist(self, filename: Text) -> None: + def persist(self, filename: Union[Text, Path]) -> None: """Write domain to a file.""" domain_data = self.as_dict() - utils.dump_obj_as_yaml_to_file(filename, domain_data) + utils.dump_obj_as_yaml_to_file( + filename, domain_data, should_preserve_key_order=True + ) + + def _transform_intents_for_file(self) -> List[Union[Text, Dict[Text, Any]]]: + """Transform intent properties for displaying or writing into a domain file. + + Internally, there is a property `used_entities` that lists all entities to be + used. In domain files, `use_entities` or `ignore_entities` is used instead to + list individual entities to ex- or include, because this is easier to read. + + Returns: + The intent properties as they are used in domain files. + """ + intent_properties = copy.deepcopy(self.intent_properties) + intents_for_file = [] + + for intent_name, intent_props in intent_properties.items(): + if intent_name in DEFAULT_INTENTS: + # Default intents should be not dumped with the domain + continue + use_entities = set(intent_props[USED_ENTITIES_KEY]) + ignore_entities = set(self.entities) - use_entities + if len(use_entities) == len(self.entities): + intent_props[USE_ENTITIES_KEY] = True + elif len(use_entities) <= len(self.entities) / 2: + intent_props[USE_ENTITIES_KEY] = list(use_entities) + else: + intent_props[IGNORE_ENTITIES_KEY] = list(ignore_entities) + intent_props.pop(USED_ENTITIES_KEY) + intents_for_file.append({intent_name: intent_props}) + + return intents_for_file def cleaned_domain(self) -> Dict[Text, Any]: - """Fetch cleaned domain, replacing redundant keys with default values.""" + """Fetch cleaned domain to display or write into a file. + The internal `used_entities` property is replaced by `use_entities` or + `ignore_entities` and redundant keys are replaced with default values + to make the domain easier readable. + + Returns: + A cleaned dictionary version of the domain. 
+ """ domain_data = self.as_dict() - for idx, intent_info in enumerate(domain_data["intents"]): + + for idx, intent_info in enumerate(domain_data[KEY_INTENTS]): for name, intent in intent_info.items(): - if intent.get("use_entities") is True: - intent.pop("use_entities") - if not intent.get("ignore_entities"): - intent.pop("ignore_entities", None) + if intent.get(USE_ENTITIES_KEY) is True: + del intent[USE_ENTITIES_KEY] + if not intent.get(IGNORE_ENTITIES_KEY): + intent.pop(IGNORE_ENTITIES_KEY, None) if len(intent) == 0: - domain_data["intents"][idx] = name + domain_data[KEY_INTENTS][idx] = name - for slot in domain_data["slots"].values(): # pytype: disable=attribute-error + for slot in domain_data[KEY_SLOTS].values(): # pytype: disable=attribute-error if slot["initial_value"] is None: del slot["initial_value"] if slot["auto_fill"]: @@ -705,9 +946,11 @@ def persist_clean(self, filename: Text) -> None: """Write cleaned domain to a file.""" cleaned_domain_data = self.cleaned_domain() - utils.dump_obj_as_yaml_to_file(filename, cleaned_domain_data) + utils.dump_obj_as_yaml_to_file( + filename, cleaned_domain_data, should_preserve_key_order=True + ) - def as_yaml(self, clean_before_dump=False): + def as_yaml(self, clean_before_dump: bool = False) -> Text: if clean_before_dump: domain_data = self.cleaned_domain() else: @@ -719,7 +962,7 @@ def intent_config(self, intent_name: Text) -> Dict[Text, Any]: """Return the configuration for an intent.""" return self.intent_properties.get(intent_name, {}) - @common_utils.lazy_property + @lazy_property def intents(self): return sorted(self.intent_properties.keys()) @@ -799,7 +1042,7 @@ def domain_warnings( "slot_warnings": slot_warnings, } - def _check_domain_sanity(self): + def _check_domain_sanity(self) -> None: """Make sure the domain is properly configured. If the domain contains any duplicate slots, intents, actions or entities, an InvalidDomain error is raised. This error @@ -867,9 +1110,9 @@ def get_duplicate_exception_message( if message: message += "\n" message += ( - "Duplicate {0} in domain. " - "These {0} occur more than once in " - "the domain: '{1}'".format(name, "', '".join(d)) + f"Duplicate {name} in domain. " + f"These {name} occur more than once in " + f"the domain: '{', '.join(d)}'." ) return message @@ -887,9 +1130,9 @@ def get_duplicate_exception_message( raise InvalidDomain( get_exception_message( [ - (duplicate_actions, "actions"), - (duplicate_slots, "slots"), - (duplicate_entities, "entities"), + (duplicate_actions, KEY_ACTIONS), + (duplicate_slots, KEY_SLOTS), + (duplicate_entities, KEY_ENTITIES), ], incorrect_mappings, ) @@ -908,18 +1151,62 @@ def check_missing_templates(self) -> None: if missing_templates: for template in missing_templates: - logger.warning( - "Utterance '{}' is listed as an " - "action in the domain file, but there is " - "no matching utterance template. Please " - "check your domain.".format(template) + raise_warning( + f"Action '{template}' is listed as a " + f"response action in the domain file, but there is " + f"no matching response defined. Please " + f"check your domain.", + docs=DOCS_URL_DOMAINS + "#responses", ) def is_empty(self) -> bool: - """Checks whether the domain is empty.""" + """Check whether the domain is empty.""" return self.as_dict() == Domain.empty().as_dict() + @staticmethod + def is_domain_file(filename: Text) -> bool: + """Checks whether the given file path is a Rasa domain file. + + Args: + filename: Path of the file which should be checked. 
+ + Returns: + `True` if it's a domain file, otherwise `False`. + """ + from rasa.data import YAML_FILE_EXTENSIONS + + if not Path(filename).suffix in YAML_FILE_EXTENSIONS: + return False + try: + content = rasa.utils.io.read_yaml_file(filename) + if any(key in content for key in ALL_DOMAIN_KEYS): + return True + except YAMLError: + pass + + return False + + def slot_mapping_for_form(self, form_name: Text) -> Dict: + """Retrieve the slot mappings for a form which are defined in the domain. + + Options: + - an extracted entity + - intent: value pairs + - trigger_intent: value pairs + - a whole message + or a list of them, where the first match will be picked + + Args: + form_name: The name of the form. + + Returns: + The slot mapping or an empty dictionary in case no mapping was found. + """ + return next( + (form[form_name] for form in self.forms if form_name in form.keys()), {} + ) + class TemplateDomain(Domain): pass diff --git a/rasa/core/events/__init__.py b/rasa/core/events/__init__.py index 79d6de2c3769..0e13450c12e6 100644 --- a/rasa/core/events/__init__.py +++ b/rasa/core/events/__init__.py @@ -1,14 +1,24 @@ -import time -import typing - import json -import jsonpickle import logging +import re + +import jsonpickle +import time +import typing import uuid from dateutil import parser +from datetime import datetime from typing import List, Dict, Text, Any, Type, Optional from rasa.core import utils +from typing import Union + +from rasa.core.constants import ( + IS_EXTERNAL, + EXTERNAL_MESSAGE_PREFIX, + ACTION_NAME_SENDER_ID_CONNECTOR_STR, +) +from rasa.nlu.constants import INTENT_NAME_KEY if typing.TYPE_CHECKING: from rasa.core.trackers import DialogueStateTracker @@ -32,26 +42,27 @@ def deserialise_events(serialized_events: List[Dict[Text, Any]]) -> List["Event" deserialised.append(event) else: logger.warning( - "Ignoring event ({}) while deserialising " - "events. Couldn't parse it." + f"Unable to parse event '{event}' while deserialising. The event" + " will be ignored." 
) return deserialised -def deserialise_entities(entities): +def deserialise_entities(entities: Union[Text, List[Any]]) -> List[Dict[Text, Any]]: if isinstance(entities, str): entities = json.loads(entities) return [e for e in entities if isinstance(e, dict)] -def md_format_message(text, intent, entities): - from rasa.nlu.training_data.formats import MarkdownWriter, MarkdownReader +def md_format_message(text, intent, entities) -> Text: + from rasa.nlu.training_data.formats import MarkdownReader + from rasa.nlu.training_data.formats.readerwriter import TrainingDataWriter - message_from_md = MarkdownReader()._parse_training_example(text) + message_from_md = MarkdownReader().parse_training_example(text) deserialised_entities = deserialise_entities(entities) - return MarkdownWriter()._generate_message_md( + return TrainingDataWriter.generate_message( { "text": message_from_md.text, "intent": intent, @@ -60,7 +71,7 @@ def md_format_message(text, intent, entities): ) -def first_key(d, default_key): +def first_key(d: Dict[Text, Any], default_key: Any) -> Any: if len(d) > 1: for k, v in d.items(): if k != default_key: @@ -73,22 +84,39 @@ def first_key(d, default_key): # noinspection PyProtectedMember -class Event(object): +class Event: """Events describe everything that occurs in a conversation and tell the :class:`rasa.core.trackers.DialogueStateTracker` how to update its state.""" type_name = "event" - def __init__(self, timestamp: Optional[float] = None): - self.timestamp = timestamp if timestamp else time.time() + def __init__( + self, + timestamp: Optional[float] = None, + metadata: Optional[Dict[Text, Any]] = None, + ) -> None: + self.timestamp = timestamp or time.time() + self._metadata = metadata or {} + + @property + def metadata(self) -> Dict[Text, Any]: + # Needed for compatibility with Rasa versions <1.4.0. Previous versions + # of Rasa serialized trackers using the pickle module. For the moment, + # Rasa still supports loading these serialized trackers with pickle, + # but will use JSON in any subsequent save operations. Versions of + # trackers serialized with pickle won't include the `_metadata` + # attribute in their events, so it is necessary to define this getter + # in case the attribute does not exist. For more information see + # CHANGELOG.rst. 
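# Editorial aside, not part of the patch: a short sketch of the resulting behaviour,
# using the SlotSet signature that gains a `metadata` argument later in this diff.
#     SlotSet("cuisine", "italian", metadata={"source": "reminder"}).metadata
#     # -> {"source": "reminder"}
#     SlotSet("cuisine", "italian").metadata
#     # -> {} (also the fallback for events restored from trackers pickled by Rasa <1.4.0)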
+ return getattr(self, "_metadata", {}) def __ne__(self, other: Any) -> bool: # Not strictly necessary, but to avoid having both x==y and x!=y # True at the same time return not (self == other) - def as_story_string(self) -> Text: + def as_story_string(self) -> Optional[Text]: raise NotImplementedError @staticmethod @@ -97,38 +125,40 @@ def from_story_string( parameters: Dict[Text, Any], default: Optional[Type["Event"]] = None, ) -> Optional[List["Event"]]: - event = Event.resolve_by_type(event_name, default) + event_class = Event.resolve_by_type(event_name, default) - if event: - return event._from_story_string(parameters) - else: + if not event_class: return None + return event_class._from_story_string(parameters) + @staticmethod def from_parameters( parameters: Dict[Text, Any], default: Optional[Type["Event"]] = None ) -> Optional["Event"]: event_name = parameters.get("event") - if event_name is not None: - copied = parameters.copy() - del copied["event"] + if event_name is None: + return None - event = Event.resolve_by_type(event_name, default) - if event: - return event._from_parameters(parameters) - else: - return None - else: + event_class: Optional[Type[Event]] = Event.resolve_by_type(event_name, default) + if not event_class: return None + return event_class._from_parameters(parameters) + @classmethod def _from_story_string(cls, parameters: Dict[Text, Any]) -> Optional[List["Event"]]: """Called to convert a parsed story line into an event.""" - return [cls(parameters.get("timestamp"))] + return [cls(parameters.get("timestamp"), parameters.get("metadata"))] + + def as_dict(self) -> Dict[Text, Any]: + d = {"event": self.type_name, "timestamp": self.timestamp} - def as_dict(self): - return {"event": self.type_name, "timestamp": self.timestamp} + if self.metadata: + d["metadata"] = self.metadata + + return d @classmethod def _from_parameters(cls, parameters: Dict[Text, Any]) -> Optional["Event"]: @@ -142,10 +172,10 @@ def _from_parameters(cls, parameters: Dict[Text, Any]) -> Optional["Event"]: result = cls._from_story_string(parameters) if len(result) > 1: logger.warning( - "Event from parameters called with parameters " - "for multiple events. This is not supported, " - "only the first event will be returned. " - "Parameters: {}".format(parameters) + f"Event from parameters called with parameters " + f"for multiple events. This is not supported, " + f"only the first event will be returned. 
" + f"Parameters: {parameters}" ) return result[0] if result else None @@ -164,7 +194,7 @@ def resolve_by_type( elif default is not None: return default else: - raise ValueError("Unknown event name '{}'.".format(type_name)) + raise ValueError(f"Unknown event name '{type_name}'.") def apply_to(self, tracker: "DialogueStateTracker") -> None: pass @@ -181,10 +211,10 @@ class UserUttered(Event): def __init__( self, text: Optional[Text] = None, - intent=None, - entities=None, + intent: Optional[Dict] = None, + entities: Optional[List[Dict]] = None, parse_data: Optional[Dict[Text, Any]] = None, - timestamp: Optional[int] = None, + timestamp: Optional[float] = None, input_channel: Optional[Text] = None, message_id: Optional[Text] = None, metadata: Optional[Dict] = None, @@ -194,7 +224,8 @@ def __init__( self.entities = entities if entities else [] self.input_channel = input_channel self.message_id = message_id - self.metadata = metadata + + super().__init__(timestamp, metadata) if parse_data: self.parse_data = parse_data @@ -207,13 +238,11 @@ def __init__( "metadata": self.metadata, } - super(UserUttered, self).__init__(timestamp) - @staticmethod def _from_parse_data( text: Text, parse_data: Dict[Text, Any], - timestamp: Optional[int] = None, + timestamp: Optional[float] = None, input_channel: Optional[Text] = None, message_id: Optional[Text] = None, metadata: Optional[Dict] = None, @@ -229,43 +258,47 @@ def _from_parse_data( metadata, ) - def __hash__(self): + def __hash__(self) -> int: return hash( - (self.text, self.intent.get("name"), jsonpickle.encode(self.entities)) + ( + self.text, + self.intent.get(INTENT_NAME_KEY), + jsonpickle.encode(self.entities), + ) ) - def __eq__(self, other): + def __eq__(self, other) -> bool: if not isinstance(other, UserUttered): return False else: return ( self.text, - self.intent.get("name"), + self.intent.get(INTENT_NAME_KEY), [jsonpickle.encode(ent) for ent in self.entities], ) == ( other.text, - other.intent.get("name"), + other.intent.get(INTENT_NAME_KEY), [jsonpickle.encode(ent) for ent in other.entities], ) - def __str__(self): + def __str__(self) -> Text: return "UserUttered(text: {}, intent: {}, entities: {})".format( self.text, self.intent, self.entities ) @staticmethod - def empty(): + def empty() -> "UserUttered": return UserUttered(None) def as_dict(self) -> Dict[Text, Any]: - _dict = super(UserUttered, self).as_dict() + _dict = super().as_dict() _dict.update( { "text": self.text, "parse_data": self.parse_data, "input_channel": getattr(self, "input_channel", None), "message_id": getattr(self, "message_id", None), - "metadata": getattr(self, "metadata", None), + "metadata": self.metadata, } ) return _dict @@ -284,9 +317,9 @@ def _from_story_string(cls, parameters: Dict[Text, Any]) -> Optional[List[Event] ) ] except KeyError as e: - raise ValueError("Failed to parse bot uttered event. {}".format(e)) + raise ValueError(f"Failed to parse bot uttered event. 
{e}") - def as_story_string(self, e2e=False): + def as_story_string(self, e2e: bool = False) -> Text: if self.intent: if self.entities: ent_string = json.dumps( @@ -297,11 +330,11 @@ def as_story_string(self, e2e=False): ent_string = "" parse_string = "{intent}{entities}".format( - intent=self.intent.get("name", ""), entities=ent_string + intent=self.intent.get(INTENT_NAME_KEY, ""), entities=ent_string ) if e2e: message = md_format_message(self.text, self.intent, self.entities) - return "{}: {}".format(self.intent.get("name"), message) + return "{}: {}".format(self.intent.get(INTENT_NAME_KEY), message) else: return parse_string else: @@ -311,6 +344,17 @@ def apply_to(self, tracker: "DialogueStateTracker") -> None: tracker.latest_message = self tracker.clear_followup_action() + @staticmethod + def create_external( + intent_name: Text, entity_list: Optional[List[Dict[Text, Any]]] = None + ) -> "UserUttered": + return UserUttered( + text=f"{EXTERNAL_MESSAGE_PREFIX}{intent_name}", + intent={INTENT_NAME_KEY: intent_name}, + metadata={IS_EXTERNAL: True}, + entities=entity_list or [], + ) + # noinspection PyProtectedMember class BotUttered(Event): @@ -322,20 +366,10 @@ class BotUttered(Event): type_name = "bot" - def __init__(self, text=None, data=None, metadata=None, timestamp=None): + def __init__(self, text=None, data=None, metadata=None, timestamp=None) -> None: self.text = text self.data = data or {} - self._metadata = metadata or {} - super(BotUttered, self).__init__(timestamp) - - @property - def metadata(self): - # needed for backwards compatibility <1.0.0 - previously pickled events - # won't have the `_metadata` attribute - if hasattr(self, "_metadata"): - return self._metadata - else: - return {} + super().__init__(timestamp, metadata) def __members(self): data_no_nones = utils.remove_none_values(self.data) @@ -346,21 +380,21 @@ def __members(self): jsonpickle.encode(meta_no_nones), ) - def __hash__(self): + def __hash__(self) -> int: return hash(self.__members()) - def __eq__(self, other): + def __eq__(self, other) -> bool: if not isinstance(other, BotUttered): return False else: return self.__members() == other.__members() - def __str__(self): + def __str__(self) -> Text: return "BotUttered(text: {}, data: {}, metadata: {})".format( self.text, json.dumps(self.data), json.dumps(self.metadata) ) - def __repr__(self): + def __repr__(self) -> Text: return "BotUttered('{}', {}, {}, {})".format( self.text, json.dumps(self.data), json.dumps(self.metadata), self.timestamp ) @@ -369,7 +403,7 @@ def apply_to(self, tracker: "DialogueStateTracker") -> None: tracker.latest_bot_utterance = self - def as_story_string(self): + def as_story_string(self) -> None: return None def message(self) -> Dict[Text, Any]: @@ -390,16 +424,16 @@ def message(self) -> Dict[Text, Any]: return m @staticmethod - def empty(): + def empty() -> "BotUttered": return BotUttered() - def as_dict(self): - d = super(BotUttered, self).as_dict() + def as_dict(self) -> Dict[Text, Any]: + d = super().as_dict() d.update({"text": self.text, "data": self.data, "metadata": self.metadata}) return d @classmethod - def _from_parameters(cls, parameters): + def _from_parameters(cls, parameters) -> "BotUttered": try: return BotUttered( parameters.get("text"), @@ -408,7 +442,7 @@ def _from_parameters(cls, parameters): parameters.get("timestamp"), ) except KeyError as e: - raise ValueError("Failed to parse bot uttered event. {}".format(e)) + raise ValueError(f"Failed to parse bot uttered event. 
{e}") # noinspection PyProtectedMember @@ -423,26 +457,32 @@ class SlotSet(Event): type_name = "slot" - def __init__(self, key, value=None, timestamp=None): + def __init__( + self, + key: Text, + value: Optional[Any] = None, + timestamp: Optional[float] = None, + metadata: Optional[Dict[Text, Any]] = None, + ) -> None: self.key = key self.value = value - super(SlotSet, self).__init__(timestamp) + super().__init__(timestamp, metadata) - def __str__(self): - return "SlotSet(key: {}, value: {})".format(self.key, self.value) + def __str__(self) -> Text: + return f"SlotSet(key: {self.key}, value: {self.value})" - def __hash__(self): + def __hash__(self) -> int: return hash((self.key, jsonpickle.encode(self.value))) - def __eq__(self, other): + def __eq__(self, other) -> bool: if not isinstance(other, SlotSet): return False else: return (self.key, self.value) == (other.key, other.value) - def as_story_string(self): + def as_story_string(self) -> Text: props = json.dumps({self.key: self.value}, ensure_ascii=False) - return "{name}{props}".format(name=self.type_name, props=props) + return f"{self.type_name}{props}" @classmethod def _from_story_string(cls, parameters: Dict[Text, Any]) -> Optional[List[Event]]: @@ -456,23 +496,24 @@ def _from_story_string(cls, parameters: Dict[Text, Any]) -> Optional[List[Event] else: return None - def as_dict(self): - d = super(SlotSet, self).as_dict() + def as_dict(self) -> Dict[Text, Any]: + d = super().as_dict() d.update({"name": self.key, "value": self.value}) return d @classmethod - def _from_parameters(cls, parameters): + def _from_parameters(cls, parameters) -> "SlotSet": try: return SlotSet( parameters.get("name"), parameters.get("value"), parameters.get("timestamp"), + parameters.get("metadata"), ) except KeyError as e: - raise ValueError("Failed to parse set slot event. {}".format(e)) + raise ValueError(f"Failed to parse set slot event. 
{e}") - def apply_to(self, tracker): + def apply_to(self, tracker: "DialogueStateTracker") -> None: tracker._set_slot(self.key, self.value) @@ -486,19 +527,19 @@ class Restarted(Event): type_name = "restart" - def __hash__(self): + def __hash__(self) -> int: return hash(32143124312) - def __eq__(self, other): + def __eq__(self, other) -> bool: return isinstance(other, Restarted) - def __str__(self): + def __str__(self) -> Text: return "Restarted()" - def as_story_string(self): + def as_story_string(self) -> Text: return self.type_name - def apply_to(self, tracker): + def apply_to(self, tracker: "DialogueStateTracker") -> None: from rasa.core.actions.action import ( # pytype: disable=pyi-error ACTION_LISTEN_NAME, ) @@ -517,16 +558,16 @@ class UserUtteranceReverted(Event): type_name = "rewind" - def __hash__(self): + def __hash__(self) -> int: return hash(32143124315) - def __eq__(self, other): + def __eq__(self, other) -> bool: return isinstance(other, UserUtteranceReverted) - def __str__(self): + def __str__(self) -> Text: return "UserUtteranceReverted()" - def as_story_string(self): + def as_story_string(self) -> Text: return self.type_name def apply_to(self, tracker: "DialogueStateTracker") -> None: @@ -544,145 +585,225 @@ class AllSlotsReset(Event): type_name = "reset_slots" - def __hash__(self): + def __hash__(self) -> int: return hash(32143124316) - def __eq__(self, other): + def __eq__(self, other) -> bool: return isinstance(other, AllSlotsReset) - def __str__(self): + def __str__(self) -> Text: return "AllSlotsReset()" - def as_story_string(self): + def as_story_string(self) -> Text: return self.type_name - def apply_to(self, tracker): + def apply_to(self, tracker) -> None: tracker._reset_slots() # noinspection PyProtectedMember class ReminderScheduled(Event): - """ Allows asynchronous scheduling of action execution. - - As a side effect the message processor will schedule an action to be run - at the trigger date.""" + """Schedules the asynchronous triggering of a user intent + (with entities if needed) at a given time.""" type_name = "reminder" def __init__( self, - action_name, - trigger_date_time, - name=None, - kill_on_user_message=True, - timestamp=None, + intent: Text, + trigger_date_time: datetime, + entities: Optional[List[Dict]] = None, + name: Optional[Text] = None, + kill_on_user_message: bool = True, + timestamp: Optional[float] = None, + metadata: Optional[Dict[Text, Any]] = None, ): """Creates the reminder Args: - action_name: name of the action to be scheduled - trigger_date_time: date at which the execution of the action - should be triggered (either utc or with tz) - name: id of the reminder. if there are multiple reminders with - the same id only the last will be run + intent: Name of the intent to be triggered. + trigger_date_time: Date at which the execution of the action + should be triggered (either utc or with tz). + name: ID of the reminder. If there are multiple reminders with + the same id only the last will be run. + entities: Entities that should be supplied together with the + triggered intent. kill_on_user_message: ``True`` means a user message before the - trigger date will abort the reminder - timestamp: creation date of the event + trigger date will abort the reminder. + timestamp: Creation date of the event. + metadata: Optional event metadata. 
""" - - self.action_name = action_name + self.intent = intent + self.entities = entities self.trigger_date_time = trigger_date_time self.kill_on_user_message = kill_on_user_message self.name = name if name is not None else str(uuid.uuid1()) - super(ReminderScheduled, self).__init__(timestamp) + super().__init__(timestamp, metadata) - def __hash__(self): + def __hash__(self) -> int: return hash( ( - self.action_name, + self.intent, + self.entities, self.trigger_date_time.isoformat(), self.kill_on_user_message, self.name, ) ) - def __eq__(self, other): + def __eq__(self, other) -> bool: if not isinstance(other, ReminderScheduled): return False else: return self.name == other.name - def __str__(self): + def __str__(self) -> Text: return ( - "ReminderScheduled(" - "action: {}, trigger_date: {}, name: {}" - ")".format(self.action_name, self.trigger_date_time, self.name) + f"ReminderScheduled(intent: {self.intent}, trigger_date: {self.trigger_date_time}, " + f"entities: {self.entities}, name: {self.name})" ) - def _data_obj(self): + def scheduled_job_name(self, sender_id: Text) -> Text: + return ( + f"[{hash(self.name)},{hash(self.intent)},{hash(str(self.entities))}]" + f"{ACTION_NAME_SENDER_ID_CONNECTOR_STR}" + f"{sender_id}" + ) + + def _properties(self) -> Dict[Text, Any]: return { - "action": self.action_name, + "intent": self.intent, "date_time": self.trigger_date_time.isoformat(), + "entities": self.entities, "name": self.name, "kill_on_user_msg": self.kill_on_user_message, } - def as_story_string(self): - props = json.dumps(self._data_obj()) - return "{name}{props}".format(name=self.type_name, props=props) + def as_story_string(self) -> Text: + props = json.dumps(self._properties()) + return f"{self.type_name}{props}" - def as_dict(self): - d = super(ReminderScheduled, self).as_dict() - d.update(self._data_obj()) + def as_dict(self) -> Dict[Text, Any]: + d = super().as_dict() + d.update(self._properties()) return d @classmethod def _from_story_string(cls, parameters: Dict[Text, Any]) -> Optional[List[Event]]: trigger_date_time = parser.parse(parameters.get("date_time")) + return [ ReminderScheduled( - parameters.get("action"), + parameters.get("intent"), trigger_date_time, - parameters.get("name", None), - parameters.get("kill_on_user_msg", True), - parameters.get("timestamp"), + parameters.get("entities"), + name=parameters.get("name"), + kill_on_user_message=parameters.get("kill_on_user_msg", True), + timestamp=parameters.get("timestamp"), + metadata=parameters.get("metadata"), ) ] # noinspection PyProtectedMember class ReminderCancelled(Event): - """Cancel all jobs with a specific name.""" + """Cancel certain jobs.""" type_name = "cancel_reminder" - def __init__(self, action_name, timestamp=None): + def __init__( + self, + name: Optional[Text] = None, + intent: Optional[Text] = None, + entities: Optional[List[Dict]] = None, + timestamp: Optional[float] = None, + metadata: Optional[Dict[Text, Any]] = None, + ): + """Creates a ReminderCancelled event. + + If all arguments are `None`, this will cancel all reminders. + are to be cancelled. If no arguments are supplied, this will cancel all reminders. + + Args: + name: Name of the reminder to be cancelled. + intent: Intent name that is to be used to identify the reminders to be cancelled. + entities: Entities that are to be used to identify the reminders to be cancelled. + timestamp: Optional timestamp. + metadata: Optional event metadata. 
""" + + self.name = name + self.intent = intent + self.entities = entities + super().__init__(timestamp, metadata) + + def __hash__(self) -> int: + return hash((self.name, self.intent, str(self.entities))) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, ReminderCancelled): + return False + else: + return hash(self) == hash(other) + + def __str__(self) -> Text: + return f"ReminderCancelled(name: {self.name}, intent: {self.intent}, entities: {self.entities})" + + def cancels_job_with_name(self, job_name: Text, sender_id: Text) -> bool: + """Determines if this `ReminderCancelled` event should cancel the job with the given name. + Args: - action_name: name of the scheduled action to be cancelled + job_name: Name of the job to be tested. + sender_id: The `sender_id` of the tracker. + + Returns: + `True`, if this `ReminderCancelled` event should cancel the job with the given name, + and `False` otherwise. """ - self.action_name = action_name - super(ReminderCancelled, self).__init__(timestamp) + match = re.match( + rf"^\[([\d\-]*),([\d\-]*),([\d\-]*)\]" + rf"({re.escape(ACTION_NAME_SENDER_ID_CONNECTOR_STR)}{re.escape(sender_id)})", + job_name, + ) + if not match: + return False + name_hash, intent_hash, entities_hash = match.group(1, 2, 3) - def __hash__(self): - return hash(self.action_name) + # Cancel everything unless names/intents/entities are given to + # narrow it down. + return ( + ((not self.name) or self._matches_name_hash(name_hash)) + and ((not self.intent) or self._matches_intent_hash(intent_hash)) + and ((not self.entities) or self._matches_entities_hash(entities_hash)) + ) - def __eq__(self, other): - return isinstance(other, ReminderCancelled) + def _matches_name_hash(self, name_hash: Text) -> bool: + return str(hash(self.name)) == name_hash - def __str__(self): - return "ReminderCancelled(action: {})".format(self.action_name) + def _matches_intent_hash(self, intent_hash: Text) -> bool: + return str(hash(self.intent)) == intent_hash - def as_story_string(self): - props = json.dumps({"action": self.action_name}) - return "{name}{props}".format(name=self.type_name, props=props) + def _matches_entities_hash(self, entities_hash: Text) -> bool: + return str(hash(str(self.entities))) == entities_hash + + def as_story_string(self) -> Text: + props = json.dumps( + {"name": self.name, "intent": self.intent, "entities": self.entities} + ) + return f"{self.type_name}{props}" @classmethod def _from_story_string(cls, parameters: Dict[Text, Any]) -> Optional[List[Event]]: return [ - ReminderCancelled(parameters.get("action"), parameters.get("timestamp")) + ReminderCancelled( + parameters.get("name"), + parameters.get("intent"), + parameters.get("entities"), + timestamp=parameters.get("timestamp"), + metadata=parameters.get("metadata"), + ) ] @@ -698,16 +819,16 @@ class ActionReverted(Event): type_name = "undo" - def __hash__(self): + def __hash__(self) -> int: return hash(32143124318) - def __eq__(self, other): + def __eq__(self, other) -> bool: return isinstance(other, ActionReverted) - def __str__(self): + def __str__(self) -> Text: return "ActionReverted()" - def as_story_string(self): + def as_story_string(self) -> Text: return self.type_name def apply_to(self, tracker: "DialogueStateTracker") -> None: @@ -721,20 +842,35 @@ class StoryExported(Event): type_name = "export" - def __init__(self, path=None, timestamp=None): + def __init__( + self, + path: Optional[Text] = None, + timestamp: Optional[float] = None, + metadata: Optional[Dict[Text, Any]] = None, + ): self.path 
= path - super(StoryExported, self).__init__(timestamp) + super().__init__(timestamp, metadata) - def __hash__(self): + def __hash__(self) -> int: return hash(32143124319) - def __eq__(self, other): + def __eq__(self, other) -> bool: return isinstance(other, StoryExported) - def __str__(self): + def __str__(self) -> Text: return "StoryExported()" - def as_story_string(self): + @classmethod + def _from_story_string(cls, parameters: Dict[Text, Any]) -> Optional[List[Event]]: + return [ + StoryExported( + parameters.get("path"), + parameters.get("timestamp"), + parameters.get("metadata"), + ) + ] + + def as_story_string(self) -> Text: return self.type_name def apply_to(self, tracker: "DialogueStateTracker") -> None: @@ -748,33 +884,44 @@ class FollowupAction(Event): type_name = "followup" - def __init__(self, name, timestamp=None): + def __init__( + self, + name: Text, + timestamp: Optional[float] = None, + metadata: Optional[Dict[Text, Any]] = None, + ) -> None: self.action_name = name - super(FollowupAction, self).__init__(timestamp) + super().__init__(timestamp, metadata) - def __hash__(self): + def __hash__(self) -> int: return hash(self.action_name) - def __eq__(self, other): + def __eq__(self, other) -> bool: if not isinstance(other, FollowupAction): return False else: return self.action_name == other.action_name - def __str__(self): - return "FollowupAction(action: {})".format(self.action_name) + def __str__(self) -> Text: + return f"FollowupAction(action: {self.action_name})" - def as_story_string(self): + def as_story_string(self) -> Text: props = json.dumps({"name": self.action_name}) - return "{name}{props}".format(name=self.type_name, props=props) + return f"{self.type_name}{props}" @classmethod def _from_story_string(cls, parameters: Dict[Text, Any]) -> Optional[List[Event]]: - return [FollowupAction(parameters.get("name"), parameters.get("timestamp"))] + return [ + FollowupAction( + parameters.get("name"), + parameters.get("timestamp"), + parameters.get("metadata"), + ) + ] - def as_dict(self): - d = super(FollowupAction, self).as_dict() + def as_dict(self) -> Dict[Text, Any]: + d = super().as_dict() d.update({"name": self.action_name}) return d @@ -791,19 +938,19 @@ class ConversationPaused(Event): type_name = "pause" - def __hash__(self): + def __hash__(self) -> int: return hash(32143124313) - def __eq__(self, other): + def __eq__(self, other) -> bool: return isinstance(other, ConversationPaused) - def __str__(self): + def __str__(self) -> Text: return "ConversationPaused()" - def as_story_string(self): + def as_story_string(self) -> Text: return self.type_name - def apply_to(self, tracker): + def apply_to(self, tracker) -> None: tracker._paused = True @@ -816,19 +963,19 @@ class ConversationResumed(Event): type_name = "resume" - def __hash__(self): + def __hash__(self) -> int: return hash(32143124314) - def __eq__(self, other): + def __eq__(self, other) -> bool: return isinstance(other, ConversationResumed) - def __str__(self): + def __str__(self) -> Text: return "ConversationResumed()" - def as_story_string(self): + def as_story_string(self) -> Text: return self.type_name - def apply_to(self, tracker): + def apply_to(self, tracker) -> None: tracker._paused = False @@ -846,29 +993,30 @@ def __init__( action_name: Text, policy: Optional[Text] = None, confidence: Optional[float] = None, - timestamp: Optional[int] = None, + timestamp: Optional[float] = None, + metadata: Optional[Dict] = None, ): self.action_name = action_name self.policy = policy self.confidence = confidence 
self.unpredictable = False - super(ActionExecuted, self).__init__(timestamp) + super().__init__(timestamp, metadata) - def __str__(self): + def __str__(self) -> Text: return "ActionExecuted(action: {}, policy: {}, confidence: {})".format( self.action_name, self.policy, self.confidence ) - def __hash__(self): + def __hash__(self) -> int: return hash(self.action_name) - def __eq__(self, other): + def __eq__(self, other) -> bool: if not isinstance(other, ActionExecuted): return False else: return self.action_name == other.action_name - def as_story_string(self): + def as_story_string(self) -> Text: return self.action_name @classmethod @@ -880,11 +1028,12 @@ def _from_story_string(cls, parameters: Dict[Text, Any]) -> Optional[List[Event] parameters.get("policy"), parameters.get("confidence"), parameters.get("timestamp"), + parameters.get("metadata"), ) ] - def as_dict(self): - d = super(ActionExecuted, self).as_dict() + def as_dict(self) -> Dict[Text, Any]: + d = super().as_dict() policy = None # for backwards compatibility (persisted evemts) if hasattr(self, "policy"): policy = self.policy @@ -909,15 +1058,21 @@ class AgentUttered(Event): type_name = "agent" - def __init__(self, text=None, data=None, timestamp=None): + def __init__( + self, + text: Optional[Text] = None, + data: Optional[Any] = None, + timestamp: Optional[float] = None, + metadata: Optional[Dict[Text, Any]] = None, + ) -> None: self.text = text self.data = data - super(AgentUttered, self).__init__(timestamp) + super().__init__(timestamp, metadata) - def __hash__(self): + def __hash__(self) -> int: return hash((self.text, jsonpickle.encode(self.data))) - def __eq__(self, other): + def __eq__(self, other) -> bool: if not isinstance(other, AgentUttered): return False else: @@ -926,7 +1081,7 @@ def __eq__(self, other): jsonpickle.encode(other.data), ) - def __str__(self): + def __str__(self) -> Text: return "AgentUttered(text: {}, data: {})".format( self.text, json.dumps(self.data) ) @@ -935,99 +1090,136 @@ def apply_to(self, tracker: "DialogueStateTracker") -> None: pass - def as_story_string(self): + def as_story_string(self) -> None: return None - def as_dict(self): - d = super(AgentUttered, self).as_dict() + def as_dict(self) -> Dict[Text, Any]: + d = super().as_dict() d.update({"text": self.text, "data": self.data}) return d @staticmethod - def empty(): + def empty() -> "AgentUttered": return AgentUttered() @classmethod - def _from_parameters(cls, parameters): + def _from_parameters(cls, parameters) -> "AgentUttered": try: return AgentUttered( parameters.get("text"), parameters.get("data"), parameters.get("timestamp"), + parameters.get("metadata"), ) except KeyError as e: - raise ValueError("Failed to parse agent uttered event. {}".format(e)) + raise ValueError(f"Failed to parse agent uttered event. {e}") -class Form(Event): - """If `name` is not None: activates a form with `name` - else deactivates active form +class ActiveLoop(Event): + """If `name` is not None: activates a loop with `name` else deactivates active loop. 
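+
+    Example (an illustrative sketch; "restaurant_form" is a made-up loop name):
+
+        ActiveLoop("restaurant_form")  # marks this loop as active on the tracker
+        ActiveLoop(None)               # deactivates the currently active loop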
""" - type_name = "form" + type_name = "active_loop" - def __init__(self, name, timestamp=None): + def __init__( + self, + name: Optional[Text], + timestamp: Optional[float] = None, + metadata: Optional[Dict[Text, Any]] = None, + ) -> None: self.name = name - super(Form, self).__init__(timestamp) + super().__init__(timestamp, metadata) - def __str__(self): - return "Form({})".format(self.name) + def __str__(self) -> Text: + return f"Loop({self.name})" - def __hash__(self): + def __hash__(self) -> int: return hash(self.name) - def __eq__(self, other): - if not isinstance(other, Form): + def __eq__(self, other) -> bool: + if not isinstance(other, ActiveLoop): return False else: return self.name == other.name - def as_story_string(self): + def as_story_string(self) -> Text: props = json.dumps({"name": self.name}) - return "{name}{props}".format(name=self.type_name, props=props) + return f"{ActiveLoop.type_name}{props}" @classmethod - def _from_story_string(cls, parameters): + def _from_story_string(cls, parameters: Dict[Text, Any]) -> List["ActiveLoop"]: """Called to convert a parsed story line into an event.""" - return [Form(parameters.get("name"), parameters.get("timestamp"))] + return [ + ActiveLoop( + parameters.get("name"), + parameters.get("timestamp"), + parameters.get("metadata"), + ) + ] - def as_dict(self): - d = super(Form, self).as_dict() + def as_dict(self) -> Dict[Text, Any]: + d = super().as_dict() d.update({"name": self.name}) return d def apply_to(self, tracker: "DialogueStateTracker") -> None: - tracker.change_form_to(self.name) + tracker.change_loop_to(self.name) + + +class LegacyForm(ActiveLoop): + """Legacy handler of old `Form` events. + + The `ActiveLoop` event used to be called `Form`. This class is there to handle old + legacy events which were stored with the old type name `form`. + """ + + type_name = "form" + + def as_dict(self) -> Dict[Text, Any]: + d = super().as_dict() + # Dump old `Form` events as `ActiveLoop` events instead of keeping the old + # event type. 
+ d["event"] = ActiveLoop.type_name + return d class FormValidation(Event): - """Event added by FormPolicy to notify form action - whether or not to validate the user input""" + """Event added by FormPolicy and RulePolicy to notify form action + whether or not to validate the user input.""" type_name = "form_validation" - def __init__(self, validate, timestamp=None): + def __init__( + self, + validate: bool, + timestamp: Optional[float] = None, + metadata: Optional[Dict[Text, Any]] = None, + ) -> None: self.validate = validate - super(FormValidation, self).__init__(timestamp) + super().__init__(timestamp, metadata) - def __str__(self): - return "FormValidation({})".format(self.validate) + def __str__(self) -> Text: + return f"FormValidation({self.validate})" - def __hash__(self): + def __hash__(self) -> int: return hash(self.validate) - def __eq__(self, other): + def __eq__(self, other) -> bool: return isinstance(other, FormValidation) - def as_story_string(self): + def as_story_string(self) -> None: return None @classmethod - def _from_parameters(cls, parameters): - return FormValidation(parameters.get("validate"), parameters.get("timestamp")) + def _from_parameters(cls, parameters) -> "FormValidation": + return FormValidation( + parameters.get("validate"), + parameters.get("timestamp"), + parameters.get("metadata"), + ) - def as_dict(self): - d = super(FormValidation, self).as_dict() + def as_dict(self) -> Dict[Text, Any]: + d = super().as_dict() d.update({"validate": self.validate}) return d @@ -1040,42 +1232,50 @@ class ActionExecutionRejected(Event): type_name = "action_execution_rejected" - def __init__(self, action_name, policy=None, confidence=None, timestamp=None): + def __init__( + self, + action_name: Text, + policy: Optional[Text] = None, + confidence: Optional[float] = None, + timestamp: Optional[float] = None, + metadata: Optional[Dict[Text, Any]] = None, + ) -> None: self.action_name = action_name self.policy = policy self.confidence = confidence - super(ActionExecutionRejected, self).__init__(timestamp) + super().__init__(timestamp, metadata) - def __str__(self): + def __str__(self) -> Text: return ( "ActionExecutionRejected(" "action: {}, policy: {}, confidence: {})" "".format(self.action_name, self.policy, self.confidence) ) - def __hash__(self): + def __hash__(self) -> int: return hash(self.action_name) - def __eq__(self, other): + def __eq__(self, other) -> bool: if not isinstance(other, ActionExecutionRejected): return False else: return self.action_name == other.action_name @classmethod - def _from_parameters(cls, parameters): + def _from_parameters(cls, parameters) -> "ActionExecutionRejected": return ActionExecutionRejected( parameters.get("name"), parameters.get("policy"), parameters.get("confidence"), parameters.get("timestamp"), + parameters.get("metadata"), ) - def as_story_string(self): + def as_story_string(self) -> None: return None - def as_dict(self): - d = super(ActionExecutionRejected, self).as_dict() + def as_dict(self) -> Dict[Text, Any]: + d = super().as_dict() d.update( { "name": self.action_name, @@ -1087,3 +1287,28 @@ def as_dict(self): def apply_to(self, tracker: "DialogueStateTracker") -> None: tracker.reject_action(self.action_name) + + +class SessionStarted(Event): + """Mark the beginning of a new conversation session.""" + + type_name = "session_started" + + def __hash__(self) -> int: + return hash(32143124320) + + def __eq__(self, other: Any) -> bool: + return isinstance(other, SessionStarted) + + def __str__(self) -> Text: + return 
"SessionStarted()" + + def as_story_string(self) -> None: + logger.warning( + f"'{self.type_name}' events cannot be serialised as story strings." + ) + return None + + def apply_to(self, tracker: "DialogueStateTracker") -> None: + # noinspection PyProtectedMember + tracker._reset() diff --git a/rasa/core/exceptions.py b/rasa/core/exceptions.py index 51cac67fb213..5c3de7db5d1c 100644 --- a/rasa/core/exceptions.py +++ b/rasa/core/exceptions.py @@ -1,3 +1,5 @@ +from typing import Optional, Text + from rasa.exceptions import RasaException @@ -8,7 +10,7 @@ class RasaCoreException(RasaException): class StoryParseError(RasaCoreException, ValueError): """Raised if there is an error while parsing a story file.""" - def __init__(self, message): + def __init__(self, message) -> None: self.message = message @@ -19,11 +21,11 @@ class UnsupportedDialogueModelError(RasaCoreException): message -- explanation of why the model is invalid """ - def __init__(self, message, model_version=None): + def __init__(self, message: Text, model_version: Optional[Text] = None) -> None: self.message = message self.model_version = model_version - def __str__(self): + def __str__(self) -> Text: return self.message @@ -34,5 +36,5 @@ class AgentNotReady(RasaCoreException): if someone tries to parse a message with that agent, this exception will be thrown.""" - def __init__(self, message): + def __init__(self, message: Text) -> None: self.message = message diff --git a/rasa/core/exporter.py b/rasa/core/exporter.py new file mode 100644 index 000000000000..d533d7b5b99f --- /dev/null +++ b/rasa/core/exporter.py @@ -0,0 +1,302 @@ +import itertools +import logging +import uuid +from typing import Text, Optional, List, Set, Dict, Any + +from tqdm import tqdm + +import rasa.cli.utils as cli_utils +from rasa.core.brokers.broker import EventBroker +from rasa.core.brokers.pika import PikaEventBroker +from rasa.core.constants import RASA_EXPORT_PROCESS_ID_HEADER_NAME +from rasa.core.tracker_store import TrackerStore +from rasa.core.trackers import EventVerbosity +from rasa.exceptions import ( + NoEventsToMigrateError, + NoConversationsInTrackerStoreError, + NoEventsInTimeRangeError, + PublishingError, +) + +logger = logging.getLogger(__name__) + + +class Exporter: + """Manages the publishing of events in a tracker store to an event broker. + + Attributes: + endpoints_path: Path to the endpoints file used to configure the event + broker and tracker store. If `None`, the default path ('endpoints.yml') + is used. + tracker_store: `TrackerStore` to export conversations from. + event_broker: `EventBroker` to export conversations to. + requested_conversation_ids: List of conversation IDs requested to be + processed. + minimum_timestamp: Minimum timestamp of events that are published. + If `None`, apply no such constraint. + maximum_timestamp: Maximum timestamp of events that are published. + If `None`, apply no such constraint. + """ + + def __init__( + self, + tracker_store: TrackerStore, + event_broker: EventBroker, + endpoints_path: Text, + requested_conversation_ids: Optional[Text] = None, + minimum_timestamp: Optional[float] = None, + maximum_timestamp: Optional[float] = None, + ) -> None: + self.endpoints_path = endpoints_path + self.tracker_store = tracker_store + # The `TrackerStore` should return all events on `retrieve` and not just the + # ones from the last session. 
+ self.tracker_store.load_events_from_previous_conversation_sessions = True + + self.event_broker = event_broker + self.requested_conversation_ids = requested_conversation_ids + self.minimum_timestamp = minimum_timestamp + self.maximum_timestamp = maximum_timestamp + + def publish_events(self) -> int: + """Publish events in a tracker store using an event broker. + + Exits if the publishing of events is interrupted due to an error. In that case, + the CLI command to continue the export where it was interrupted is printed. + + Returns: + The number of successfully published events. + + """ + events = self._fetch_events_within_time_range() + + cli_utils.print_info( + f"Selected {len(events)} events for publishing. Ready to go 🚀" + ) + + published_events = 0 + current_timestamp = None + + headers = self._get_message_headers() + + for event in tqdm(events, "events"): + # noinspection PyBroadException + try: + self._publish_with_message_headers(event, headers) + published_events += 1 + current_timestamp = event["timestamp"] + except Exception as e: + logger.exception(e) + raise PublishingError(current_timestamp) + + self.event_broker.close() + + return published_events + + def _get_message_headers(self) -> Optional[Dict[Text, Text]]: + """Generate a message header for publishing events to a `PikaEventBroker`. + + Returns: + Message headers with a randomly generated uuid under the + `RASA_EXPORT_PROCESS_ID_HEADER_NAME` key if `self.event_broker` is a + `PikaEventBroker`, else `None`. + + """ + if isinstance(self.event_broker, PikaEventBroker): + return {RASA_EXPORT_PROCESS_ID_HEADER_NAME: uuid.uuid4().hex} + + return None + + def _publish_with_message_headers( + self, event: Dict[Text, Any], headers: Optional[Dict[Text, Text]] + ) -> None: + """Publish `event` to a message broker with `headers`. + + Args: + event: Serialized event to be published. + headers: Message headers to be published if `self.event_broker` is a + `PikaEventBroker`. + + """ + if isinstance(self.event_broker, PikaEventBroker): + self.event_broker.publish(event=event, headers=headers) + else: + self.event_broker.publish(event) + + def _get_conversation_ids_in_tracker(self) -> Set[Text]: + """Fetch conversation IDs in `self.tracker_store`. + + Returns: + A set of conversation IDs in `self.tracker_store`. + + Raises: + `NoConversationsInTrackerStoreError` if + `conversation_ids_in_tracker_store` is empty. + + """ + conversation_ids_in_tracker_store = set(self.tracker_store.keys()) + + if conversation_ids_in_tracker_store: + return conversation_ids_in_tracker_store + + raise NoConversationsInTrackerStoreError( + "Could not find any conversations in connected tracker store. " + "Please validate your `endpoints.yml` and make sure the defined " + "tracker store exists. Exiting." + ) + + def _validate_all_requested_ids_exist( + self, conversation_ids_in_tracker_store: Set[Text] + ) -> None: + """Warn user if `self.requested_conversation_ids` contains IDs not found in + `conversation_ids_in_tracker_store` + + Args: + conversation_ids_in_tracker_store: Set of conversation IDs contained in + the tracker store. 
+ + """ + missing_ids_in_tracker_store = ( + set(self.requested_conversation_ids) - conversation_ids_in_tracker_store + ) + if missing_ids_in_tracker_store: + cli_utils.print_warning( + f"Could not find the following requested " + f"conversation IDs in connected tracker store: " + f"{', '.join(sorted(missing_ids_in_tracker_store))}" + ) + + def _get_conversation_ids_to_process(self) -> Set[Text]: + """Get conversation IDs that are good for processing. + + Finds the intersection of events that are contained in the tracker store with + those events requested as a command-line argument. + + Returns: + Conversation IDs that are both requested and contained in the tracker + store. If no conversation IDs are requested, all conversation IDs in the + tracker store are returned. + + """ + conversation_ids_in_tracker_store = self._get_conversation_ids_in_tracker() + + if not self.requested_conversation_ids: + return conversation_ids_in_tracker_store + + self._validate_all_requested_ids_exist(conversation_ids_in_tracker_store) + + conversation_ids_to_process = conversation_ids_in_tracker_store & set( + self.requested_conversation_ids + ) + + if not conversation_ids_to_process: + raise NoEventsToMigrateError( + "Could not find an overlap between the requested " + "conversation IDs and those found in the tracker store. Exiting." + ) + + return conversation_ids_to_process + + def _fetch_events_within_time_range(self) -> List[Dict[Text, Any]]: + """Fetch all events for `conversation_ids` within the supplied time range. + + Returns: + Serialized events with added `sender_id` field. + + """ + conversation_ids_to_process = self._get_conversation_ids_to_process() + + cli_utils.print_info( + f"Fetching events for {len(conversation_ids_to_process)} " + f"conversation IDs:" + ) + + events = [] + + for conversation_id in tqdm(conversation_ids_to_process, "conversation IDs"): + tracker = self.tracker_store.retrieve(conversation_id) + if not tracker: + logger.info( + f"Could not retrieve tracker for conversation ID " + f"'{conversation_id}'. Skipping." + ) + continue + + _events = tracker.current_state(EventVerbosity.ALL)["events"] + + if not _events: + logger.info( + f"No events to migrate for conversation ID '{conversation_id}'." + ) + continue + + # the conversation IDs are needed in the event publishing + events.extend( + self._get_events_for_conversation_id(_events, conversation_id) + ) + + return self._sort_and_select_events_by_timestamp(events) + + @staticmethod + def _get_events_for_conversation_id( + events: List[Dict[Text, Any]], conversation_id: Text + ) -> List[Dict[Text, Any]]: + """Get serialised events with added `sender_id` key. + + Args: + events: Events to modify. + conversation_id: Conversation ID to add to events. + + Returns: + Events with added `sender_id` key. + + """ + events_with_conversation_id = [] + + for event in events: + event["sender_id"] = conversation_id + events_with_conversation_id.append(event) + + return events_with_conversation_id + + def _sort_and_select_events_by_timestamp( + self, events: List[Dict[Text, Any]] + ) -> List[Dict[Text, Any]]: + """Sort list of events by ascending timestamp, and select events within time + range. + + Args: + events: List of serialized events to be sorted and selected from. + + Returns: + List of serialized and sorted (by timestamp) events within the requested + time range. + + Raises: + `NoEventsInTimeRangeError` error if no events are found within the + requested time range. 
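+
+        Example (an illustrative sketch with made-up timestamps, assuming the
+        exporter was created with `minimum_timestamp=2.0` and
+        `maximum_timestamp=4.0`):
+
+            [{"timestamp": 3.5}, {"timestamp": 1.0}, {"timestamp": 4.0}]
+            # is sorted to 1.0, 3.5, 4.0; 1.0 is then dropped (below the minimum)
+            # and 4.0 is excluded (not below the maximum), leaving [{"timestamp": 3.5}]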
+ + """ + logger.debug(f"Sorting and selecting from {len(events)} total events found.") + # sort the events by timestamp just in case they're not sorted already + events = sorted(events, key=lambda x: x["timestamp"]) + + # drop events failing minimum timestamp requirement + if self.minimum_timestamp is not None: + events = itertools.dropwhile( + lambda x: x["timestamp"] < self.minimum_timestamp, events + ) + + # select events passing maximum timestamp requirement + if self.maximum_timestamp is not None: + events = itertools.takewhile( + lambda x: x["timestamp"] < self.maximum_timestamp, events + ) + + events = list(events) + if not events: + raise NoEventsInTimeRangeError( + "Could not find any events within requested time range. Exiting." + ) + + return events diff --git a/rasa/core/featurizers.py b/rasa/core/featurizers.py index 2374181d8f57..39f12f086d90 100644 --- a/rasa/core/featurizers.py +++ b/rasa/core/featurizers.py @@ -18,7 +18,7 @@ logger = logging.getLogger(__name__) -class SingleStateFeaturizer(object): +class SingleStateFeaturizer: """Base class for mechanisms to transform the conversations state into ML formats. Subclasses of SingleStateFeaturizer decide how the bot will transform @@ -54,7 +54,7 @@ def action_as_one_hot(action: Text, domain: Domain) -> np.ndarray: def create_encoded_all_actions(self, domain: Domain) -> np.ndarray: """Create matrix with all actions from domain encoded in rows.""" - pass + raise NotImplementedError("Featurizer must implement encoding actions.") class BinarySingleStateFeaturizer(SingleStateFeaturizer): @@ -63,10 +63,10 @@ class BinarySingleStateFeaturizer(SingleStateFeaturizer): All features should be either on or off, denoting them with 1 or 0. """ - def __init__(self): + def __init__(self) -> None: """Declares instant variables.""" - super(BinarySingleStateFeaturizer, self).__init__() + super().__init__() self.num_features = None self.input_state_map = None @@ -151,7 +151,7 @@ def __init__( self, use_shared_vocab: bool = False, split_symbol: Text = "_" ) -> None: """inits vocabulary for label bag of words representation""" - super(LabelTokenizerSingleStateFeaturizer, self).__init__() + super().__init__() self.use_shared_vocab = use_shared_vocab self.split_symbol = split_symbol @@ -165,16 +165,16 @@ def __init__( self.user_vocab = None @staticmethod - def _create_label_token_dict(labels, split_symbol="_"): + def _create_label_token_dict(labels, split_symbol="_") -> Dict[Text, int]: """Splits labels into tokens by using provided symbol. Creates the lookup dictionary for this tokens. Values in this dict are used for featurization. """ - distinct_tokens = set( - [token for label in labels for token in label.split(split_symbol)] - ) + distinct_tokens = { + token for label in labels for token in label.split(split_symbol) + } return {token: idx for idx, token in enumerate(sorted(distinct_tokens))} def prepare_from_domain(self, domain: Domain) -> None: @@ -238,8 +238,7 @@ def encode(self, state: Dict[Text, float]) -> np.ndarray: else: logger.warning( - "Feature '{}' could not be found in " - "feature map.".format(state_name) + f"Feature '{state_name}' could not be found in feature map." 
) if using_only_ints: @@ -260,7 +259,7 @@ def create_encoded_all_actions(self, domain: Domain) -> np.ndarray: return encoded_all_actions -class TrackerFeaturizer(object): +class TrackerFeaturizer: """Base class for actual tracker featurizers.""" def __init__( @@ -428,15 +427,15 @@ def create_X( X, _ = self._featurize_states(trackers_as_states) return X - def persist(self, path): + def persist(self, path) -> None: featurizer_file = os.path.join(path, "featurizer.json") rasa.utils.io.create_directory_for_file(featurizer_file) - with open(featurizer_file, "w", encoding="utf-8") as f: - # noinspection PyTypeChecker - f.write(str(jsonpickle.encode(self))) + + # noinspection PyTypeChecker + rasa.utils.io.write_text_file(str(jsonpickle.encode(self)), featurizer_file) @staticmethod - def load(path): + def load(path) -> Optional["TrackerFeaturizer"]: """Loads the featurizer from file.""" featurizer_file = os.path.join(path, "featurizer.json") @@ -463,13 +462,11 @@ def __init__( use_intent_probabilities: bool = False, ) -> None: - super(FullDialogueTrackerFeaturizer, self).__init__( - state_featurizer, use_intent_probabilities - ) + super().__init__(state_featurizer, use_intent_probabilities) self.max_len = None @staticmethod - def _calculate_max_len(trackers_as_actions): + def _calculate_max_len(trackers_as_actions) -> Optional[int]: """Calculate the length of the longest dialogue.""" if trackers_as_actions: @@ -532,7 +529,7 @@ def training_states_and_actions( trackers_as_actions.append(actions) self.max_len = self._calculate_max_len(trackers_as_actions) - logger.debug("The longest dialogue has {} actions.".format(self.max_len)) + logger.debug(f"The longest dialogue has {self.max_len} actions.") return trackers_as_states, trackers_as_actions @@ -560,26 +557,26 @@ class MaxHistoryTrackerFeaturizer(TrackerFeaturizer): def __init__( self, state_featurizer: Optional[SingleStateFeaturizer] = None, - max_history: int = None, + max_history: Optional[int] = None, remove_duplicates: bool = True, use_intent_probabilities: bool = False, ) -> None: - super(MaxHistoryTrackerFeaturizer, self).__init__( - state_featurizer, use_intent_probabilities - ) + super().__init__(state_featurizer, use_intent_probabilities) self.max_history = max_history or self.MAX_HISTORY_DEFAULT self.remove_duplicates = remove_duplicates @staticmethod def slice_state_history( - states: List[Dict[Text, float]], slice_length: int + states: List[Dict[Text, float]], slice_length: Optional[int] ) -> List[Optional[Dict[Text, float]]]: """Slices states from the trackers history. If the slice is at the array borders, padding will be added to ensure the slice length. 
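+
+        Example (an illustrative sketch; `state_1` etc. stand for state
+        dictionaries, and front-padding with `None` is assumed here, in line
+        with the `Optional` entries of the return type):
+
+            slice_state_history([state_1, state_2], slice_length=4)
+            # -> [None, None, state_1, state_2]
+
+            slice_state_history([state_1, state_2, state_3], slice_length=2)
+            # -> [state_2, state_3]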
""" + if slice_length is None: + return states slice_end = len(states) slice_start = max(0, slice_end - slice_length) @@ -589,12 +586,10 @@ def slice_state_history( return state_features @staticmethod - def _hash_example(states, action): + def _hash_example(states, action) -> int: """Hash states for efficient deduplication.""" - frozen_states = tuple( - (s if s is None else frozenset(s.items()) for s in states) - ) + frozen_states = tuple(s if s is None else frozenset(s.items()) for s in states) frozen_actions = (action,) return hash((frozen_states, frozen_actions)) diff --git a/rasa/core/interpreter.py b/rasa/core/interpreter.py index a0d32700eed7..b0ac7395091b 100644 --- a/rasa/core/interpreter.py +++ b/rasa/core/interpreter.py @@ -7,15 +7,18 @@ import os from typing import Text, List, Dict, Any, Union, Optional, Tuple +from rasa.constants import DOCS_URL_STORIES from rasa.core import constants from rasa.core.trackers import DialogueStateTracker from rasa.core.constants import INTENT_MESSAGE_PREFIX +from rasa.nlu.constants import INTENT_NAME_KEY +from rasa.utils.common import raise_warning, class_from_module_path from rasa.utils.endpoints import EndpointConfig logger = logging.getLogger(__name__) -class NaturalLanguageInterpreter(object): +class NaturalLanguageInterpreter: async def parse( self, text: Text, @@ -28,40 +31,27 @@ async def parse( @staticmethod def create( - obj: Union[Text, "NaturalLanguageInterpreter"], - endpoint: Optional[EndpointConfig] = None, + obj: Union["NaturalLanguageInterpreter", EndpointConfig, Text, None] ) -> "NaturalLanguageInterpreter": + """Factory to create an natural language interpreter.""" if isinstance(obj, NaturalLanguageInterpreter): return obj - - if not isinstance(obj, str): - if obj is not None: - logger.warning( - "Tried to create NLU interpreter " - "from '{}', which is not possible." - "Using RegexInterpreter instead." - "".format(obj) - ) + elif isinstance(obj, str) and os.path.exists(obj): + return RasaNLUInterpreter(model_directory=obj) + elif isinstance(obj, str) and not os.path.exists(obj): + # user passed in a string, but file does not exist + logger.warning( + f"No local NLU model '{obj}' found. Using RegexInterpreter instead." + ) return RegexInterpreter() - - if endpoint is None: - if not os.path.exists(obj): - logger.warning( - "No local NLU model '{}' found. Using RegexInterpreter instead.".format( - obj - ) - ) - return RegexInterpreter() - else: - return RasaNLUInterpreter(model_directory=obj) - - return RasaNLUHttpInterpreter(endpoint) + else: + return _create_from_endpoint_config(obj) class RegexInterpreter(NaturalLanguageInterpreter): @staticmethod - def allowed_prefixes(): + def allowed_prefixes() -> Text: return INTENT_MESSAGE_PREFIX @staticmethod @@ -97,17 +87,17 @@ def _parse_parameters( return RegexInterpreter._create_entities(parsed_entities, sidx, eidx) else: raise Exception( - "Parsed value isn't a json object " - "(instead parser found '{}')" - ".".format(type(parsed_entities)) + f"Parsed value isn't a json object " + f"(instead parser found '{type(parsed_entities)}')" ) except Exception as e: - logger.warning( - "Invalid to parse arguments in line " - "'{}'. Failed to decode parameters " - "as a json object. Make sure the intent " - "is followed by a proper json object. " - "Error: {}".format(user_input, e) + raise_warning( + f"Failed to parse arguments in line " + f"'{user_input}'. Failed to decode parameters " + f"as a json object. Make sure the intent " + f"is followed by a proper json object. 
" + f"Error: {e}", + docs=DOCS_URL_STORIES, ) return [] @@ -119,11 +109,12 @@ def _parse_confidence(confidence_str: Text) -> float: try: return float(confidence_str.strip()[1:]) except Exception as e: - logger.warning( - "Invalid to parse confidence value in line " - "'{}'. Make sure the intent confidence is an " - "@ followed by a decimal number. " - "Error: {}".format(confidence_str, e) + raise_warning( + f"Invalid to parse confidence value in line " + f"'{confidence_str}'. Make sure the intent confidence is an " + f"@ followed by a decimal number. " + f"Error: {e}", + docs=DOCS_URL_STORIES, ) return 0.0 @@ -135,7 +126,7 @@ def _starts_with_intent_prefix(self, text: Text) -> bool: @staticmethod def extract_intent_and_entities( - user_input: Text + user_input: Text, ) -> Tuple[Optional[Text], float, List[Dict[Text, Any]]]: """Parse the user input using regexes to extract intent & entities.""" @@ -151,9 +142,7 @@ def extract_intent_and_entities( return event_name, confidence, entities else: - logger.warning( - "Failed to parse intent end entities from '{}'. ".format(user_input) - ) + logger.warning(f"Failed to parse intent end entities from '{user_input}'.") return None, 0.0, [] async def parse( @@ -164,6 +153,16 @@ async def parse( ) -> Dict[Text, Any]: """Parse a text message.""" + return self.synchronous_parse(text, message_id, tracker) + + def synchronous_parse( + self, + text: Text, + message_id: Optional[Text] = None, + tracker: DialogueStateTracker = None, + ) -> Dict[Text, Any]: + """Parse a text message.""" + intent, confidence, entities = self.extract_intent_and_entities(text) if self._starts_with_intent_prefix(text): @@ -173,18 +172,18 @@ async def parse( return { "text": message_text, - "intent": {"name": intent, "confidence": confidence}, - "intent_ranking": [{"name": intent, "confidence": confidence}], + "intent": {INTENT_NAME_KEY: intent, "confidence": confidence}, + "intent_ranking": [{INTENT_NAME_KEY: intent, "confidence": confidence}], "entities": entities, } class RasaNLUHttpInterpreter(NaturalLanguageInterpreter): - def __init__(self, endpoint: EndpointConfig = None) -> None: - if endpoint: - self.endpoint = endpoint + def __init__(self, endpoint_config: Optional[EndpointConfig] = None) -> None: + if endpoint_config: + self.endpoint_config = endpoint_config else: - self.endpoint = EndpointConfig(constants.DEFAULT_SERVER_URL) + self.endpoint_config = EndpointConfig(constants.DEFAULT_SERVER_URL) async def parse( self, @@ -197,37 +196,37 @@ async def parse( Return a default value if the parsing of the text failed.""" default_return = { - "intent": {"name": "", "confidence": 0.0}, + "intent": {INTENT_NAME_KEY: "", "confidence": 0.0}, "entities": [], "text": "", } - result = await self._rasa_http_parse(text, message_id, tracker) + result = await self._rasa_http_parse(text, message_id) return result if result is not None else default_return async def _rasa_http_parse( - self, - text: Text, - message_id: Optional[Text] = None, - tracker: DialogueStateTracker = None, + self, text: Text, message_id: Optional[Text] = None ) -> Optional[Dict[Text, Any]]: """Send a text message to a running rasa NLU http server. Return `None` on failure.""" - from requests.compat import urljoin # pytype: disable=import-error - if not self.endpoint: + if not self.endpoint_config: logger.error( - "Failed to parse text '{}' using rasa NLU over http. " - "No rasa NLU server specified!".format(text) + f"Failed to parse text '{text}' using rasa NLU over http. " + f"No rasa NLU server specified!" 
) return None - params = {"token": self.endpoint.token, "text": text, "message_id": message_id} + params = { + "token": self.endpoint_config.token, + "text": text, + "message_id": message_id, + } - if self.endpoint.url.endswith("/"): - url = self.endpoint.url + "model/parse" + if self.endpoint_config.url.endswith("/"): + url = self.endpoint_config.url + "model/parse" else: - url = self.endpoint.url + "/model/parse" + url = self.endpoint_config.url + "/model/parse" # noinspection PyBroadException try: @@ -236,15 +235,14 @@ async def _rasa_http_parse( if resp.status == 200: return await resp.json() else: + response_text = await resp.text() logger.error( - "Failed to parse text '{}' using rasa NLU over " - "http. Error: {}".format(text, await resp.text()) + f"Failed to parse text '{text}' using rasa NLU over " + f"http. Error: {response_text}" ) return None except Exception: - logger.exception( - "Failed to parse text '{}' using rasa NLU over http.".format(text) - ) + logger.exception(f"Failed to parse text '{text}' using rasa NLU over http.") return None @@ -276,11 +274,40 @@ async def parse( if self.lazy_init and self.interpreter is None: self._load_interpreter() - result = self.interpreter.parse(text, message_id) + result = self.interpreter.parse(text) return result - def _load_interpreter(self): + def _load_interpreter(self) -> None: from rasa.nlu.model import Interpreter self.interpreter = Interpreter.load(self.model_directory) + + +def _create_from_endpoint_config( + endpoint_config: Optional[EndpointConfig], +) -> "NaturalLanguageInterpreter": + """Instantiate a natural language interpreter based on its configuration.""" + + if endpoint_config is None: + return RegexInterpreter() + elif endpoint_config.type is None or endpoint_config.type.lower() == "http": + return RasaNLUHttpInterpreter(endpoint_config=endpoint_config) + else: + return _load_from_module_name_in_endpoint_config(endpoint_config) + + +def _load_from_module_name_in_endpoint_config( + endpoint_config: EndpointConfig, +) -> "NaturalLanguageInterpreter": + """Instantiate a natural language interpreter based on its class name.""" + + try: + nlu_interpreter_class = class_from_module_path(endpoint_config.type) + return nlu_interpreter_class(endpoint_config=endpoint_config) + except (AttributeError, ImportError) as e: + raise Exception( + f"Could not find a class based on the module path " + f"'{endpoint_config.type}'. Failed to create a " + f"`NaturalLanguageInterpreter` instance. Error: {e}" + ) diff --git a/rasa/core/jobs.py b/rasa/core/jobs.py index ba89054eebba..fd4c76bc0314 100644 --- a/rasa/core/jobs.py +++ b/rasa/core/jobs.py @@ -3,6 +3,7 @@ from apscheduler.schedulers.asyncio import AsyncIOScheduler from pytz import UnknownTimeZoneError, utc +from rasa.utils.common import raise_warning __scheduler = None @@ -22,7 +23,7 @@ async def scheduler() -> AsyncIOScheduler: __scheduler.start() return __scheduler except UnknownTimeZoneError: - logger.warning( + raise_warning( "apscheduler could not find a timezone and is " "defaulting to utc. This is probably because " "your system timezone is not set. " @@ -50,7 +51,7 @@ async def scheduler() -> AsyncIOScheduler: return __scheduler -def kill_scheduler(): +def kill_scheduler() -> None: """Terminate the scheduler if started. 
Another call to `scheduler` will create a new scheduler.""" diff --git a/rasa/core/lock.py b/rasa/core/lock.py index bbcb49bff84f..d148440001df 100644 --- a/rasa/core/lock.py +++ b/rasa/core/lock.py @@ -1,9 +1,9 @@ import json import logging from collections import deque -from typing import Text, Optional, Union, Deque, Dict, Any import time +from typing import Text, Optional, Union, Deque, Dict, Any logger = logging.getLogger(__name__) @@ -33,7 +33,7 @@ def from_dict(cls, data: Dict[Text, Union[int, float]]) -> "Ticket": return cls(number=data["number"], expires=data["expires"]) def __repr__(self) -> Text: - return "Ticket(number: {}, expires: {})".format(self.number, self.expires) + return f"Ticket(number: {self.number}, expires: {self.expires})" class TicketLock: @@ -72,7 +72,7 @@ def is_locked(self, ticket_number: int) -> bool: return self.now_serving != ticket_number - def issue_ticket(self, lifetime: Union[float, int]) -> int: + def issue_ticket(self, lifetime: float) -> int: """Issue a new ticket and return its number.""" self.remove_expired_tickets() @@ -100,10 +100,8 @@ def last_issued(self) -> int: """ ticket_number = self._ticket_number_for(-1) - if ticket_number is not None: - return ticket_number - return NO_TICKET_ISSUED + return ticket_number if ticket_number is not None else NO_TICKET_ISSUED @property def now_serving(self) -> Optional[int]: diff --git a/rasa/core/lock_store.py b/rasa/core/lock_store.py index 8e4c5cda9b61..0e6781a96100 100644 --- a/rasa/core/lock_store.py +++ b/rasa/core/lock_store.py @@ -2,36 +2,29 @@ import json import logging import os -from typing import Text, Optional, Union -from async_generator import asynccontextmanager, async_generator, yield_ +from async_generator import asynccontextmanager +from typing import Text, Union, Optional, AsyncGenerator from rasa.core.constants import DEFAULT_LOCK_LIFETIME -from rasa.core.lock import TicketLock, NO_TICKET_ISSUED +from rasa.utils import common +from rasa.core.lock import TicketLock from rasa.utils.endpoints import EndpointConfig logger = logging.getLogger(__name__) -ACCEPTED_LOCK_STORES = ["in_memory", "redis"] -LOCK_LIFETIME = int(os.environ.get("TICKET_LOCK_LIFETIME", 0)) or DEFAULT_LOCK_LIFETIME +def _get_lock_lifetime() -> int: + return int(os.environ.get("TICKET_LOCK_LIFETIME", 0)) or DEFAULT_LOCK_LIFETIME -# noinspection PyUnresolvedReferences -class LockError(Exception): - """Exception that is raised when a lock cannot be acquired. - - Attributes: - message (str): explanation of which `conversation_id` raised the error - """ - - pass +LOCK_LIFETIME = _get_lock_lifetime() +DEFAULT_SOCKET_TIMEOUT_IN_SECONDS = 10 # noinspection PyUnresolvedReferences -class TicketExistsError(Exception): - """Exception that is raised when an already-existing ticket for a conversation - has been issued. +class LockError(Exception): + """Exception that is raised when a lock cannot be acquired. Attributes: message (str): explanation of which `conversation_id` raised the error @@ -42,38 +35,13 @@ class TicketExistsError(Exception): class LockStore: @staticmethod - def find_lock_store(store: EndpointConfig = None) -> "LockStore": - if store is None or store.type is None or store.type == "in_memory": - lock_store = InMemoryLockStore() - elif store.type == "redis": - lock_store = RedisLockStore(host=store.url, **store.kwargs) - else: - logger.debug( - "Could not load built-in `LockStore`, which needs to be of " - "type: {}. Trying to load `LockStore` from module path '{}' " - "instead." 
- "".format(store.type, ", ".join(ACCEPTED_LOCK_STORES), store.type) - ) - lock_store = LockStore.load_lock_store_from_module_path(store.type) + def create(obj: Union["LockStore", EndpointConfig, None]) -> "LockStore": + """Factory to create a lock store.""" - logger.debug( - "Connected to lock store '{}'.".format(lock_store.__class__.__name__) - ) - - return lock_store - - @staticmethod - def load_lock_store_from_module_path(module_path: Text) -> "LockStore": - """Given the name of a `LockStore` module tries to retrieve it.""" - - from rasa.utils.common import class_from_module_path - - try: - return class_from_module_path(module_path) - except ImportError: - raise ImportError( - "Cannot retrieve `LockStore` from path '{}'.".format(module_path) - ) + if isinstance(obj, LockStore): + return obj + else: + return _create_from_endpoint_config(obj) @staticmethod def create_lock(conversation_id: Text) -> TicketLock: @@ -97,63 +65,48 @@ def save_lock(self, lock: TicketLock) -> None: raise NotImplementedError def issue_ticket( - self, conversation_id: Text, lock_lifetime: Union[float, int] = LOCK_LIFETIME + self, conversation_id: Text, lock_lifetime: float = LOCK_LIFETIME ) -> int: """Issue new ticket with `lock_lifetime` for lock associated with `conversation_id`. Creates a new lock if none is found. """ + logger.debug(f"Issuing ticket for conversation '{conversation_id}'.") + try: + lock = self.get_or_create_lock(conversation_id) + ticket = lock.issue_ticket(lock_lifetime) + self.save_lock(lock) - lock = self.get_or_create_lock(conversation_id) - ticket = lock.issue_ticket(lock_lifetime) - - while True: - try: - self.ensure_ticket_available(lock) - break - except TicketExistsError: - # issue a new ticket if current ticket number has been issued twice - logger.exception( - "Ticket could not be issued. Issuing new ticket and retrying..." - ) - ticket = lock.issue_ticket(lock_lifetime) - - self.save_lock(lock) - - return ticket + return ticket + except Exception as e: + raise LockError(f"Error while acquiring lock. Error:\n{e}") @asynccontextmanager - @async_generator async def lock( self, conversation_id: Text, - lock_lifetime: int = LOCK_LIFETIME, - wait_time_in_seconds: Union[int, float] = 1, - ) -> None: + lock_lifetime: float = LOCK_LIFETIME, + wait_time_in_seconds: float = 1, + ) -> AsyncGenerator[TicketLock, None]: """Acquire lock with lifetime `lock_lifetime`for `conversation_id`. Try acquiring lock with a wait time of `wait_time_in_seconds` seconds between attempts. Raise a `LockError` if lock has expired. 
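As context for `issue_ticket` above, a minimal sketch of the ticket-lock behaviour it relies on, assuming the `TicketLock` API from rasa/core/lock.py shown earlier in this diff:

from rasa.core.lock import TicketLock

lock = TicketLock("conversation-1")
first = lock.issue_ticket(lifetime=60)   # first ticket is served right away
second = lock.issue_ticket(lifetime=60)  # later tickets queue behind it

assert not lock.is_locked(first)   # `now_serving` matches the first ticket
assert lock.is_locked(second)      # the second caller still has to wait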
""" - ticket = self.issue_ticket(conversation_id, lock_lifetime) - try: - # have to use async_generator.yield_() for py 3.5 compatibility - await yield_( - await self._acquire_lock(conversation_id, ticket, wait_time_in_seconds) + + yield await self._acquire_lock( + conversation_id, ticket, wait_time_in_seconds ) finally: self.cleanup(conversation_id, ticket) async def _acquire_lock( - self, - conversation_id: Text, - ticket: int, - wait_time_in_seconds: Union[int, float], + self, conversation_id: Text, ticket: int, wait_time_in_seconds: float ) -> TicketLock: - + logger.debug(f"Acquiring lock for conversation '{conversation_id}'.") while True: # fetch lock in every iteration because lock might no longer exist lock = self.get_lock(conversation_id) @@ -164,11 +117,12 @@ async def _acquire_lock( # acquire lock if it isn't locked if not lock.is_locked(ticket): + logger.debug(f"Acquired lock for conversation '{conversation_id}'.") return lock logger.debug( - "Failed to acquire lock for conversation ID '{}'. Retrying..." - "".format(conversation_id) + f"Failed to acquire lock for conversation ID '{conversation_id}'. " + f"Retrying..." ) # sleep and update lock @@ -176,8 +130,7 @@ async def _acquire_lock( self.update_lock(conversation_id) raise LockError( - "Could not acquire lock for conversation_id '{}'." - "".format(conversation_id) + f"Could not acquire lock for conversation_id '{conversation_id}'." ) def update_lock(self, conversation_id: Text) -> None: @@ -230,33 +183,9 @@ def cleanup(self, conversation_id: Text, ticket_number: int) -> None: @staticmethod def _log_deletion(conversation_id: Text, deletion_successful: bool) -> None: if deletion_successful: - logger.debug("Deleted lock for conversation '{}'.".format(conversation_id)) + logger.debug(f"Deleted lock for conversation '{conversation_id}'.") else: - logger.debug( - "Could not delete lock for conversation '{}'.".format(conversation_id) - ) - - def ensure_ticket_available(self, lock: TicketLock) -> None: - """Check for duplicate tickets issued for `lock`. - - This function should be called before saving `lock`. Raises `TicketExistsError` - if the last issued ticket for `lock` does not match the last ticket issued - for a lock fetched from storage for `lock.conversation_id`. This indicates - that some other process has issued a ticket for `lock` in the meantime. - """ - - existing_lock = self.get_lock(lock.conversation_id) - if not existing_lock or existing_lock.last_issued == NO_TICKET_ISSUED: - # lock does not yet exist for conversation or no ticket has been issued - return - - # raise if the last issued ticket number of `existing_lock` is not the same as - # that of the one being acquired - if existing_lock.last_issued != lock.last_issued: - raise TicketExistsError( - "Ticket '{}' already exists for conversation ID '{}'." - "".format(existing_lock.last_issued, lock.conversation_id) - ) + logger.debug(f"Could not delete lock for conversation '{conversation_id}'.") class RedisLockStore(LockStore): @@ -268,11 +197,31 @@ def __init__( port: int = 6379, db: int = 1, password: Optional[Text] = None, - ): + use_ssl: bool = False, + socket_timeout: float = DEFAULT_SOCKET_TIMEOUT_IN_SECONDS, + ) -> None: + """Create a lock store which uses Redis for persistence. + + Args: + host: The host of the redis server. + port: The port of the redis server. + db: The name of the database within Redis which should be used by Rasa + Open Source. + password: The password which should be used for authentication with the + Redis database. 
+ use_ssl: `True` if SSL should be used for the connection to Redis. + socket_timeout: Timeout in seconds after which an exception will be raised + in case Redis doesn't respond within `socket_timeout` seconds. + """ import redis self.red = redis.StrictRedis( - host=host, port=int(port), db=int(db), password=password + host=host, + port=int(port), + db=int(db), + password=password, + ssl=use_ssl, + socket_timeout=socket_timeout, ) super().__init__() @@ -292,7 +241,7 @@ def save_lock(self, lock: TicketLock) -> None: class InMemoryLockStore(LockStore): """In-memory store for ticket locks.""" - def __init__(self): + def __init__(self) -> None: self.conversation_locks = {} super().__init__() @@ -307,3 +256,42 @@ def delete_lock(self, conversation_id: Text) -> None: def save_lock(self, lock: TicketLock) -> None: self.conversation_locks[lock.conversation_id] = lock + + +def _create_from_endpoint_config( + endpoint_config: Optional[EndpointConfig] = None, +) -> "LockStore": + """Given an endpoint configuration, create a proper `LockStore` object.""" + + if ( + endpoint_config is None + or endpoint_config.type is None + or endpoint_config.type == "in_memory" + ): + # this is the default type if no lock store type is set + + lock_store = InMemoryLockStore() + elif endpoint_config.type == "redis": + lock_store = RedisLockStore(host=endpoint_config.url, **endpoint_config.kwargs) + else: + lock_store = _load_from_module_name_in_endpoint_config(endpoint_config) + + logger.debug(f"Connected to lock store '{lock_store.__class__.__name__}'.") + + return lock_store + + +def _load_from_module_name_in_endpoint_config( + endpoint_config: EndpointConfig, +) -> "LockStore": + """Retrieve a `LockStore` based on its class name.""" + + try: + lock_store_class = common.class_from_module_path(endpoint_config.type) + return lock_store_class(endpoint_config=endpoint_config) + except (AttributeError, ImportError) as e: + raise Exception( + f"Could not find a class based on the module path " + f"'{endpoint_config.type}'. Failed to create a `LockStore` " + f"instance. Error: {e}" + ) diff --git a/rasa/core/nlg/callback.py b/rasa/core/nlg/callback.py index b570de8c3293..c0c58912d1cd 100644 --- a/rasa/core/nlg/callback.py +++ b/rasa/core/nlg/callback.py @@ -9,7 +9,7 @@ logger = logging.getLogger(__name__) -def nlg_response_format_spec(): +def nlg_response_format_spec() -> Dict[Text, Any]: """Expected response schema for an NLG endpoint. 
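The module-path branch of the lock store factory above expects a custom class to take the endpoint configuration as a constructor argument. A hypothetical minimal implementation (class and module names are invented for illustration) might look like:

from typing import Dict, Optional, Text

from rasa.core.lock import TicketLock
from rasa.core.lock_store import LockStore
from rasa.utils.endpoints import EndpointConfig


class MyLockStore(LockStore):
    """Toy store, referenced via `type: my_module.MyLockStore` in the endpoint config."""

    def __init__(self, endpoint_config: Optional[EndpointConfig] = None) -> None:
        self.endpoint_config = endpoint_config
        self._locks: Dict[Text, TicketLock] = {}
        super().__init__()

    def get_lock(self, conversation_id: Text) -> Optional[TicketLock]:
        return self._locks.get(conversation_id)

    def delete_lock(self, conversation_id: Text) -> None:
        self._locks.pop(conversation_id, None)

    def save_lock(self, lock: TicketLock) -> None:
        self._locks[lock.conversation_id] = lock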
Used for validation of the response returned from the NLG endpoint.""" @@ -25,7 +25,7 @@ def nlg_response_format_spec(): } -def nlg_request_format_spec(): +def nlg_request_format_spec() -> Dict[Text, Any]: """Expected request schema for requests sent to an NLG endpoint.""" return { @@ -53,7 +53,7 @@ def nlg_request_format( template_name: Text, tracker: DialogueStateTracker, output_channel: Text, - **kwargs: Any + **kwargs: Any, ) -> Dict[Text, Any]: """Create the json body for the NLG json body for the request.""" @@ -84,7 +84,7 @@ async def generate( template_name: Text, tracker: DialogueStateTracker, output_channel: Text, - **kwargs: Any + **kwargs: Any, ) -> Dict[Text, Any]: """Retrieve a named template from the domain using an endpoint.""" diff --git a/rasa/core/nlg/generator.py b/rasa/core/nlg/generator.py index 5a69d0f2b5a2..0f2d9ecc7b90 100644 --- a/rasa/core/nlg/generator.py +++ b/rasa/core/nlg/generator.py @@ -1,13 +1,24 @@ -from typing import Optional, Union +import logging +from typing import Optional, Union, Text, Any, Dict from rasa.core.domain import Domain +from rasa.utils import common from rasa.utils.endpoints import EndpointConfig +from rasa.core.trackers import DialogueStateTracker +logger = logging.getLogger(__name__) -class NaturalLanguageGenerator(object): + +class NaturalLanguageGenerator: """Generate bot utterances based on a dialogue state.""" - async def generate(self, template_name, tracker, output_channel, **kwargs): + async def generate( + self, + template_name: Text, + tracker: "DialogueStateTracker", + output_channel: Text, + **kwargs: Any, + ) -> Optional[Dict[Text, Any]]: """Generate a response for the requested template. There are a lot of different methods to implement this, e.g. the @@ -24,22 +35,60 @@ def create( if isinstance(obj, NaturalLanguageGenerator): return obj - elif isinstance(obj, EndpointConfig): - from rasa.core.nlg import ( # pytype: disable=pyi-error - CallbackNaturalLanguageGenerator, - ) - - return CallbackNaturalLanguageGenerator(obj) - elif obj is None: - from rasa.core.nlg import ( # pytype: disable=pyi-error - TemplatedNaturalLanguageGenerator, - ) - - templates = domain.templates if domain else [] - return TemplatedNaturalLanguageGenerator(templates) else: - raise Exception( - "Cannot create a NaturalLanguageGenerator " - "based on the passed object. 
Type: `{}`" - "".format(type(obj)) - ) + return _create_from_endpoint_config(obj, domain) + + +def _create_from_endpoint_config( + endpoint_config: Optional[EndpointConfig] = None, domain: Optional[Domain] = None +) -> "NaturalLanguageGenerator": + """Given an endpoint configuration, create a proper NLG object.""" + + domain = domain or Domain.empty() + + if endpoint_config is None: + from rasa.core.nlg import ( # pytype: disable=pyi-error + TemplatedNaturalLanguageGenerator, + ) + + # this is the default type if no endpoint config is set + nlg = TemplatedNaturalLanguageGenerator(domain.templates) + elif endpoint_config.type is None or endpoint_config.type.lower() == "callback": + from rasa.core.nlg import ( # pytype: disable=pyi-error + CallbackNaturalLanguageGenerator, + ) + + # this is the default type if no nlg type is set + nlg = CallbackNaturalLanguageGenerator(endpoint_config=endpoint_config) + elif endpoint_config.type.lower() == "template": + from rasa.core.nlg import ( # pytype: disable=pyi-error + TemplatedNaturalLanguageGenerator, + ) + + nlg = TemplatedNaturalLanguageGenerator(domain.templates) + else: + nlg = _load_from_module_name_in_endpoint_config(endpoint_config, domain) + + logger.debug(f"Instantiated NLG to '{nlg.__class__.__name__}'.") + return nlg + + +def _load_from_module_name_in_endpoint_config( + endpoint_config: EndpointConfig, domain: Domain +) -> "NaturalLanguageGenerator": + """Initializes a custom natural language generator. + + Args: + domain: defines the universe in which the assistant operates + endpoint_config: the specific natural language generator + """ + + try: + nlg_class = common.class_from_module_path(endpoint_config.type) + return nlg_class(endpoint_config=endpoint_config, domain=domain) + except (AttributeError, ImportError) as e: + raise Exception( + f"Could not find a class based on the module path " + f"'{endpoint_config.type}'. Failed to create a " + f"`NaturalLanguageGenerator` instance. Error: {e}" + ) diff --git a/rasa/core/nlg/interpolator.py b/rasa/core/nlg/interpolator.py index fe2cb827ea2a..72178d99979c 100644 --- a/rasa/core/nlg/interpolator.py +++ b/rasa/core/nlg/interpolator.py @@ -1,52 +1,76 @@ import re import logging +from typing import Text, Dict, Union, Any, List logger = logging.getLogger(__name__) -def interpolate_text(template, values): - if isinstance(template, str): - # transforming template tags from - # "{tag_name}" to "{0[tag_name]}" - # as described here: - # https://stackoverflow.com/questions/7934620/python-dots-in-the-name-of-variable-in-a-format-string#comment9695339_7934969 - # black list character and make sure to not to allow - # (a) newline in slot name - # (b) { or } in slot name - try: - text = re.sub(r"{([^\n{}]+?)}", r"{0[\1]}", template) - text = text.format(values) - if "0[" in text: - # regex replaced tag but format did not replace - # likely cause would be that tag name was enclosed - # in double curly and format func simply escaped it. - # we don't want to return {0[SLOTNAME]} thus - # restoring original value with { being escaped. - return template.format({}) - - return text - except KeyError as e: - logger.exception( - "Failed to fill utterance template '{}'. " - "Tried to replace '{}' but could not find " - "a value for it. There is no slot with this " - "name nor did you pass the value explicitly " - "when calling the template. Return template " - "without filling the template. 
" - "".format(template, e.args[0]) - ) - return template - return template +def interpolate_text(template: Text, values: Dict[Text, Text]) -> Text: + """Interpolate values into templates with placeholders. + + Transform template tags from "{tag_name}" to "{0[tag_name]}" as described here: + https://stackoverflow.com/questions/7934620/python-dots-in-the-name-of-variable-in-a-format-string#comment9695339_7934969 + Block characters, making sure not to allow: + (a) newline in slot name + (b) { or } in slot name + + Args: + template: The piece of text that should be interpolated. + values: A dictionary of keys and the values that those + keys should be replaced with. + + Returns: + The piece of text with any replacements made. + """ + + try: + text = re.sub(r"{([^\n{}]+?)}", r"{0[\1]}", template) + text = text.format(values) + if "0[" in text: + # regex replaced tag but format did not replace + # likely cause would be that tag name was enclosed + # in double curly and format func simply escaped it. + # we don't want to return {0[SLOTNAME]} thus + # restoring original value with { being escaped. + return template.format({}) + + return text + except KeyError as e: + logger.exception( + f"Failed to fill utterance template '{template}'. " + f"Tried to replace '{e.args[0]}' but could not find " + f"a value for it. There is no slot with this " + f"name nor did you pass the value explicitly " + f"when calling the template. Return template " + f"without filling the template. " + ) + return template + + +def interpolate( + template: Union[List[Any], Dict[Text, Any], Text], values: Dict[Text, Text] +) -> Union[List[Any], Dict[Text, Any], Text]: + """Recursively process template and interpolate any text keys. + Args: + template: The template that should be interpolated. + values: A dictionary of keys and the values that those + keys should be replaced with. -def interpolate(template, values): + Returns: + The template with any replacements made. 
+ """ if isinstance(template, str): return interpolate_text(template, values) elif isinstance(template, dict): for k, v in template.items(): if isinstance(v, dict): interpolate(v, values) - else: + elif isinstance(v, list): + template[k] = [interpolate(i, values) for i in v] + elif isinstance(v, str): template[k] = interpolate_text(v, values) return template + elif isinstance(template, list): + return [interpolate(i, values) for i in template] return template diff --git a/rasa/core/nlg/template.py b/rasa/core/nlg/template.py index fe5869d4317f..9611ca1c8232 100644 --- a/rasa/core/nlg/template.py +++ b/rasa/core/nlg/template.py @@ -1,12 +1,11 @@ import copy import logging -from collections import defaultdict from rasa.core.trackers import DialogueStateTracker from typing import Text, Any, Dict, Optional, List +from rasa.core.nlg import interpolator # pytype: disable=pyi-error from rasa.core.nlg.generator import NaturalLanguageGenerator -from rasa.core.nlg.interpolator import interpolate_text, interpolate logger = logging.getLogger(__name__) @@ -20,7 +19,9 @@ class TemplatedNaturalLanguageGenerator(NaturalLanguageGenerator): def __init__(self, templates: Dict[Text, List[Dict[Text, Any]]]) -> None: self.templates = templates - def _templates_for_utter_action(self, utter_action, output_channel): + def _templates_for_utter_action( + self, utter_action: Text, output_channel: Text + ) -> List[Dict[Text, Any]]: """Return array of templates that fit the channel and action.""" channel_templates = [] @@ -66,7 +67,7 @@ async def generate( template_name: Text, tracker: DialogueStateTracker, output_channel: Text, - **kwargs: Any + **kwargs: Any, ) -> Optional[Dict[Text, Any]]: """Generate a response for the requested template.""" @@ -80,7 +81,7 @@ def generate_from_slots( template_name: Text, filled_slots: Dict[Text, Any], output_channel: Text, - **kwargs: Any + **kwargs: Any, ) -> Optional[Dict[Text, Any]]: """Generate a response for the requested template.""" @@ -96,7 +97,7 @@ def _fill_template( self, template: Dict[Text, Any], filled_slots: Optional[Dict[Text, Any]] = None, - **kwargs: Any + **kwargs: Any, ) -> Dict[Text, Any]: """"Combine slot values and key word arguments to fill templates.""" @@ -107,14 +108,16 @@ def _fill_template( "text", "image", "custom", - "button", + "buttons", "attachment", "quick_replies", ] if template_vars: for key in keys_to_interpolate: if key in template: - template[key] = interpolate(template[key], template_vars) + template[key] = interpolator.interpolate( + template[key], template_vars + ) return template @staticmethod diff --git a/rasa/core/policies/__init__.py b/rasa/core/policies/__init__.py index 57ffed522883..0cc5baef6119 100644 --- a/rasa/core/policies/__init__.py +++ b/rasa/core/policies/__init__.py @@ -1,14 +1,5 @@ # we need to import the policy first from rasa.core.policies.policy import Policy -pass # and after that any implementation from rasa.core.policies.ensemble import SimplePolicyEnsemble, PolicyEnsemble -from rasa.core.policies.embedding_policy import EmbeddingPolicy -from rasa.core.policies.fallback import FallbackPolicy -from rasa.core.policies.keras_policy import KerasPolicy -from rasa.core.policies.memoization import MemoizationPolicy, AugmentedMemoizationPolicy -from rasa.core.policies.sklearn_policy import SklearnPolicy -from rasa.core.policies.form_policy import FormPolicy -from rasa.core.policies.two_stage_fallback import TwoStageFallbackPolicy -from rasa.core.policies.mapping_policy import MappingPolicy diff --git 
a/rasa/core/policies/embedding_policy.py b/rasa/core/policies/embedding_policy.py deleted file mode 100644 index 15ce9665d921..000000000000 --- a/rasa/core/policies/embedding_policy.py +++ /dev/null @@ -1,671 +0,0 @@ -from collections import namedtuple -import copy -import json -import logging -import os -import pickle -import warnings - -import numpy as np -from typing import Any, List, Optional, Text, Dict, Tuple - -import rasa.utils.io -from rasa.core import utils -from rasa.core.domain import Domain -from rasa.core.featurizers import ( - TrackerFeaturizer, - FullDialogueTrackerFeaturizer, - LabelTokenizerSingleStateFeaturizer, - MaxHistoryTrackerFeaturizer, -) -from rasa.core.policies.policy import Policy -from rasa.core.constants import DEFAULT_POLICY_PRIORITY -from rasa.core.trackers import DialogueStateTracker -from rasa.utils import train_utils - -import tensorflow as tf - -# avoid warning println on contrib import - remove for tf 2 -tf.contrib._warning = None -logger = logging.getLogger(__name__) - - -class EmbeddingPolicy(Policy): - """Transformer Embedding Dialogue Policy (TEDP) - - Transformer version of the REDP used in our paper https://arxiv.org/abs/1811.11707 - """ - - SUPPORTS_ONLINE_TRAINING = True - - # default properties (DOC MARKER - don't remove) - defaults = { - # nn architecture - # a list of hidden layers sizes before user embed layer - # number of hidden layers is equal to the length of this list - "hidden_layers_sizes_pre_dial": [], - # a list of hidden layers sizes before bot embed layer - # number of hidden layers is equal to the length of this list - "hidden_layers_sizes_bot": [], - # number of units in transformer - "transformer_size": 128, - # number of transformer layers - "num_transformer_layers": 1, - # type of positional encoding in transformer - "pos_encoding": "timing", # string 'timing' or 'emb' - # max sequence length if pos_encoding='emb' - "max_seq_length": 256, - # number of attention heads in transformer - "num_heads": 4, - # training parameters - # initial and final batch sizes: - # batch size will be linearly increased for each epoch - "batch_size": [8, 32], - # how to create batches - "batch_strategy": "balanced", # string 'sequence' or 'balanced' - # number of epochs - "epochs": 1, - # set random seed to any int to get reproducible results - "random_seed": None, - # embedding parameters - # dimension size of embedding vectors - "embed_dim": 20, - # the type of the similarity - "num_neg": 20, - # flag if minimize only maximum similarity over incorrect labels - "similarity_type": "auto", # string 'auto' or 'cosine' or 'inner' - # the type of the loss function - "loss_type": "softmax", # string 'softmax' or 'margin' - # how similar the algorithm should try - # to make embedding vectors for correct labels - "mu_pos": 0.8, # should be 0.0 < ... < 1.0 for 'cosine' - # maximum negative similarity for incorrect labels - "mu_neg": -0.2, # should be -1.0 < ... 
< 1.0 for 'cosine' - # the number of incorrect labels, the algorithm will minimize - # their similarity to the user input during training - "use_max_sim_neg": True, # flag which loss function to use - # scale loss inverse proportionally to confidence of correct prediction - "scale_loss": True, - # regularization - # the scale of L2 regularization - "C2": 0.001, - # the scale of how important is to minimize the maximum similarity - # between embeddings of different labels - "C_emb": 0.8, - # dropout rate for dial nn - "droprate_a": 0.1, - # dropout rate for bot nn - "droprate_b": 0.0, - # visualization of accuracy - # how often calculate validation accuracy - "evaluate_every_num_epochs": 20, # small values may hurt performance - # how many examples to use for hold out validation set - "evaluate_on_num_examples": 0, # large values may hurt performance - } - # end default properties (DOC MARKER - don't remove) - - @staticmethod - def _standard_featurizer(max_history: Optional[int] = None) -> "TrackerFeaturizer": - if max_history is None: - return FullDialogueTrackerFeaturizer(LabelTokenizerSingleStateFeaturizer()) - else: - return MaxHistoryTrackerFeaturizer( - LabelTokenizerSingleStateFeaturizer(), max_history=max_history - ) - - def __init__( - self, - featurizer: Optional["TrackerFeaturizer"] = None, - priority: int = DEFAULT_POLICY_PRIORITY, - graph: Optional["tf.Graph"] = None, - session: Optional["tf.Session"] = None, - user_placeholder: Optional["tf.Tensor"] = None, - bot_placeholder: Optional["tf.Tensor"] = None, - similarity_all: Optional["tf.Tensor"] = None, - pred_confidence: Optional["tf.Tensor"] = None, - similarity: Optional["tf.Tensor"] = None, - dial_embed: Optional["tf.Tensor"] = None, - bot_embed: Optional["tf.Tensor"] = None, - all_bot_embed: Optional["tf.Tensor"] = None, - attention_weights: Optional["tf.Tensor"] = None, - max_history: Optional[int] = None, - **kwargs: Any - ) -> None: - """Declare instant variables with default values""" - - if not featurizer: - featurizer = self._standard_featurizer(max_history) - super(EmbeddingPolicy, self).__init__(featurizer, priority) - - self._load_params(**kwargs) - - # encode all label_ids with numbers - self._encoded_all_label_ids = None - - # tf related instances - self.graph = graph - self.session = session - self.a_in = user_placeholder - self.b_in = bot_placeholder - self.sim_all = similarity_all - self.pred_confidence = pred_confidence - self.sim = similarity - - # persisted embeddings - self.dial_embed = dial_embed - self.bot_embed = bot_embed - self.all_bot_embed = all_bot_embed - - self.attention_weights = attention_weights - # internal tf instances - self._iterator = None - self._train_op = None - self._is_training = None - - # init helpers - def _load_nn_architecture_params(self, config: Dict[Text, Any]) -> None: - self.hidden_layers_sizes = { - "pre_dial": config["hidden_layers_sizes_pre_dial"], - "bot": config["hidden_layers_sizes_bot"], - } - - self.pos_encoding = config["pos_encoding"] - self.max_seq_length = config["max_seq_length"] - self.num_heads = config["num_heads"] - - self.transformer_size = config["transformer_size"] - self.num_transformer_layers = config["num_transformer_layers"] - - self.batch_size = config["batch_size"] - self.batch_strategy = config["batch_strategy"] - - self.epochs = config["epochs"] - - self.random_seed = config["random_seed"] - - def _load_embedding_params(self, config: Dict[Text, Any]) -> None: - self.embed_dim = config["embed_dim"] - self.num_neg = config["num_neg"] - - 
self.similarity_type = config["similarity_type"] - self.loss_type = config["loss_type"] - if self.similarity_type == "auto": - if self.loss_type == "softmax": - self.similarity_type = "inner" - elif self.loss_type == "margin": - self.similarity_type = "cosine" - - self.mu_pos = config["mu_pos"] - self.mu_neg = config["mu_neg"] - self.use_max_sim_neg = config["use_max_sim_neg"] - - self.scale_loss = config["scale_loss"] - - def _load_regularization_params(self, config: Dict[Text, Any]) -> None: - self.C2 = config["C2"] - self.C_emb = config["C_emb"] - self.droprate = {"bot": config["droprate_b"], "dial": config["droprate_a"]} - - def _load_visual_params(self, config: Dict[Text, Any]) -> None: - self.evaluate_every_num_epochs = config["evaluate_every_num_epochs"] - if self.evaluate_every_num_epochs < 1: - self.evaluate_every_num_epochs = self.epochs - self.evaluate_on_num_examples = config["evaluate_on_num_examples"] - - def _load_params(self, **kwargs: Dict[Text, Any]) -> None: - config = copy.deepcopy(self.defaults) - config.update(kwargs) - - self._tf_config = train_utils.load_tf_config(config) - self._load_nn_architecture_params(config) - self._load_embedding_params(config) - self._load_regularization_params(config) - self._load_visual_params(config) - - # data helpers - # noinspection PyPep8Naming - @staticmethod - def _label_ids_for_Y(data_Y: "np.ndarray") -> "np.ndarray": - """Prepare Y data for training: extract label_ids.""" - - return data_Y.argmax(axis=-1) - - # noinspection PyPep8Naming - def _label_features_for_Y(self, label_ids: "np.ndarray") -> "np.ndarray": - """Prepare Y data for training: features for label_ids.""" - - if len(label_ids.shape) == 2: # full dialogue featurizer is used - return np.stack( - [ - np.stack( - [ - self._encoded_all_label_ids[label_idx] - for label_idx in seq_label_ids - ] - ) - for seq_label_ids in label_ids - ] - ) - else: # max history featurizer is used - return np.stack( - [self._encoded_all_label_ids[label_idx] for label_idx in label_ids] - ) - - # noinspection PyPep8Naming - def _create_session_data( - self, data_X: "np.ndarray", data_Y: Optional["np.ndarray"] = None - ) -> "train_utils.SessionData": - """Combine all tf session related data into a named tuple""" - - if data_Y is not None: - # training time - label_ids = self._label_ids_for_Y(data_Y) - Y = self._label_features_for_Y(label_ids) - - # idea taken from sklearn's stratify split - if label_ids.ndim == 2: - # for multi-label y, map each distinct row to a string repr - # using join because str(row) uses an ellipsis if len(row) > 1000 - label_ids = np.array([" ".join(row.astype("str")) for row in label_ids]) - else: - # prediction time - label_ids = None - Y = None - - return train_utils.SessionData(X=data_X, Y=Y, label_ids=label_ids) - - def _create_tf_bot_embed(self, b_in: "tf.Tensor") -> "tf.Tensor": - """Create embedding bot vector.""" - - b = train_utils.create_tf_fnn( - b_in, - self.hidden_layers_sizes["bot"], - self.droprate["bot"], - self.C2, - self._is_training, - layer_name_suffix="bot", - ) - return train_utils.create_tf_embed( - b, self.embed_dim, self.C2, self.similarity_type, layer_name_suffix="bot" - ) - - def _create_tf_dial(self, a_in) -> Tuple["tf.Tensor", "tf.Tensor"]: - """Create dialogue level embedding and mask.""" - - # mask different length sequences - # if there is at least one `-1` it should be masked - mask = tf.sign(tf.reduce_max(self.a_in, -1) + 1) - - a = train_utils.create_tf_fnn( - a_in, - self.hidden_layers_sizes["pre_dial"], - self.droprate["dial"], - 
self.C2, - self._is_training, - layer_name_suffix="pre_dial", - ) - - self.attention_weights = {} - hparams = train_utils.create_t2t_hparams( - self.num_transformer_layers, - self.transformer_size, - self.num_heads, - self.droprate["dial"], - self.pos_encoding, - self.max_seq_length, - self._is_training, - ) - - a = train_utils.create_t2t_transformer_encoder( - a, mask, self.attention_weights, hparams, self.C2, self._is_training - ) - - if isinstance(self.featurizer, MaxHistoryTrackerFeaturizer): - # pick last label if max history featurizer is used - a = a[:, -1:, :] - mask = mask[:, -1:] - - dial_embed = train_utils.create_tf_embed( - a, self.embed_dim, self.C2, self.similarity_type, layer_name_suffix="dial" - ) - - return dial_embed, mask - - def _build_tf_train_graph(self) -> Tuple["tf.Tensor", "tf.Tensor"]: - """Bulid train graph using iterator.""" - - # session data are int counts but we need a float tensors - self.a_in, self.b_in = self._iterator.get_next() - if isinstance(self.featurizer, MaxHistoryTrackerFeaturizer): - # add time dimension if max history featurizer is used - self.b_in = self.b_in[:, tf.newaxis, :] - - all_bot_raw = tf.constant( - self._encoded_all_label_ids, dtype=tf.float32, name="all_bot_raw" - ) - - self.dial_embed, mask = self._create_tf_dial(self.a_in) - - self.bot_embed = self._create_tf_bot_embed(self.b_in) - self.all_bot_embed = self._create_tf_bot_embed(all_bot_raw) - - return train_utils.calculate_loss_acc( - self.dial_embed, - self.bot_embed, - self.b_in, - self.all_bot_embed, - all_bot_raw, - self.num_neg, - mask, - self.loss_type, - self.mu_pos, - self.mu_neg, - self.use_max_sim_neg, - self.C_emb, - self.scale_loss, - ) - - # prepare for prediction - def _create_tf_placeholders(self, session_data: "train_utils.SessionData") -> None: - """Create placeholders for prediction.""" - - dialogue_len = None # use dynamic time - self.a_in = tf.placeholder( - dtype=tf.float32, - shape=(None, dialogue_len, session_data.X.shape[-1]), - name="a", - ) - self.b_in = tf.placeholder( - dtype=tf.float32, - shape=(None, dialogue_len, None, session_data.Y.shape[-1]), - name="b", - ) - - def _build_tf_pred_graph( - self, session_data: "train_utils.SessionData" - ) -> "tf.Tensor": - """Rebuild tf graph for prediction.""" - - self._create_tf_placeholders(session_data) - - self.dial_embed, mask = self._create_tf_dial(self.a_in) - - self.sim_all = train_utils.tf_raw_sim( - self.dial_embed[:, :, tf.newaxis, :], - self.all_bot_embed[tf.newaxis, tf.newaxis, :, :], - mask, - ) - - self.bot_embed = self._create_tf_bot_embed(self.b_in) - - self.sim = train_utils.tf_raw_sim( - self.dial_embed[:, :, tf.newaxis, :], self.bot_embed, mask - ) - - return train_utils.confidence_from_sim(self.sim_all, self.similarity_type) - - # training methods - def train( - self, - training_trackers: List["DialogueStateTracker"], - domain: "Domain", - **kwargs: Any - ) -> None: - """Train the policy on given training trackers.""" - - logger.debug("Started training embedding policy.") - - # set numpy random seed - np.random.seed(self.random_seed) - - # dealing with training data - training_data = self.featurize_for_training(training_trackers, domain, **kwargs) - - # encode all label_ids with policies' featurizer - state_featurizer = self.featurizer.state_featurizer - self._encoded_all_label_ids = state_featurizer.create_encoded_all_actions( - domain - ) - - # check if number of negatives is less than number of label_ids - logger.debug( - "Check if num_neg {} is smaller " - "than number of label_ids {}, " 
- "else set num_neg to the number of label_ids - 1" - "".format(self.num_neg, domain.num_actions) - ) - # noinspection PyAttributeOutsideInit - self.num_neg = min(self.num_neg, domain.num_actions - 1) - - # extract actual training data to feed to tf session - session_data = self._create_session_data(training_data.X, training_data.y) - - if self.evaluate_on_num_examples: - session_data, eval_session_data = train_utils.train_val_split( - session_data, self.evaluate_on_num_examples, self.random_seed - ) - else: - eval_session_data = None - - self.graph = tf.Graph() - with self.graph.as_default(): - # set random seed in tf - tf.set_random_seed(self.random_seed) - - # allows increasing batch size - batch_size_in = tf.placeholder(tf.int64) - - ( - self._iterator, - train_init_op, - eval_init_op, - ) = train_utils.create_iterator_init_datasets( - session_data, eval_session_data, batch_size_in, self.batch_strategy - ) - - self._is_training = tf.placeholder_with_default(False, shape=()) - - loss, acc = self._build_tf_train_graph() - - # define which optimizer to use - self._train_op = tf.train.AdamOptimizer().minimize(loss) - - # train tensorflow graph - self.session = tf.Session(config=self._tf_config) - train_utils.train_tf_dataset( - train_init_op, - eval_init_op, - batch_size_in, - loss, - acc, - self._train_op, - self.session, - self._is_training, - self.epochs, - self.batch_size, - self.evaluate_on_num_examples, - self.evaluate_every_num_epochs, - ) - - # rebuild the graph for prediction - self.pred_confidence = self._build_tf_pred_graph(session_data) - - self.attention_weights = train_utils.extract_attention( - self.attention_weights - ) - - def continue_training( - self, - training_trackers: List["DialogueStateTracker"], - domain: "Domain", - **kwargs: Any - ) -> None: - """Continue training an already trained policy.""" - - batch_size = kwargs.get("batch_size", 5) - epochs = kwargs.get("epochs", 50) - - with self.graph.as_default(): - for _ in range(epochs): - training_data = self._training_data_for_continue_training( - batch_size, training_trackers, domain - ) - - session_data = self._create_session_data( - training_data.X, training_data.y - ) - train_dataset = train_utils.create_tf_dataset(session_data, batch_size) - train_init_op = self._iterator.make_initializer(train_dataset) - self.session.run(train_init_op) - - # fit to one extra example using updated trackers - while True: - try: - self.session.run( - self._train_op, feed_dict={self._is_training: True} - ) - - except tf.errors.OutOfRangeError: - break - - def tf_feed_dict_for_prediction( - self, tracker: "DialogueStateTracker", domain: "Domain" - ) -> Dict["tf.Tensor", "np.ndarray"]: - """Create feed dictionary for tf session.""" - - # noinspection PyPep8Naming - data_X = self.featurizer.create_X([tracker], domain) - session_data = self._create_session_data(data_X) - - return {self.a_in: session_data.X} - - def predict_action_probabilities( - self, tracker: "DialogueStateTracker", domain: "Domain" - ) -> List[float]: - """Predict the next action the bot should take. - - Return the list of probabilities for the next actions. 
- """ - - if self.session is None: - logger.error( - "There is no trained tf.session: " - "component is either not trained or " - "didn't receive enough training data" - ) - return [0.0] * domain.num_actions - - tf_feed_dict = self.tf_feed_dict_for_prediction(tracker, domain) - - confidence = self.session.run(self.pred_confidence, feed_dict=tf_feed_dict) - - return confidence[0, -1, :].tolist() - - def persist(self, path: Text) -> None: - """Persists the policy to a storage.""" - - if self.session is None: - warnings.warn( - "Method `persist(...)` was called " - "without a trained model present. " - "Nothing to persist then!" - ) - return - - self.featurizer.persist(path) - - meta = {"priority": self.priority} - - meta_file = os.path.join(path, "embedding_policy.json") - utils.dump_obj_as_json_to_file(meta_file, meta) - - file_name = "tensorflow_embedding.ckpt" - checkpoint = os.path.join(path, file_name) - rasa.utils.io.create_directory_for_file(checkpoint) - - with self.graph.as_default(): - train_utils.persist_tensor("user_placeholder", self.a_in, self.graph) - train_utils.persist_tensor("bot_placeholder", self.b_in, self.graph) - - train_utils.persist_tensor("similarity_all", self.sim_all, self.graph) - train_utils.persist_tensor( - "pred_confidence", self.pred_confidence, self.graph - ) - train_utils.persist_tensor("similarity", self.sim, self.graph) - - train_utils.persist_tensor("dial_embed", self.dial_embed, self.graph) - train_utils.persist_tensor("bot_embed", self.bot_embed, self.graph) - train_utils.persist_tensor("all_bot_embed", self.all_bot_embed, self.graph) - - train_utils.persist_tensor( - "attention_weights", self.attention_weights, self.graph - ) - - saver = tf.train.Saver() - saver.save(self.session, checkpoint) - - with open(os.path.join(path, file_name + ".tf_config.pkl"), "wb") as f: - pickle.dump(self._tf_config, f) - - @classmethod - def load(cls, path: Text) -> "EmbeddingPolicy": - """Loads a policy from the storage. - - **Needs to load its featurizer** - """ - - if not os.path.exists(path): - raise Exception( - "Failed to load dialogue model. 
Path '{}' " - "doesn't exist".format(os.path.abspath(path)) - ) - - featurizer = TrackerFeaturizer.load(path) - - file_name = "tensorflow_embedding.ckpt" - checkpoint = os.path.join(path, file_name) - - if not os.path.exists(checkpoint + ".meta"): - return cls(featurizer=featurizer) - - meta_file = os.path.join(path, "embedding_policy.json") - meta = json.loads(rasa.utils.io.read_file(meta_file)) - - with open(os.path.join(path, file_name + ".tf_config.pkl"), "rb") as f: - _tf_config = pickle.load(f) - - graph = tf.Graph() - with graph.as_default(): - session = tf.Session(config=_tf_config) - saver = tf.train.import_meta_graph(checkpoint + ".meta") - - saver.restore(session, checkpoint) - - a_in = train_utils.load_tensor("user_placeholder") - b_in = train_utils.load_tensor("bot_placeholder") - - sim_all = train_utils.load_tensor("similarity_all") - pred_confidence = train_utils.load_tensor("pred_confidence") - sim = train_utils.load_tensor("similarity") - - dial_embed = train_utils.load_tensor("dial_embed") - bot_embed = train_utils.load_tensor("bot_embed") - all_bot_embed = train_utils.load_tensor("all_bot_embed") - - attention_weights = train_utils.load_tensor("attention_weights") - - return cls( - featurizer=featurizer, - priority=meta["priority"], - graph=graph, - session=session, - user_placeholder=a_in, - bot_placeholder=b_in, - similarity_all=sim_all, - pred_confidence=pred_confidence, - similarity=sim, - dial_embed=dial_embed, - bot_embed=bot_embed, - all_bot_embed=all_bot_embed, - attention_weights=attention_weights, - ) diff --git a/rasa/core/policies/ensemble.py b/rasa/core/policies/ensemble.py index 4ffb75d63600..7fda792ba2e7 100644 --- a/rasa/core/policies/ensemble.py +++ b/rasa/core/policies/ensemble.py @@ -5,43 +5,48 @@ import sys from collections import defaultdict from datetime import datetime -from typing import Text, Optional, Any, List, Dict, Tuple - -import numpy as np +from typing import Text, Optional, Any, List, Dict, Tuple, Set, NamedTuple, Type import rasa.core import rasa.utils.io -from rasa.constants import MINIMUM_COMPATIBLE_VERSION, DOCS_BASE_URL +from rasa.constants import ( + MINIMUM_COMPATIBLE_VERSION, + DOCS_URL_POLICIES, + DEFAULT_CONFIG_PATH, + DOCS_URL_MIGRATION_GUIDE, + DOCS_URL_RULES, +) -from rasa.core import utils, training +from rasa.core import utils from rasa.core.constants import USER_INTENT_BACK, USER_INTENT_RESTART from rasa.core.actions.action import ( ACTION_LISTEN_NAME, ACTION_BACK_NAME, ACTION_RESTART_NAME, ) -from rasa.core.domain import Domain -from rasa.core.events import SlotSet, ActionExecuted, ActionExecutionRejected +from rasa.core.domain import Domain, InvalidDomain +from rasa.core.events import SlotSet, ActionExecuted, ActionExecutionRejected, Event from rasa.core.exceptions import UnsupportedDialogueModelError from rasa.core.featurizers import MaxHistoryTrackerFeaturizer -from rasa.core.policies.policy import Policy +from rasa.core.interpreter import NaturalLanguageInterpreter +from rasa.core.policies.policy import Policy, SupportedData from rasa.core.policies.fallback import FallbackPolicy from rasa.core.policies.memoization import MemoizationPolicy, AugmentedMemoizationPolicy +from rasa.core.policies.rule_policy import RulePolicy from rasa.core.trackers import DialogueStateTracker from rasa.core import registry -from rasa.utils.common import class_from_module_path +from rasa.utils import common as common_utils logger = logging.getLogger(__name__) -class PolicyEnsemble(object): +class PolicyEnsemble: versioned_packages = 
["rasa", "tensorflow", "sklearn"] def __init__( self, policies: List[Policy], action_fingerprints: Optional[Dict] = None ) -> None: self.policies = policies - self.training_trackers = None self.date_trained = None if action_fingerprints: @@ -52,23 +57,20 @@ def __init__( self._check_priorities() self._check_for_important_policies() - def _check_for_important_policies(self): + def _check_for_important_policies(self) -> None: from rasa.core.policies.mapping_policy import MappingPolicy - if not any(isinstance(policy, MappingPolicy) for policy in self.policies): + if not any( + isinstance(policy, (MappingPolicy, RulePolicy)) for policy in self.policies + ): logger.info( - "MappingPolicy not included in policy ensemble. Default intents " - "'{} and {} will not trigger actions '{}' and '{}'." - "".format( - USER_INTENT_RESTART, - USER_INTENT_BACK, - ACTION_RESTART_NAME, - ACTION_BACK_NAME, - ) + f"MappingPolicy not included in policy ensemble. Default intents " + f"'{USER_INTENT_RESTART} and {USER_INTENT_BACK} will not trigger " + f"actions '{ACTION_RESTART_NAME}' and '{ACTION_BACK_NAME}'." ) @staticmethod - def _training_events_from_trackers(training_trackers): + def _training_events_from_trackers(training_trackers) -> Dict[Text, Set[Event]]: events_metadata = defaultdict(set) for t in training_trackers: @@ -87,18 +89,19 @@ def check_domain_ensemble_compatibility( ) -> None: """Check for elements that only work with certain policy/domain combinations.""" - from rasa.core.policies.form_policy import FormPolicy from rasa.core.policies.mapping_policy import MappingPolicy from rasa.core.policies.two_stage_fallback import TwoStageFallbackPolicy policies_needing_validation = [ - FormPolicy, MappingPolicy, TwoStageFallbackPolicy, + RulePolicy, ] for policy in policies_needing_validation: policy.validate_against_domain(ensemble, domain) + _check_policy_for_forms_available(domain, ensemble) + def _check_priorities(self) -> None: """Checks for duplicate policy priorities within PolicyEnsemble.""" @@ -108,32 +111,108 @@ def _check_priorities(self) -> None: for k, v in priority_dict.items(): if len(v) > 1: - logger.warning( - ( - "Found policies {} with same priority {} " - "in PolicyEnsemble. When personalizing " - "priorities, be sure to give all policies " - "different priorities. More information: " - "{}/core/policies/" - ).format(v, k, DOCS_BASE_URL) + common_utils.raise_warning( + f"Found policies {v} with same priority {k} " + f"in PolicyEnsemble. When personalizing " + f"priorities, be sure to give all policies " + f"different priorities.", + docs=DOCS_URL_POLICIES, ) + def _policy_ensemble_contains_policy_with_rules_support(self) -> bool: + """Determine whether the policy ensemble contains at least one policy + supporting rule-based data. + + Returns: + Whether or not the policy ensemble contains at least one policy that + supports rule-based data. + """ + return any( + policy.supported_data() + in [SupportedData.RULE_DATA, SupportedData.ML_AND_RULE_DATA] + for policy in self.policies + ) + + @staticmethod + def _training_trackers_contain_rule_trackers( + training_trackers: List[DialogueStateTracker], + ) -> bool: + """Determine whether there are rule-based training trackers. + + Args: + training_trackers: Trackers to inspect. + + Returns: + Whether or not any of the supplied training trackers contain rule-based + data. 
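The rule-support check above relies on each policy advertising which kind of training data it consumes. A minimal sketch of a custom policy opting into rule data, assuming `supported_data` is a static method on `Policy` as the call above suggests:

from rasa.core.policies.policy import Policy, SupportedData


class MyRuleAwarePolicy(Policy):
    @staticmethod
    def supported_data() -> SupportedData:
        # declare that this policy can be trained on ML stories and rule trackers
        return SupportedData.ML_AND_RULE_DATA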
+ """ + return any(tracker.is_rule_tracker for tracker in training_trackers) + + def _emit_rule_policy_warning( + self, training_trackers: List[DialogueStateTracker] + ) -> None: + """Emit `UserWarning`s about missing rule-based data.""" + is_rules_consuming_policy_available = ( + self._policy_ensemble_contains_policy_with_rules_support() + ) + training_trackers_contain_rule_trackers = self._training_trackers_contain_rule_trackers( + training_trackers + ) + + if ( + is_rules_consuming_policy_available + and not training_trackers_contain_rule_trackers + ): + common_utils.raise_warning( + f"Found a rule-based policy in your pipeline but " + f"no rule-based training data. Please add rule-based " + f"stories to your training data or " + f"remove the rule-based policy (`{RulePolicy.__name__}`) from " + f"your pipeline.", + docs=DOCS_URL_RULES, + ) + elif ( + not is_rules_consuming_policy_available + and training_trackers_contain_rule_trackers + ): + common_utils.raise_warning( + f"Found rule-based training data but no policy supporting rule-based " + f"data. Please add `{RulePolicy.__name__}` or another rule-supporting " + f"policy to the `policies` section in `{DEFAULT_CONFIG_PATH}`.", + docs=DOCS_URL_RULES, + ) + def train( self, training_trackers: List[DialogueStateTracker], domain: Domain, - **kwargs: Any + interpreter: NaturalLanguageInterpreter, + **kwargs: Any, ) -> None: if training_trackers: + self._emit_rule_policy_warning(training_trackers) + for policy in self.policies: - policy.train(training_trackers, domain, **kwargs) + trackers_to_train = SupportedData.trackers_for_policy( + policy, training_trackers + ) + policy.train( + trackers_to_train, domain, interpreter=interpreter, **kwargs + ) + + training_events = self._training_events_from_trackers(training_trackers) + self.action_fingerprints = self._create_action_fingerprints(training_events) else: logger.info("Skipped training, because there are no training samples.") - self.training_trackers = training_trackers + self.date_trained = datetime.now().strftime("%Y%m%d-%H%M%S") def probabilities_using_best_policy( - self, tracker: DialogueStateTracker, domain: Domain + self, + tracker: DialogueStateTracker, + domain: Domain, + interpreter: NaturalLanguageInterpreter, + **kwargs: Any, ) -> Tuple[Optional[List[float]], Optional[Text]]: raise NotImplementedError @@ -149,7 +228,9 @@ def _max_histories(self) -> List[Optional[int]]: return max_histories @staticmethod - def _create_action_fingerprints(training_events): + def _create_action_fingerprints( + training_events: Dict[Text, Set[Event]] + ) -> Optional[Dict[Any, Dict[Text, List]]]: """Fingerprint each action using the events it created during train.
This allows us to emit warnings when the model is used @@ -174,24 +255,17 @@ def _add_package_version_info(self, metadata: Dict[Text, Any]) -> None: except ImportError: pass - def _persist_metadata( - self, path: Text, dump_flattened_stories: bool = False - ) -> None: + def _persist_metadata(self, path: Text) -> None: """Persists the domain specification to storage.""" # make sure the directory we persist exists domain_spec_path = os.path.join(path, "metadata.json") - training_data_path = os.path.join(path, "stories.md") rasa.utils.io.create_directory_for_file(domain_spec_path) policy_names = [utils.module_path_from_instance(p) for p in self.policies] - training_events = self._training_events_from_trackers(self.training_trackers) - - action_fingerprints = self._create_action_fingerprints(training_events) - metadata = { - "action_fingerprints": action_fingerprints, + "action_fingerprints": self.action_fingerprints, "python": ".".join([str(s) for s in sys.version_info[:3]]), "max_histories": self._max_histories(), "ensemble_name": self.__module__ + "." + self.__class__.__name__, @@ -201,17 +275,12 @@ def _persist_metadata( self._add_package_version_info(metadata) - utils.dump_obj_as_json_to_file(domain_spec_path, metadata) + rasa.utils.io.dump_obj_as_json_to_file(domain_spec_path, metadata) - # if there are lots of stories, saving flattened stories takes a long - # time, so this is turned off by default - if dump_flattened_stories: - training.persist_data(self.training_trackers, training_data_path) - - def persist(self, path: Text, dump_flattened_stories: bool = False) -> None: + def persist(self, path: Text) -> None: """Persists the policy to storage.""" - self._persist_metadata(path, dump_flattened_stories) + self._persist_metadata(path) for i, policy in enumerate(self.policies): dir_name = "policy_{}_{}".format(i, type(policy).__name__) @@ -219,13 +288,13 @@ def persist(self, path: Text, dump_flattened_stories: bool = False) -> None: policy.persist(policy_path) @classmethod - def load_metadata(cls, path): + def load_metadata(cls, path) -> Any: metadata_path = os.path.join(path, "metadata.json") metadata = json.loads(rasa.utils.io.read_file(os.path.abspath(metadata_path))) return metadata @staticmethod - def ensure_model_compatibility(metadata, version_to_check=None): + def ensure_model_compatibility(metadata, version_to_check=None) -> None: from packaging import version if version_to_check is None: @@ -247,9 +316,7 @@ def ensure_model_compatibility(metadata, version_to_check=None): @classmethod def _ensure_loaded_policy(cls, policy, policy_cls, policy_name: Text): if policy is None: - raise Exception( - "Failed to load policy {}: load returned None".format(policy_name) - ) + raise Exception(f"Failed to load policy {policy_name}: load returned None") elif not isinstance(policy, policy_cls): raise Exception( "Failed to load policy {}: " @@ -266,19 +333,23 @@ def load(cls, path: Text) -> "PolicyEnsemble": policies = [] for i, policy_name in enumerate(metadata["policy_names"]): policy_cls = registry.policy_from_module_path(policy_name) - dir_name = "policy_{}_{}".format(i, policy_cls.__name__) + dir_name = f"policy_{i}_{policy_cls.__name__}" policy_path = os.path.join(path, dir_name) policy = policy_cls.load(policy_path) cls._ensure_loaded_policy(policy, policy_cls, policy_name) policies.append(policy) - ensemble_cls = class_from_module_path(metadata["ensemble_name"]) + ensemble_cls = common_utils.class_from_module_path(metadata["ensemble_name"]) fingerprints = 
metadata.get("action_fingerprints", {}) ensemble = ensemble_cls(policies, fingerprints) return ensemble @classmethod - def from_dict(cls, dictionary: Dict[Text, Any]) -> List[Policy]: - policies = dictionary.get("policies") or dictionary.get("policy") + def from_dict(cls, policy_configuration: Dict[Text, Any]) -> List[Policy]: + import copy + + policies = policy_configuration.get("policies") or policy_configuration.get( + "policy" + ) if policies is None: raise InvalidPolicyConfig( "You didn't define any policies. " @@ -290,10 +361,10 @@ def from_dict(cls, dictionary: Dict[Text, Any]) -> List[Policy]: "The policy configuration file has to include at least one policy." ) + policies = copy.deepcopy(policies) # don't manipulate passed `Dict` parsed_policies = [] for policy in policies: - policy_name = policy.pop("name") if policy.get("featurizer"): featurizer_func, featurizer_config = cls.get_featurizer_from_dict( @@ -301,9 +372,10 @@ def from_dict(cls, dictionary: Dict[Text, Any]) -> List[Policy]: ) if featurizer_config.get("state_featurizer"): - state_featurizer_func, state_featurizer_config = cls.get_state_featurizer_from_dict( - featurizer_config - ) + ( + state_featurizer_func, + state_featurizer_config, + ) = cls.get_state_featurizer_from_dict(featurizer_config) # override featurizer's state_featurizer # with real state_featurizer class @@ -316,7 +388,10 @@ def from_dict(cls, dictionary: Dict[Text, Any]) -> List[Policy]: try: constr_func = registry.policy_from_module_path(policy_name) - policy_object = constr_func(**policy) + try: + policy_object = constr_func(**policy) + except TypeError as e: + raise Exception(f"Could not initialize {policy_name}. {e}") parsed_policies.append(policy_object) except (ImportError, AttributeError): raise InvalidPolicyConfig( @@ -326,10 +401,12 @@ def from_dict(cls, dictionary: Dict[Text, Any]) -> List[Policy]: "".format(policy_name) ) + cls._assert_rule_policy_not_used_with_other_rule_like_policy(parsed_policies) + return parsed_policies @classmethod - def get_featurizer_from_dict(cls, policy): + def get_featurizer_from_dict(cls, policy) -> Tuple[Any, Any]: # policy can have only 1 featurizer if len(policy["featurizer"]) > 1: raise InvalidPolicyConfig("policy can have only 1 featurizer") @@ -340,7 +417,7 @@ def get_featurizer_from_dict(cls, policy): return featurizer_func, featurizer_config @classmethod - def get_state_featurizer_from_dict(cls, featurizer_config): + def get_state_featurizer_from_dict(cls, featurizer_config) -> Tuple[Any, Any]: # featurizer can have only 1 state featurizer if len(featurizer_config["state_featurizer"]) > 1: raise InvalidPolicyConfig("featurizer can have only 1 state featurizer") @@ -352,86 +429,293 @@ def get_state_featurizer_from_dict(cls, featurizer_config): return state_featurizer_func, state_featurizer_config - def continue_training( - self, trackers: List[DialogueStateTracker], domain: Domain, **kwargs: Any + @staticmethod + def _assert_rule_policy_not_used_with_other_rule_like_policy( + policies: List[Policy], ) -> None: + if not any(isinstance(policy, RulePolicy) for policy in policies): + return - self.training_trackers.extend(trackers) - for p in self.policies: - p.continue_training(self.training_trackers, domain, **kwargs) + from rasa.core.policies.mapping_policy import MappingPolicy + from rasa.core.policies.form_policy import FormPolicy + from rasa.core.policies.two_stage_fallback import TwoStageFallbackPolicy + + policies_not_be_used_with_rule_policy = ( + MappingPolicy, + FormPolicy, + FallbackPolicy, + 
TwoStageFallbackPolicy, + ) + + if any( + isinstance(policy, policies_not_be_used_with_rule_policy) + for policy in policies + ): + raise InvalidPolicyConfig( + f"It is not possible to use the RulePolicy with " + f"other policies which implement rule-like " + f"behavior. Either re-implement the desired " + f"behavior as rules or remove the RulePolicy from " + f"your policy configuration. Please see the Rasa Open Source 2.0 " + f"migration guide ({DOCS_URL_MIGRATION_GUIDE}) for more information." + ) + + +class Prediction(NamedTuple): + """Stores the probabilities and the priority of the prediction.""" + + probabilities: List[float] + priority: int class SimplePolicyEnsemble(PolicyEnsemble): @staticmethod - def is_not_memo_policy(best_policy_name): - is_memo = best_policy_name.endswith("_" + MemoizationPolicy.__name__) - is_augmented = best_policy_name.endswith( - "_" + AugmentedMemoizationPolicy.__name__ - ) - return not (is_memo or is_augmented) + def is_not_memo_policy( + policy_name: Text, max_confidence: Optional[float] = None + ) -> bool: + is_memo = policy_name.endswith("_" + MemoizationPolicy.__name__) + is_augmented = policy_name.endswith("_" + AugmentedMemoizationPolicy.__name__) + # also check if confidence is 0, because then it cannot count as a prediction + return not (is_memo or is_augmented) or max_confidence == 0.0 - def probabilities_using_best_policy( - self, tracker: DialogueStateTracker, domain: Domain + @staticmethod + def _is_not_mapping_policy( + policy_name: Text, max_confidence: Optional[float] = None + ) -> bool: + from rasa.core.policies.mapping_policy import MappingPolicy + + is_mapping = policy_name.endswith("_" + MappingPolicy.__name__) + # also check if confidence is 0, because then it cannot count as a prediction + return not is_mapping or max_confidence == 0.0 + + @staticmethod + def _is_form_policy(policy_name: Text) -> bool: + from rasa.core.policies.form_policy import FormPolicy + + return policy_name.endswith("_" + FormPolicy.__name__) + + def _pick_best_policy( + self, predictions: Dict[Text, Prediction] ) -> Tuple[Optional[List[float]], Optional[Text]]: - result = None - max_confidence = -1 + """Picks the best policy prediction based on probabilities and policy priority.
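A plain-Python sketch of the comparison `_pick_best_policy` performs: every prediction is reduced to a `(max confidence, priority)` tuple, so the highest confidence wins and the policy priority breaks ties. The policy names and numbers here are illustrative only.

predictions = {
    "policy_0_MemoizationPolicy": ([0.0, 1.0, 0.0], 3),  # (probabilities, priority)
    "policy_1_SomeMLPolicy": ([0.1, 1.0, 0.2], 1),
}

best_name = max(
    predictions, key=lambda name: (max(predictions[name][0]), predictions[name][1])
)
# -> "policy_0_MemoizationPolicy": equal top confidence, higher priority wins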
+ + Args: + predictions: the dictionary containing policy name as keys + and predictions as values + + Returns: + best_probabilities: the list of probabilities for the next actions + best_policy_name: the name of the picked policy + """ + + best_confidence = (-1, -1) best_policy_name = None - best_policy_priority = -1 - for i, p in enumerate(self.policies): - probabilities = p.predict_action_probabilities(tracker, domain) + # form and mapping policies are special: + # form should be above fallback + # mapping should be below fallback + # mapping is above form if it wins over fallback + # therefore form predictions are stored separately + + form_confidence = None + form_policy_name = None + + for policy_name, prediction in predictions.items(): + confidence = (max(prediction.probabilities), prediction.priority) + if self._is_form_policy(policy_name): + # store form prediction separately + form_confidence = confidence + form_policy_name = policy_name + elif confidence > best_confidence: + # pick the best policy + best_confidence = confidence + best_policy_name = policy_name + + if form_confidence is not None and self._is_not_mapping_policy( + best_policy_name, best_confidence[0] + ): + # if mapping didn't win, check form policy predictions + if form_confidence > best_confidence: + best_policy_name = form_policy_name + + return predictions[best_policy_name].probabilities, best_policy_name - if len(tracker.events) > 0 and isinstance( - tracker.events[-1], ActionExecutionRejected - ): - probabilities[ - domain.index_for_action(tracker.events[-1].action_name) + def _best_policy_prediction( + self, + tracker: DialogueStateTracker, + domain: Domain, + interpreter: NaturalLanguageInterpreter, + ) -> Tuple[Optional[List[float]], Optional[Text]]: + """Finds the best policy prediction. + + Args: + tracker: the :class:`rasa.core.trackers.DialogueStateTracker` + domain: the :class:`rasa.core.domain.Domain` + interpreter: Interpreter which may be used by the policies to create + additional features. + + Returns: + probabilities: the list of probabilities for the next actions + policy_name: the name of the picked policy + """ + + # find rejected action before running the policies + # because some of them might add events + rejected_action_name = None + if len(tracker.events) > 0 and isinstance( + tracker.events[-1], ActionExecutionRejected + ): + rejected_action_name = tracker.events[-1].action_name + + predictions = { + f"policy_{i}_{type(p).__name__}": self._get_prediction( + p, tracker, domain, interpreter + ) + for i, p in enumerate(self.policies) + } + + if rejected_action_name: + logger.debug( + f"Execution of '{rejected_action_name}' was rejected. " + f"Setting its confidence to 0.0 in all predictions." 
+ ) + for prediction in predictions.values(): + prediction.probabilities[ + domain.index_for_action(rejected_action_name) ] = 0.0 - confidence = np.max(probabilities) - if (confidence, p.priority) > (max_confidence, best_policy_priority): - max_confidence = confidence - result = probabilities - best_policy_name = "policy_{}_{}".format(i, type(p).__name__) - best_policy_priority = p.priority + return self._pick_best_policy(predictions) + + @staticmethod + def _get_prediction( + policy: Policy, + tracker: DialogueStateTracker, + domain: Domain, + interpreter: NaturalLanguageInterpreter, + ) -> Prediction: + number_of_arguments_in_rasa_1_0 = 2 + arguments = common_utils.arguments_of(policy.predict_action_probabilities) + if ( + len(arguments) > number_of_arguments_in_rasa_1_0 + and "interpreter" in arguments + ): + probabilities = policy.predict_action_probabilities( + tracker, domain, interpreter + ) + else: + common_utils.raise_warning( + "The function `predict_action_probabilities` of " + "the `Policy` interface was changed to support " + "additional parameters. Please make sure to " + "adapt your custom `Policy` implementation.", + category=DeprecationWarning, + ) + probabilities = policy.predict_action_probabilities(tracker, domain) + + return Prediction(probabilities, policy.priority) + + def _fallback_after_listen( + self, domain: Domain, probabilities: List[float], policy_name: Text + ) -> Tuple[List[float], Text]: + """Triggers fallback if `action_listen` is predicted after a user utterance. + + This is done on the condition that: + - a fallback policy is present, + - there was just a user message and the predicted + action is action_listen by a policy + other than the MemoizationPolicy + + Args: + domain: the :class:`rasa.core.domain.Domain` + probabilities: the list of probabilities for the next actions + policy_name: the name of the picked policy + + Returns: + probabilities: the list of probabilities for the next actions + policy_name: the name of the picked policy + """ + + fallback_idx_policy = [ + (i, p) for i, p in enumerate(self.policies) if isinstance(p, FallbackPolicy) + ] + + if fallback_idx_policy: + fallback_idx, fallback_policy = fallback_idx_policy[0] + + logger.debug( + f"Action 'action_listen' was predicted after " + f"a user message using {policy_name}. Predicting " + f"fallback action: {fallback_policy.fallback_action_name}" + ) + + probabilities = fallback_policy.fallback_scores(domain) + policy_name = f"policy_{fallback_idx}_{type(fallback_policy).__name__}" + + return probabilities, policy_name + + def probabilities_using_best_policy( + self, + tracker: DialogueStateTracker, + domain: Domain, + interpreter: NaturalLanguageInterpreter, + **kwargs: Any, + ) -> Tuple[Optional[List[float]], Optional[Text]]: + """Predicts the next action the bot should take after seeing the tracker. + + Picks the best policy prediction based on probabilities and policy priority. + Triggers fallback if `action_listen` is predicted after a user utterance. + + Args: + tracker: the :class:`rasa.core.trackers.DialogueStateTracker` + domain: the :class:`rasa.core.domain.Domain` + interpreter: Interpreter which may be used by the policies to create + additional features. 
+ + Returns: + best_probabilities: the list of probabilities for the next actions + best_policy_name: the name of the picked policy + """ + + probabilities, policy_name = self._best_policy_prediction( + tracker, domain, interpreter + ) if ( - result is not None - and result.index(max_confidence) + tracker.latest_action_name == ACTION_LISTEN_NAME + and probabilities is not None + and probabilities.index(max(probabilities)) == domain.index_for_action(ACTION_LISTEN_NAME) - and tracker.latest_action_name == ACTION_LISTEN_NAME - and self.is_not_memo_policy(best_policy_name) + and self.is_not_memo_policy(policy_name, max(probabilities)) ): - # Trigger the fallback policy when ActionListen is predicted after - # a user utterance. This is done on the condition that: - # - a fallback policy is present, - # - there was just a user message and the predicted - # action is action_listen by a policy - # other than the MemoizationPolicy - - fallback_idx_policy = [ - (i, p) - for i, p in enumerate(self.policies) - if isinstance(p, FallbackPolicy) - ] - - if fallback_idx_policy: - fallback_idx, fallback_policy = fallback_idx_policy[0] - - logger.debug( - "Action 'action_listen' was predicted after " - "a user message using {}. " - "Predicting fallback action: {}" - "".format(best_policy_name, fallback_policy.fallback_action_name) - ) + probabilities, policy_name = self._fallback_after_listen( + domain, probabilities, policy_name + ) - result = fallback_policy.fallback_scores(domain) - best_policy_name = "policy_{}_{}".format( - fallback_idx, type(fallback_policy).__name__ - ) + logger.debug(f"Predicted next action using {policy_name}") + return probabilities, policy_name + + +def _check_policy_for_forms_available( + domain: Domain, ensemble: Optional["PolicyEnsemble"] +) -> None: + if not ensemble: + return + + from rasa.core.policies.form_policy import FormPolicy - logger.debug("Predicted next action using {}".format(best_policy_name)) - return result, best_policy_name + suited_policies_for_forms = (FormPolicy, RulePolicy) + + has_policy_for_forms = ensemble is not None and any( + isinstance(policy, suited_policies_for_forms) for policy in ensemble.policies + ) + + if domain.form_names and not has_policy_for_forms: + raise InvalidDomain( + "You have defined a form action, but haven't added the " + "FormPolicy to your policy ensemble. Either remove all " + "forms from your domain or exclude the FormPolicy from your " + "policy configuration." + ) class InvalidPolicyConfig(Exception): diff --git a/rasa/core/policies/fallback.py b/rasa/core/policies/fallback.py index 1f40f27adb30..cb4f1cf855c2 100644 --- a/rasa/core/policies/fallback.py +++ b/rasa/core/policies/fallback.py @@ -3,12 +3,14 @@ import os from typing import Any, List, Text, Optional, Dict, Tuple -from rasa.core.actions.action import ACTION_LISTEN_NAME +from rasa.constants import DOCS_URL_MIGRATION_GUIDE +from rasa.core.actions.action import ACTION_LISTEN_NAME, ACTION_DEFAULT_FALLBACK_NAME import rasa.utils.io +from rasa.utils import common as common_utils -from rasa.core import utils from rasa.core.domain import Domain +from rasa.core.interpreter import NaturalLanguageInterpreter, RegexInterpreter from rasa.core.policies.policy import Policy from rasa.core.trackers import DialogueStateTracker from rasa.core.constants import FALLBACK_POLICY_PRIORITY @@ -24,7 +26,7 @@ class FallbackPolicy(Policy): prediction. 
""" @staticmethod - def _standard_featurizer(): + def _standard_featurizer() -> None: return None def __init__( @@ -33,7 +35,7 @@ def __init__( nlu_threshold: float = 0.3, ambiguity_threshold: float = 0.1, core_threshold: float = 0.3, - fallback_action_name: Text = "action_default_fallback", + fallback_action_name: Text = ACTION_DEFAULT_FALLBACK_NAME, ) -> None: """Create a new Fallback policy. @@ -49,18 +51,26 @@ def __init__( between confidences of the top two predictions fallback_action_name: name of the action to execute as a fallback """ - super(FallbackPolicy, self).__init__(priority=priority) + super().__init__(priority=priority) self.nlu_threshold = nlu_threshold self.ambiguity_threshold = ambiguity_threshold self.core_threshold = core_threshold self.fallback_action_name = fallback_action_name + common_utils.raise_warning( + f"'{self.__class__.__name__}' is deprecated and will be removed " + "in the future. It is recommended to use the 'RulePolicy' instead.", + category=FutureWarning, + docs=DOCS_URL_MIGRATION_GUIDE, + ) + def train( self, training_trackers: List[DialogueStateTracker], domain: Domain, - **kwargs: Any + interpreter: NaturalLanguageInterpreter, + **kwargs: Any, ) -> None: """Does nothing. This policy is deterministic.""" @@ -123,16 +133,22 @@ def should_nlu_fallback( return False - def fallback_scores(self, domain, fallback_score=1.0): + def fallback_scores( + self, domain: Domain, fallback_score: float = 1.0 + ) -> List[float]: """Prediction scores used if a fallback is necessary.""" - result = [0.0] * domain.num_actions + result = self._default_predictions(domain) idx = domain.index_for_action(self.fallback_action_name) result[idx] = fallback_score return result def predict_action_probabilities( - self, tracker: DialogueStateTracker, domain: Domain + self, + tracker: DialogueStateTracker, + domain: Domain, + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + **kwargs: Any, ) -> List[float]: """Predicts a fallback action. 
@@ -142,8 +158,16 @@ def predict_action_probabilities( nlu_data = tracker.latest_message.parse_data - if tracker.latest_action_name == self.fallback_action_name: - result = [0.0] * domain.num_actions + if ( + tracker.latest_action_name == self.fallback_action_name + and tracker.latest_action_name != ACTION_LISTEN_NAME + ): + logger.debug( + "Predicted 'action_listen' after fallback action '{}'".format( + self.fallback_action_name + ) + ) + result = self._default_predictions(domain) idx = domain.index_for_action(ACTION_LISTEN_NAME) result[idx] = 1.0 @@ -177,7 +201,7 @@ def persist(self, path: Text) -> None: "fallback_action_name": self.fallback_action_name, } rasa.utils.io.create_directory_for_file(config_file) - utils.dump_obj_as_json_to_file(config_file, meta) + rasa.utils.io.dump_obj_as_json_to_file(config_file, meta) @classmethod def load(cls, path: Text) -> "FallbackPolicy": diff --git a/rasa/core/policies/form_policy.py b/rasa/core/policies/form_policy.py index 53ad5b01f53d..a221f5411cc8 100644 --- a/rasa/core/policies/form_policy.py +++ b/rasa/core/policies/form_policy.py @@ -1,17 +1,17 @@ import logging -import typing -from typing import List, Optional, Dict, Text, Optional +from typing import List, Dict, Text, Optional, Any +from rasa.constants import DOCS_URL_MIGRATION_GUIDE from rasa.core.actions.action import ACTION_LISTEN_NAME -from rasa.core.domain import PREV_PREFIX, ACTIVE_FORM_PREFIX, Domain, InvalidDomain +from rasa.core.domain import PREV_PREFIX, ACTIVE_FORM_PREFIX, Domain from rasa.core.events import FormValidation from rasa.core.featurizers import TrackerFeaturizer +from rasa.core.interpreter import NaturalLanguageInterpreter, RegexInterpreter from rasa.core.policies.memoization import MemoizationPolicy from rasa.core.trackers import DialogueStateTracker from rasa.core.constants import FORM_POLICY_PRIORITY -if typing.TYPE_CHECKING: - from rasa.core.policies.ensemble import PolicyEnsemble +from rasa.utils import common as common_utils logger = logging.getLogger(__name__) @@ -31,30 +31,19 @@ def __init__( # max history is set to 2 in order to capture # previous meaningful action before action listen - super(FormPolicy, self).__init__( + super().__init__( featurizer=featurizer, priority=priority, max_history=2, lookup=lookup ) - @classmethod - def validate_against_domain( - cls, ensemble: Optional["PolicyEnsemble"], domain: Optional[Domain] - ) -> None: - if not domain: - return - - has_form_policy = ensemble is not None and any( - isinstance(p, cls) for p in ensemble.policies + common_utils.raise_warning( + f"'{FormPolicy.__name__}' is deprecated and will be removed in " + "in the future. It is recommended to use the 'RulePolicy' instead.", + category=FutureWarning, + docs=DOCS_URL_MIGRATION_GUIDE, ) - if domain.form_names and not has_form_policy: - raise InvalidDomain( - "You have defined a form action, but haven't added the " - "FormPolicy to your policy ensemble. Either remove all " - "forms from your domain or exclude the FormPolicy from your " - "policy configuration." 
- ) @staticmethod - def _get_active_form_name(state): + def _get_active_form_name(state: Dict[Text, float]) -> Optional[Text]: found_forms = [ state_name[len(ACTIVE_FORM_PREFIX) :] for state_name, prob in state.items() @@ -64,14 +53,16 @@ def _get_active_form_name(state): return found_forms[0] if found_forms else None @staticmethod - def _prev_action_listen_in_state(state): + def _prev_action_listen_in_state(state: Dict[Text, float]) -> bool: return any( PREV_PREFIX + ACTION_LISTEN_NAME in state_name and prob > 0 for state_name, prob in state.items() ) @staticmethod - def _modified_states(states): + def _modified_states( + states: List[Dict[Text, float]] + ) -> List[Optional[Dict[Text, float]]]: """Modify the states to - capture previous meaningful action before action_listen - ignore previous intent @@ -87,10 +78,13 @@ def _modified_states(states): return [action_before_listen, states[-1]] - def _add_states_to_lookup( - self, trackers_as_states, trackers_as_actions, domain, online=False - ): + def _create_lookup_from_states( + self, + trackers_as_states: List[List[Dict]], + trackers_as_actions: List[List[Text]], + ) -> Dict[Text, Text]: """Add states to lookup dict""" + lookup = {} for states in trackers_as_states: active_form = self._get_active_form_name(states[-1]) if active_form and self._prev_action_listen_in_state(states[-1]): @@ -100,18 +94,19 @@ def _add_states_to_lookup( # even if there are two identical feature keys # their form will be the same # because of `active_form_...` feature - self.lookup[feature_key] = active_form + lookup[feature_key] = active_form + return lookup def recall( self, states: List[Dict[Text, float]], tracker: DialogueStateTracker, domain: Domain, - ) -> Optional[int]: + ) -> Optional[Text]: # modify the states return self._recall_states(self._modified_states(states)) - def state_is_unhappy(self, tracker, domain): + def state_is_unhappy(self, tracker: DialogueStateTracker, domain: Domain) -> bool: # since it is assumed that training stories contain # only unhappy paths, notify the form that # it should not be validated if predicted by other policy @@ -122,7 +117,7 @@ def state_is_unhappy(self, tracker, domain): state_is_unhappy = ( memorized_form is not None - and memorized_form == tracker.active_form.get("name") + and memorized_form == tracker.active_loop.get("name") ) if state_is_unhappy: @@ -135,30 +130,35 @@ def state_is_unhappy(self, tracker, domain): return state_is_unhappy def predict_action_probabilities( - self, tracker: DialogueStateTracker, domain: Domain + self, + tracker: DialogueStateTracker, + domain: Domain, + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + **kwargs: Any, ) -> List[float]: """Predicts the corresponding form action if there is an active form""" - result = [0.0] * domain.num_actions + result = self._default_predictions(domain) - if tracker.active_form.get("name"): + if tracker.active_loop.get("name"): logger.debug( - "There is an active form '{}'".format(tracker.active_form["name"]) + "There is an active form '{}'".format(tracker.active_loop["name"]) ) if tracker.latest_action_name == ACTION_LISTEN_NAME: # predict form action after user utterance - if tracker.active_form.get("rejected"): + if tracker.active_loop.get("rejected"): if self.state_is_unhappy(tracker, domain): tracker.update(FormValidation(False)) return result - idx = domain.index_for_action(tracker.active_form["name"]) - result[idx] = 1.0 + result = self._prediction_result( + tracker.active_loop["name"], tracker, domain + ) - elif 
tracker.latest_action_name == tracker.active_form.get("name"): + elif tracker.latest_action_name == tracker.active_loop.get("name"): # predict action_listen after form action - idx = domain.index_for_action(ACTION_LISTEN_NAME) - result[idx] = 1.0 + result = self._prediction_result(ACTION_LISTEN_NAME, tracker, domain) + else: logger.debug("There is no active form") diff --git a/rasa/core/policies/keras_policy.py b/rasa/core/policies/keras_policy.py deleted file mode 100644 index 4c3542b2ff59..000000000000 --- a/rasa/core/policies/keras_policy.py +++ /dev/null @@ -1,342 +0,0 @@ -import copy -import json -import logging -import os -import tensorflow as tf -import numpy as np -import warnings -import typing -from typing import Any, List, Dict, Text, Optional, Tuple - -import rasa.utils.io - -from rasa.core import utils -from rasa.core.domain import Domain -from rasa.core.featurizers import ( - MaxHistoryTrackerFeaturizer, - BinarySingleStateFeaturizer, -) -from rasa.core.featurizers import TrackerFeaturizer -from rasa.core.policies.policy import Policy -from rasa.core.trackers import DialogueStateTracker -from rasa.utils.common import obtain_verbosity -from rasa.utils.train_utils import load_tf_config -from rasa.core.constants import DEFAULT_POLICY_PRIORITY - -# there are a number of issues with imports from tensorflow. hence the deactivation -# pytype: disable=import-error -# pytype: disable=module-attr -try: - import cPickle as pickle -except ImportError: - import pickle - - -logger = logging.getLogger(__name__) - - -class KerasPolicy(Policy): - SUPPORTS_ONLINE_TRAINING = True - - defaults = { - # Neural Net and training params - "rnn_size": 32, - "epochs": 100, - "batch_size": 32, - "validation_split": 0.1, - # set random seed to any int to get reproducible results - "random_seed": None, - } - - @staticmethod - def _standard_featurizer(max_history=None): - return MaxHistoryTrackerFeaturizer( - BinarySingleStateFeaturizer(), max_history=max_history - ) - - def __init__( - self, - featurizer: Optional[TrackerFeaturizer] = None, - priority: int = DEFAULT_POLICY_PRIORITY, - model: Optional[tf.keras.models.Sequential] = None, - graph: Optional[tf.Graph] = None, - session: Optional[tf.Session] = None, - current_epoch: int = 0, - max_history: Optional[int] = None, - **kwargs: Any - ) -> None: - if not featurizer: - featurizer = self._standard_featurizer(max_history) - super(KerasPolicy, self).__init__(featurizer, priority) - - self._load_params(**kwargs) - self.model = model - # by default keras uses default tf graph and global tf session - # we are going to either load them or create them in train(...) 
- self.graph = graph - self.session = session - - self.current_epoch = current_epoch - - def _load_params(self, **kwargs: Dict[Text, Any]) -> None: - config = copy.deepcopy(self.defaults) - config.update(kwargs) - - # filter out kwargs that are used explicitly - self._tf_config = load_tf_config(config) - self.rnn_size = config.pop("rnn_size") - self.epochs = config.pop("epochs") - self.batch_size = config.pop("batch_size") - self.validation_split = config.pop("validation_split") - self.random_seed = config.pop("random_seed") - - self._train_params = config - - @property - def max_len(self): - if self.model: - return self.model.layers[0].batch_input_shape[1] - else: - return None - - def _build_model(self, num_features, num_actions, max_history_len): - warnings.warn( - "Deprecated, use `model_architecture` instead.", - DeprecationWarning, - stacklevel=2, - ) - return - - def model_architecture( - self, input_shape: Tuple[int, int], output_shape: Tuple[int, Optional[int]] - ) -> tf.keras.models.Sequential: - """Build a keras model and return a compiled model.""" - - from tensorflow.keras.models import Sequential - from tensorflow.keras.layers import ( - Masking, - LSTM, - Dense, - TimeDistributed, - Activation, - ) - - # Build Model - model = Sequential() - - # the shape of the y vector of the labels, - # determines which output from rnn will be used - # to calculate the loss - if len(output_shape) == 1: - # y is (num examples, num features) so - # only the last output from the rnn is used to - # calculate the loss - model.add(Masking(mask_value=-1, input_shape=input_shape)) - model.add(LSTM(self.rnn_size, dropout=0.2)) - model.add(Dense(input_dim=self.rnn_size, units=output_shape[-1])) - elif len(output_shape) == 2: - # y is (num examples, max_dialogue_len, num features) so - # all the outputs from the rnn are used to - # calculate the loss, therefore a sequence is returned and - # time distributed layer is used - - # the first value in input_shape is max dialogue_len, - # it is set to None, to allow dynamic_rnn creation - # during prediction - model.add(Masking(mask_value=-1, input_shape=(None, input_shape[1]))) - model.add(LSTM(self.rnn_size, return_sequences=True, dropout=0.2)) - model.add(TimeDistributed(Dense(units=output_shape[-1]))) - else: - raise ValueError( - "Cannot construct the model because" - "length of output_shape = {} " - "should be 1 or 2." 
- "".format(len(output_shape)) - ) - - model.add(Activation("softmax")) - - model.compile( - loss="categorical_crossentropy", optimizer="rmsprop", metrics=["accuracy"] - ) - - if obtain_verbosity() > 0: - model.summary() - - return model - - def train( - self, - training_trackers: List[DialogueStateTracker], - domain: Domain, - **kwargs: Any - ) -> None: - - # set numpy random seed - np.random.seed(self.random_seed) - - training_data = self.featurize_for_training(training_trackers, domain, **kwargs) - # noinspection PyPep8Naming - shuffled_X, shuffled_y = training_data.shuffled_X_y() - - self.graph = tf.Graph() - with self.graph.as_default(): - # set random seed in tf - tf.set_random_seed(self.random_seed) - self.session = tf.Session(config=self._tf_config) - - with self.session.as_default(): - if self.model is None: - self.model = self.model_architecture( - shuffled_X.shape[1:], shuffled_y.shape[1:] - ) - - logger.info( - "Fitting model with {} total samples and a " - "validation split of {}" - "".format(training_data.num_examples(), self.validation_split) - ) - - # filter out kwargs that cannot be passed to fit - self._train_params = self._get_valid_params( - self.model.fit, **self._train_params - ) - - self.model.fit( - shuffled_X, - shuffled_y, - epochs=self.epochs, - batch_size=self.batch_size, - shuffle=False, - verbose=obtain_verbosity(), - **self._train_params - ) - # the default parameter for epochs in keras fit is 1 - self.current_epoch = self.defaults.get("epochs", 1) - logger.info("Done fitting keras policy model") - - def continue_training( - self, - training_trackers: List[DialogueStateTracker], - domain: Domain, - **kwargs: Any - ) -> None: - """Continues training an already trained policy.""" - - # takes the new example labelled and learns it - # via taking `epochs` samples of n_batch-1 parts of the training data, - # inserting our new example and learning them. 
this means that we can - # ask the network to fit the example without overemphasising - # its importance (and therefore throwing off the biases) - - batch_size = kwargs.get("batch_size", 5) - epochs = kwargs.get("epochs", 50) - - with self.graph.as_default(), self.session.as_default(): - for _ in range(epochs): - training_data = self._training_data_for_continue_training( - batch_size, training_trackers, domain - ) - - # fit to one extra example using updated trackers - self.model.fit( - training_data.X, - training_data.y, - epochs=self.current_epoch + 1, - batch_size=len(training_data.y), - verbose=obtain_verbosity(), - initial_epoch=self.current_epoch, - ) - - self.current_epoch += 1 - - def predict_action_probabilities( - self, tracker: DialogueStateTracker, domain: Domain - ) -> List[float]: - - # noinspection PyPep8Naming - X = self.featurizer.create_X([tracker], domain) - - with self.graph.as_default(), self.session.as_default(): - y_pred = self.model.predict(X, batch_size=1) - - if len(y_pred.shape) == 2: - return y_pred[-1].tolist() - elif len(y_pred.shape) == 3: - return y_pred[0, -1].tolist() - else: - raise Exception("Network prediction has invalid shape.") - - def persist(self, path: Text) -> None: - - if self.model: - self.featurizer.persist(path) - - meta = { - "priority": self.priority, - "model": "keras_model.h5", - "epochs": self.current_epoch, - } - - meta_file = os.path.join(path, "keras_policy.json") - utils.dump_obj_as_json_to_file(meta_file, meta) - - model_file = os.path.join(path, meta["model"]) - # makes sure the model directory exists - rasa.utils.io.create_directory_for_file(model_file) - with self.graph.as_default(), self.session.as_default(): - self.model.save(model_file, overwrite=True) - - tf_config_file = os.path.join(path, "keras_policy.tf_config.pkl") - with open(tf_config_file, "wb") as f: - pickle.dump(self._tf_config, f) - else: - warnings.warn( - "Method `persist(...)` was called " - "without a trained model present. " - "Nothing to persist then!" - ) - - @classmethod - def load(cls, path: Text) -> "KerasPolicy": - from tensorflow.keras.models import load_model - - if os.path.exists(path): - featurizer = TrackerFeaturizer.load(path) - meta_file = os.path.join(path, "keras_policy.json") - if os.path.isfile(meta_file): - meta = json.loads(rasa.utils.io.read_file(meta_file)) - - tf_config_file = os.path.join(path, "keras_policy.tf_config.pkl") - with open(tf_config_file, "rb") as f: - _tf_config = pickle.load(f) - - model_file = os.path.join(path, meta["model"]) - - graph = tf.Graph() - with graph.as_default(): - session = tf.Session(config=_tf_config) - with session.as_default(): - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - model = load_model(model_file) - - return cls( - featurizer=featurizer, - priority=meta["priority"], - model=model, - graph=graph, - session=session, - current_epoch=meta["epochs"], - ) - else: - return cls(featurizer=featurizer) - else: - raise Exception( - "Failed to load dialogue model. 
Path {} " - "doesn't exist".format(os.path.abspath(path)) - ) - - -# pytype: enable=import-error -# pytype: disable=module-attr diff --git a/rasa/core/policies/mapping_policy.py b/rasa/core/policies/mapping_policy.py index 197420714799..6749eeb64796 100644 --- a/rasa/core/policies/mapping_policy.py +++ b/rasa/core/policies/mapping_policy.py @@ -4,20 +4,29 @@ import typing from typing import Any, List, Text, Optional +from rasa.constants import DOCS_URL_POLICIES, DOCS_URL_MIGRATION_GUIDE import rasa.utils.io +from rasa.nlu.constants import INTENT_NAME_KEY +from rasa.utils import common as common_utils -from rasa.core import utils from rasa.core.actions.action import ( ACTION_BACK_NAME, ACTION_LISTEN_NAME, ACTION_RESTART_NAME, + ACTION_SESSION_START_NAME, +) +from rasa.core.constants import ( + USER_INTENT_BACK, + USER_INTENT_RESTART, + USER_INTENT_SESSION_START, ) -from rasa.core.constants import USER_INTENT_BACK, USER_INTENT_RESTART from rasa.core.domain import Domain, InvalidDomain from rasa.core.events import ActionExecuted +from rasa.core.interpreter import NaturalLanguageInterpreter, RegexInterpreter from rasa.core.policies.policy import Policy from rasa.core.trackers import DialogueStateTracker from rasa.core.constants import MAPPING_POLICY_PRIORITY +from rasa.utils.common import raise_warning if typing.TYPE_CHECKING: from rasa.core.policies.ensemble import PolicyEnsemble @@ -31,16 +40,24 @@ class MappingPolicy(Policy): Intents can be assigned actions in the domain file which are to be executed whenever the intent is detected. This policy takes precedence over - any other policy.""" + any other policy. + """ @staticmethod - def _standard_featurizer(): + def _standard_featurizer() -> None: return None def __init__(self, priority: int = MAPPING_POLICY_PRIORITY) -> None: """Create a new Mapping policy.""" - super(MappingPolicy, self).__init__(priority=priority) + super().__init__(priority=priority) + + common_utils.raise_warning( + f"'{MappingPolicy.__name__}' is deprecated and will be removed in " + "the future. It is recommended to use the 'RulePolicy' instead.", + category=FutureWarning, + docs=DOCS_URL_MIGRATION_GUIDE, + ) @classmethod def validate_against_domain( @@ -70,14 +87,19 @@ def train( self, training_trackers: List[DialogueStateTracker], domain: Domain, - **kwargs: Any + interpreter: NaturalLanguageInterpreter, + **kwargs: Any, ) -> None: """Does nothing. This policy is deterministic.""" pass def predict_action_probabilities( - self, tracker: DialogueStateTracker, domain: Domain + self, + tracker: DialogueStateTracker, + domain: Domain, + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + **kwargs: Any, ) -> List[float]: """Predicts the assigned action. @@ -85,40 +107,47 @@ def predict_action_probabilities( predicted with the highest probability of all policies. 
If it is not the policy will predict zero for every action.""" - prediction = [0.0] * domain.num_actions - intent = tracker.latest_message.intent.get("name") + result = self._default_predictions(domain) + + intent = tracker.latest_message.intent.get(INTENT_NAME_KEY) if intent == USER_INTENT_RESTART: action = ACTION_RESTART_NAME elif intent == USER_INTENT_BACK: action = ACTION_BACK_NAME + elif intent == USER_INTENT_SESSION_START: + action = ACTION_SESSION_START_NAME else: action = domain.intent_properties.get(intent, {}).get("triggers") if tracker.latest_action_name == ACTION_LISTEN_NAME: + # predict mapped action if action: idx = domain.index_for_action(action) if idx is None: - logger.warning( - "MappingPolicy tried to predict unknown " - "action '{}'.".format(action) + raise_warning( + f"MappingPolicy tried to predict unknown " + f"action '{action}'. Make sure all mapped actions are " + f"listed in the domain.", + docs=DOCS_URL_POLICIES + "#mapping-policy", ) else: - prediction[idx] = 1 + result[idx] = 1 - if any(prediction): + if any(result): logger.debug( "The predicted intent '{}' is mapped to " " action '{}' in the domain." "".format(intent, action) ) elif tracker.latest_action_name == action and action is not None: + # predict next action_listen after mapped action latest_action = tracker.get_last_event_for(ActionExecuted) assert latest_action.action_name == action - if latest_action.policy == type( - self - ).__name__ or latest_action.policy.endswith("_" + type(self).__name__): - # this ensures that we only predict listen, if we predicted - # the mapped action + if latest_action.policy and latest_action.policy.endswith( + type(self).__name__ + ): + # this ensures that we only predict listen, + # if we predicted the mapped action logger.debug( "The mapped action, '{}', for this intent, '{}', was " "executed last so MappingPolicy is returning to " @@ -126,25 +155,25 @@ def predict_action_probabilities( ) idx = domain.index_for_action(ACTION_LISTEN_NAME) - prediction[idx] = 1 + result[idx] = 1 else: logger.debug( - "The mapped action, '{}', for this intent, '{}', was " - "executed last, but it was predicted by another policy, '{}', so MappingPolicy is not" - "predicting any action.".format( + "The mapped action, '{}', for the intent, '{}', was " + "executed last, but it was predicted by another policy, '{}', " + "so MappingPolicy is not predicting any action.".format( action, intent, latest_action.policy ) ) elif action == ACTION_RESTART_NAME: - idx = domain.index_for_action(ACTION_RESTART_NAME) - prediction[idx] = 1 logger.debug("Restarting the conversation with action_restart.") + idx = domain.index_for_action(ACTION_RESTART_NAME) + result[idx] = 1 else: logger.debug( "There is no mapped action for the predicted intent, " "'{}'.".format(intent) ) - return prediction + return result def persist(self, path: Text) -> None: """Only persists the priority.""" @@ -152,7 +181,7 @@ def persist(self, path: Text) -> None: config_file = os.path.join(path, "mapping_policy.json") meta = {"priority": self.priority} rasa.utils.io.create_directory_for_file(config_file) - utils.dump_obj_as_json_to_file(config_file, meta) + rasa.utils.io.dump_obj_as_json_to_file(config_file, meta) @classmethod def load(cls, path: Text) -> "MappingPolicy": diff --git a/rasa/core/policies/memoization.py b/rasa/core/policies/memoization.py index 5c318ecf6c63..daf8e67255e7 100644 --- a/rasa/core/policies/memoization.py +++ b/rasa/core/policies/memoization.py @@ -9,10 +9,10 @@ import rasa.utils.io -from rasa.core import 
utils from rasa.core.domain import Domain from rasa.core.events import ActionExecuted from rasa.core.featurizers import TrackerFeaturizer, MaxHistoryTrackerFeaturizer +from rasa.core.interpreter import NaturalLanguageInterpreter, RegexInterpreter from rasa.core.policies.policy import Policy from rasa.core.trackers import DialogueStateTracker from rasa.utils.common import is_logging_disabled @@ -49,7 +49,9 @@ class MemoizationPolicy(Policy): USE_NLU_CONFIDENCE_AS_SCORE = False @staticmethod - def _standard_featurizer(max_history=None): + def _standard_featurizer( + max_history: Optional[int] = None, + ) -> MaxHistoryTrackerFeaturizer: # Memoization policy always uses MaxHistoryTrackerFeaturizer # without state_featurizer return MaxHistoryTrackerFeaturizer( @@ -65,34 +67,53 @@ def __init__( max_history: Optional[int] = None, lookup: Optional[Dict] = None, ) -> None: + """Initialize the policy. + + Args: + featurizer: tracker featurizer + priority: the priority of the policy + max_history: maximum history to take into account when featurizing trackers + lookup: a dictionary that stores featurized tracker states and + predicted actions for them + """ if not featurizer: featurizer = self._standard_featurizer(max_history) - super(MemoizationPolicy, self).__init__(featurizer, priority) + super().__init__(featurizer, priority) self.max_history = self.featurizer.max_history self.lookup = lookup if lookup is not None else {} - self.is_enabled = True - def toggle(self, activate: bool) -> None: - self.is_enabled = activate + def _create_lookup_from_states( + self, + trackers_as_states: List[List[Dict]], + trackers_as_actions: List[List[Text]], + ) -> Dict[Text, Text]: + """Creates lookup dictionary from the tracker represented as states. + + Args: + trackers_as_states: representation of the trackers as a list of states + trackers_as_actions: representation of the trackers as a list of actions + + Returns: + lookup dictionary + """ + + lookup = {} - def _add_states_to_lookup( - self, trackers_as_states, trackers_as_actions, domain, online=False - ): - """Add states to lookup dict""" if not trackers_as_states: - return + return lookup - assert len(trackers_as_states[0]) == self.max_history, ( - "Trying to mem featurized data with {} historic turns. Expected: " - "{}".format(len(trackers_as_states[0]), self.max_history) - ) + if self.max_history: + assert len(trackers_as_states[0]) == self.max_history, ( + f"Trying to memorizefeaturized data with {len(trackers_as_states[0])} " + f"historic turns. Expected: {self.max_history}" + ) assert len(trackers_as_actions[0]) == 1, ( - "The second dimension of trackers_as_action should be 1, " - "instead of {}".format(len(trackers_as_actions[0])) + f"The second dimension of trackers_as_action should be 1, " + f"instead of {len(trackers_as_actions[0])}" ) ambiguous_feature_keys = set() @@ -106,36 +127,27 @@ def _add_states_to_lookup( action = actions[0] feature_key = self._create_feature_key(states) - feature_item = domain.index_for_action(action) if feature_key not in ambiguous_feature_keys: - if feature_key in self.lookup.keys(): - if self.lookup[feature_key] != feature_item: - if online: - logger.info( - "Original stories are " - "different for {} -- {}\n" - "Memorized the new ones for " - "now. Delete contradicting " - "examples after exporting " - "the new stories." 
- "".format(states, action) - ) - self.lookup[feature_key] = feature_item - else: - # delete contradicting example created by - # partial history augmentation from memory - ambiguous_feature_keys.add(feature_key) - del self.lookup[feature_key] + if feature_key in lookup.keys(): + if lookup[feature_key] != action: + # delete contradicting example created by + # partial history augmentation from memory + ambiguous_feature_keys.add(feature_key) + del lookup[feature_key] else: - self.lookup[feature_key] = feature_item - pbar.set_postfix({"# examples": "{:d}".format(len(self.lookup))}) + lookup[feature_key] = action + pbar.set_postfix({"# examples": "{:d}".format(len(lookup))}) + + return lookup + + def _create_feature_key(self, states: List[Dict]) -> Text: + from rasa.utils import io - def _create_feature_key(self, states): feature_str = json.dumps(states, sort_keys=True).replace('"', "") if self.ENABLE_FEATURE_STRING_COMPRESSION: - compressed = zlib.compress(bytes(feature_str, "utf-8")) - return base64.b64encode(compressed).decode("utf-8") + compressed = zlib.compress(bytes(feature_str, io.DEFAULT_ENCODING)) + return base64.b64encode(compressed).decode(io.DEFAULT_ENCODING) else: return feature_str @@ -143,10 +155,9 @@ def train( self, training_trackers: List[DialogueStateTracker], domain: Domain, - **kwargs: Any + interpreter: NaturalLanguageInterpreter, + **kwargs: Any, ) -> None: - """Trains the policy on given training trackers.""" - self.lookup = {} # only considers original trackers (no augmented ones) training_trackers = [ t @@ -157,24 +168,12 @@ def train( trackers_as_states, trackers_as_actions, ) = self.featurizer.training_states_and_actions(training_trackers, domain) - self._add_states_to_lookup(trackers_as_states, trackers_as_actions, domain) - logger.debug("Memorized {} unique examples.".format(len(self.lookup))) - - def continue_training( - self, - training_trackers: List[DialogueStateTracker], - domain: Domain, - **kwargs: Any - ) -> None: - - # add only the last tracker, because it is the only new one - ( - trackers_as_states, - trackers_as_actions, - ) = self.featurizer.training_states_and_actions(training_trackers[-1:], domain) - self._add_states_to_lookup(trackers_as_states, trackers_as_actions, domain) + self.lookup = self._create_lookup_from_states( + trackers_as_states, trackers_as_actions + ) + logger.debug(f"Memorized {len(self.lookup)} unique examples.") - def _recall_states(self, states: List[Dict[Text, float]]) -> Optional[int]: + def _recall_states(self, states: List[Dict[Text, float]]) -> Optional[Text]: return self.lookup.get(self._create_feature_key(states)) @@ -183,31 +182,15 @@ def recall( states: List[Dict[Text, float]], tracker: DialogueStateTracker, domain: Domain, - ) -> Optional[int]: + ) -> Optional[Text]: return self._recall_states(states) - def predict_action_probabilities( - self, tracker: DialogueStateTracker, domain: Domain + def _prediction_result( + self, action_name: Text, tracker: DialogueStateTracker, domain: Domain ) -> List[float]: - """Predicts the next action the bot should take - after seeing the tracker. - - Returns the list of probabilities for the next actions. 
- If memorized action was found returns 1.1 for its index, - else returns 0.0 for all actions.""" - result = [0.0] * domain.num_actions - - if not self.is_enabled: - return result - - tracker_as_states = self.featurizer.prediction_states([tracker], domain) - states = tracker_as_states[0] - logger.debug("Current tracker state {}".format(states)) - recalled = self.recall(states, tracker, domain) - if recalled is not None: - logger.debug("There is a memorised next action '{}'".format(recalled)) - + result = self._default_predictions(domain) + if action_name: if self.USE_NLU_CONFIDENCE_AS_SCORE: # the memoization will use the confidence of NLU on the latest # user message to set the confidence of the action @@ -215,7 +198,26 @@ def predict_action_probabilities( else: score = 1.0 - result[recalled] = score + result[domain.index_for_action(action_name)] = score + + return result + + def predict_action_probabilities( + self, + tracker: DialogueStateTracker, + domain: Domain, + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + **kwargs: Any, + ) -> List[float]: + result = self._default_predictions(domain) + + tracker_as_states = self.featurizer.prediction_states([tracker], domain) + states = tracker_as_states[0] + logger.debug(f"Current tracker state {states}") + predicted_action_name = self.recall(states, tracker, domain) + if predicted_action_name is not None: + logger.debug(f"There is a memorised next action '{predicted_action_name}'") + result = self._prediction_result(predicted_action_name, tracker, domain) else: logger.debug("There is no memorised next action") @@ -232,7 +234,7 @@ def persist(self, path: Text) -> None: "lookup": self.lookup, } rasa.utils.io.create_directory_for_file(memorized_file) - utils.dump_obj_as_json_to_file(memorized_file, data) + rasa.utils.io.dump_obj_as_json_to_file(memorized_file, data) @classmethod def load(cls, path: Text) -> "MemoizationPolicy": @@ -271,7 +273,7 @@ class AugmentedMemoizationPolicy(MemoizationPolicy): """ @staticmethod - def _back_to_the_future_again(tracker): + def _back_to_the_future_again(tracker) -> Optional[DialogueStateTracker]: """Send Marty to the past to get the new featurization for the future""" @@ -301,7 +303,7 @@ def _back_to_the_future_again(tracker): return mcfly_tracker - def _recall_using_delorean(self, old_states, tracker, domain): + def _recall_using_delorean(self, old_states, tracker, domain) -> Optional[Text]: """Recursively go to the past to correctly forget slots, and then back to the future to recall.""" @@ -317,7 +319,7 @@ def _recall_using_delorean(self, old_states, tracker, domain): # check if we like new futures memorised = self._recall_states(states) if memorised is not None: - logger.debug("Current tracker state {}".format(states)) + logger.debug(f"Current tracker state {states}") return memorised old_states = states @@ -325,7 +327,7 @@ def _recall_using_delorean(self, old_states, tracker, domain): mcfly_tracker = self._back_to_the_future_again(mcfly_tracker) # No match found - logger.debug("Current tracker state {}".format(old_states)) + logger.debug(f"Current tracker state {old_states}") return None def recall( @@ -333,11 +335,11 @@ def recall( states: List[Dict[Text, float]], tracker: DialogueStateTracker, domain: Domain, - ) -> Optional[int]: + ) -> Optional[Text]: - recalled = self._recall_states(states) - if recalled is None: + predicted_action_name = self._recall_states(states) + if predicted_action_name is None: # let's try a different method to recall that tracker return 
self._recall_using_delorean(states, tracker, domain) else: - return recalled + return predicted_action_name diff --git a/rasa/core/policies/policy.py b/rasa/core/policies/policy.py index 5fad908cb5ed..026bb207febe 100644 --- a/rasa/core/policies/policy.py +++ b/rasa/core/policies/policy.py @@ -1,7 +1,7 @@ import copy import logging -import tensorflow as tf -from typing import Any, List, Optional, Text, Dict, Callable +from enum import Enum +from typing import Any, List, Optional, Text, Dict, Callable, Type, Union import rasa.utils.common from rasa.core.domain import Domain @@ -10,6 +10,7 @@ BinarySingleStateFeaturizer, ) from rasa.core.featurizers import TrackerFeaturizer +from rasa.core.interpreter import NaturalLanguageInterpreter, RegexInterpreter from rasa.core.trackers import DialogueStateTracker from rasa.core.training.data import DialogueTrainingData from rasa.core.constants import DEFAULT_POLICY_PRIORITY @@ -18,15 +19,66 @@ logger = logging.getLogger(__name__) -class Policy(object): +class SupportedData(Enum): + """Enumeration of a policy's supported training data type.""" + + # policy only supports ML-based training data ("stories") + ML_DATA = 1 + + # policy only supports rule-based data ("rules") + RULE_DATA = 2 + + # policy supports both ML-based and rule-based data ("stories" as well as "rules") + ML_AND_RULE_DATA = 3 + + @staticmethod + def trackers_for_policy( + policy: Union["Policy", Type["Policy"]], trackers: List[DialogueStateTracker] + ) -> List[DialogueStateTracker]: + """Return trackers for a given policy. + + Args: + policy: Policy or policy type to return trackers for. + trackers: Trackers to split. + + Returns: + Trackers from ML-based training data and/or rule-based data. + """ + supported_data = policy.supported_data() + + if supported_data == SupportedData.RULE_DATA: + return [tracker for tracker in trackers if tracker.is_rule_tracker] + + if supported_data == SupportedData.ML_DATA: + return [tracker for tracker in trackers if not tracker.is_rule_tracker] + + # `supported_data` is `SupportedData.ML_AND_RULE_DATA` + return trackers + + +class Policy: SUPPORTS_ONLINE_TRAINING = False @staticmethod - def _standard_featurizer(): + def supported_data() -> SupportedData: + """The type of data supported by this policy. + + By default, this is only ML-based training data. If policies support rule data, + or both ML-based data and rule data, they need to override this method. + + Returns: + The data type supported by this policy (ML-based training data). + """ + return SupportedData.ML_DATA + + @staticmethod + def _standard_featurizer() -> MaxHistoryTrackerFeaturizer: return MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer()) @classmethod - def _create_featurizer(cls, featurizer=None): + def _create_featurizer( + cls, featurizer: Optional[TrackerFeaturizer] = None + ) -> TrackerFeaturizer: if featurizer: return copy.deepcopy(featurizer) else: @@ -46,27 +98,43 @@ def featurizer(self): @staticmethod def _get_valid_params(func: Callable, **kwargs: Any) -> Dict: - # filter out kwargs that cannot be passed to func + """Filters out kwargs that cannot be passed to func. 
+ + Args: + func: a callable function + + Returns: + the dictionary of parameters + """ + valid_keys = rasa.utils.common.arguments_of(func) params = {key: kwargs.get(key) for key in valid_keys if kwargs.get(key)} ignored_params = { key: kwargs.get(key) for key in kwargs.keys() if not params.get(key) } - logger.debug( - "Parameters ignored by `model.fit(...)`: {}".format(ignored_params) - ) + logger.debug(f"Parameters ignored by `model.fit(...)`: {ignored_params}") return params def featurize_for_training( self, training_trackers: List[DialogueStateTracker], domain: Domain, - **kwargs: Any + **kwargs: Any, ) -> DialogueTrainingData: """Transform training trackers into a vector representation. + The trackers, consisting of multiple turns, will be transformed - into a float vector which can be used by a ML model.""" + into a float vector which can be used by a ML model. + + Args: + training_trackers: + the list of the :class:`rasa.core.trackers.DialogueStateTracker` + domain: the :class:`rasa.core.domain.Domain` + + Returns: + the :class:`rasa.core.training.data.DialogueTrainingData` + """ training_data = self.featurizer.featurize_trackers(training_trackers, domain) @@ -84,81 +152,89 @@ def train( self, training_trackers: List[DialogueStateTracker], domain: Domain, - **kwargs: Any + interpreter: NaturalLanguageInterpreter, + **kwargs: Any, ) -> None: - """Trains the policy on given training trackers.""" + """Trains the policy on given training trackers. - raise NotImplementedError("Policy must have the capacity to train.") - - def _training_data_for_continue_training( - self, - batch_size: int, - training_trackers: List[DialogueStateTracker], - domain: Domain, - ) -> DialogueTrainingData: - """Creates training_data for `continue_training` by - taking the new labelled example training_trackers[-1:] - and inserting it in batch_size-1 parts of the old training data, + Args: + training_trackers: + the list of the :class:`rasa.core.trackers.DialogueStateTracker` + domain: the :class:`rasa.core.domain.Domain` + interpreter: Interpreter which can be used by the polices for featurization. """ - import numpy as np - - num_samples = batch_size - 1 - num_prev_examples = len(training_trackers) - 1 - sampled_idx = np.random.choice( - range(num_prev_examples), - replace=False, - size=min(num_samples, num_prev_examples), - ) - trackers = [training_trackers[i] for i in sampled_idx] + training_trackers[-1:] - return self.featurize_for_training(trackers, domain) + raise NotImplementedError("Policy must have the capacity to train.") - def continue_training( + def predict_action_probabilities( self, - training_trackers: List[DialogueStateTracker], + tracker: DialogueStateTracker, domain: Domain, - **kwargs: Any - ) -> None: - """Continues training an already trained policy. - - This doesn't need to be supported by every policy. If it is supported, - the policy can be used for online training and the implementation for - the continued training should be put into this function.""" - - pass - - def predict_action_probabilities( - self, tracker: DialogueStateTracker, domain: Domain + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + **kwargs: Any, ) -> List[float]: - """Predicts the next action the bot should take - after seeing the tracker. + """Predicts the next action the bot should take after seeing the tracker. 
+ + Args: + tracker: the :class:`rasa.core.trackers.DialogueStateTracker` + domain: the :class:`rasa.core.domain.Domain` + interpreter: Interpreter which may be used by the policies to create + additional features. - Returns the list of probabilities for the next actions""" + Returns: + the list of probabilities for the next actions + """ raise NotImplementedError("Policy must have the capacity to predict.") def persist(self, path: Text) -> None: - """Persists the policy to a storage.""" + """Persists the policy to a storage. + + Args: + path: the path where to save the policy to + """ + raise NotImplementedError("Policy must have the capacity to persist itself.") @classmethod def load(cls, path: Text) -> "Policy": """Loads a policy from the storage. - Needs to load its featurizer""" + + Needs to load its featurizer. + + Args: + path: the path from where to load the policy + """ + raise NotImplementedError("Policy must have the capacity to load itself.") + @staticmethod + def _default_predictions(domain: Domain) -> List[float]: + """Creates a list of zeros. + + Args: + domain: the :class:`rasa.core.domain.Domain` + Returns: + the list of the length of the number of actions + """ + + return [0.0] * domain.num_actions -def confidence_scores_for(action_name, value, domain): + +def confidence_scores_for( + action_name: Text, value: float, domain: Domain +) -> List[float]: """Returns confidence scores if a single action is predicted. Args: - action_name: Name of action for which the score should be set. - value: Confidence for `action_name`. - domain: Domain which contains all actions. - - Returns: List of length `len(nr_actions)`. + action_name: the name of the action for which the score should be set + value: the confidence for `action_name` + domain: the :class:`rasa.core.domain.Domain` + Returns: + the list of the length of the number of actions """ + results = [0.0] * domain.num_actions idx = domain.index_for_action(action_name) results[idx] = value diff --git a/rasa/core/policies/registry.py b/rasa/core/policies/registry.py new file mode 100644 index 000000000000..833fa0bd4ed4 --- /dev/null +++ b/rasa/core/policies/registry.py @@ -0,0 +1,26 @@ +# Import all policies at one place to be able to to resolve them via a common module +# path. Don't do this in `__init__.py` to avoid importing them without need. 
+ +# noinspection PyUnresolvedReferences +from rasa.core.policies.ted_policy import TEDPolicy + +# noinspection PyUnresolvedReferences +from rasa.core.policies.fallback import FallbackPolicy + +# noinspection PyUnresolvedReferences +from rasa.core.policies.memoization import MemoizationPolicy, AugmentedMemoizationPolicy + +# noinspection PyUnresolvedReferences +from rasa.core.policies.sklearn_policy import SklearnPolicy + +# noinspection PyUnresolvedReferences +from rasa.core.policies.form_policy import FormPolicy + +# noinspection PyUnresolvedReferences +from rasa.core.policies.two_stage_fallback import TwoStageFallbackPolicy + +# noinspection PyUnresolvedReferences +from rasa.core.policies.mapping_policy import MappingPolicy + +# noinspection PyUnresolvedReferences +from rasa.core.policies.rule_policy import RulePolicy diff --git a/rasa/core/policies/rule_policy.py b/rasa/core/policies/rule_policy.py new file mode 100644 index 000000000000..cf1f4d5028f4 --- /dev/null +++ b/rasa/core/policies/rule_policy.py @@ -0,0 +1,519 @@ +import logging +from typing import List, Dict, Text, Optional, Any, Set, TYPE_CHECKING + +import re +from collections import defaultdict + +from rasa.core.events import FormValidation +from rasa.core.domain import PREV_PREFIX, ACTIVE_FORM_PREFIX, Domain, InvalidDomain +from rasa.core.featurizers import TrackerFeaturizer +from rasa.core.interpreter import NaturalLanguageInterpreter, RegexInterpreter +from rasa.core.policies.memoization import MemoizationPolicy +from rasa.core.policies.policy import SupportedData +from rasa.core.trackers import DialogueStateTracker +from rasa.core.constants import ( + FORM_POLICY_PRIORITY, + USER_INTENT_RESTART, + USER_INTENT_BACK, + USER_INTENT_SESSION_START, +) +from rasa.core.actions.action import ( + ACTION_LISTEN_NAME, + ACTION_RESTART_NAME, + ACTION_BACK_NAME, + ACTION_SESSION_START_NAME, + RULE_SNIPPET_ACTION_NAME, + ACTION_DEFAULT_FALLBACK_NAME, +) + +if TYPE_CHECKING: + from rasa.core.policies.ensemble import PolicyEnsemble # pytype: disable=pyi-error + +logger = logging.getLogger(__name__) + +# These are Rasa Open Source default actions and overrule everything at any time. +DEFAULT_ACTION_MAPPINGS = { + USER_INTENT_RESTART: ACTION_RESTART_NAME, + USER_INTENT_BACK: ACTION_BACK_NAME, + USER_INTENT_SESSION_START: ACTION_SESSION_START_NAME, +} + +RULES = "rules" +RULES_FOR_FORM_UNHAPPY_PATH = "rules_for_form_unhappy_path" +DO_NOT_VALIDATE_FORM = "do_not_validate_form" +DO_NOT_PREDICT_FORM_ACTION = "do_not_predict_form_action" + + +class RulePolicy(MemoizationPolicy): + """Policy which handles all the rules""" + + ENABLE_FEATURE_STRING_COMPRESSION = False + + @staticmethod + def supported_data() -> SupportedData: + """The type of data supported by this policy. + + Returns: + The data type supported by this policy (rule data). + """ + return SupportedData.ML_AND_RULE_DATA + + def __init__( + self, + featurizer: Optional[TrackerFeaturizer] = None, + priority: int = FORM_POLICY_PRIORITY, + lookup: Optional[Dict] = None, + core_fallback_threshold: float = 0.3, + core_fallback_action_name: Text = ACTION_DEFAULT_FALLBACK_NAME, + enable_fallback_prediction: bool = True, + ) -> None: + """Create a `RulePolicy` object. + + Args: + featurizer: `Featurizer` which is used to convert conversation states to + features. + priority: Priority of the policy which is used if multiple policies predict + actions with the same confidence. + lookup: Lookup table which is used to pick matching rules for a conversation + state. 
+ core_fallback_threshold: Confidence of the prediction if no rule matched + and de-facto threshold for a core fallback. + core_fallback_action_name: Name of the action which should be predicted + if no rule matched. + enable_fallback_prediction: If `True` `core_fallback_action_name` is + predicted in case no rule matched. + """ + if not featurizer: + # max history is set to `None` in order to capture lengths of rule stories + featurizer = self._standard_featurizer() + featurizer.max_history = None + + self._core_fallback_threshold = core_fallback_threshold + self._fallback_action_name = core_fallback_action_name + self._enable_fallback_prediction = enable_fallback_prediction + + super().__init__(featurizer=featurizer, priority=priority, lookup=lookup) + + @classmethod + def validate_against_domain( + cls, ensemble: Optional["PolicyEnsemble"], domain: Optional[Domain] + ) -> None: + if ensemble is None: + return + + rule_policy = next( + (p for p in ensemble.policies if isinstance(p, RulePolicy)), None + ) + if not rule_policy or not rule_policy._enable_fallback_prediction: + return + + if ( + domain is None + or rule_policy._fallback_action_name not in domain.action_names + ): + raise InvalidDomain( + f"The fallback action '{rule_policy._fallback_action_name}' which was " + f"configured for the {RulePolicy.__name__} must be present in the " + f"domain." + ) + + def _create_feature_key(self, states: List[Dict]) -> Text: + + feature_str = "" + for state in states: + if state: + feature_str += "|" + for feature in state.keys(): + feature_str += feature + " " + feature_str = feature_str.strip() + + return feature_str + + @staticmethod + def _get_active_form_name(state: Dict[Text, float]) -> Optional[Text]: + # by construction there is only one active form + return next( + ( + state_name[len(ACTIVE_FORM_PREFIX) :] + for state_name, prob in state.items() + if ACTIVE_FORM_PREFIX in state_name + and state_name != ACTIVE_FORM_PREFIX + "None" + and prob > 0 + ), + None, + ) + + @staticmethod + def _prev_action_listen_in_state(state: Dict[Text, float]) -> bool: + return any( + PREV_PREFIX + ACTION_LISTEN_NAME in state_name and prob > 0 + for state_name, prob in state.items() + ) + + @staticmethod + def _modified_states( + states: List[Dict[Text, float]] + ) -> List[Optional[Dict[Text, float]]]: + """Modifies the states to create feature keys for form unhappy path conditions. 
+ + Args: + states: a representation of a tracker + as a list of dictionaries containing features + + Returns: + modified states + """ + + indicator = PREV_PREFIX + RULE_SNIPPET_ACTION_NAME + state_only_with_action = {indicator: 1} + # leave only last 2 dialogue turns to + # - capture previous meaningful action before action_listen + # - ignore previous intent + if len(states) > 2 and states[-2] is not None: + state_only_with_action = { + state_name: prob + for state_name, prob in states[-2].items() + if PREV_PREFIX in state_name and prob > 0 + } + + # add `prev_...` to show that it should not be a first turn + if indicator not in state_only_with_action and indicator not in states[-1]: + return [{indicator: 1}, state_only_with_action, states[-1]] + + return [state_only_with_action, states[-1]] + + @staticmethod + def _clean_feature_keys(lookup: Dict[Text, Text]) -> Dict[Text, Text]: + # remove action_listens that were added after conditions + updated_lookup = lookup.copy() + for feature_key, action in lookup.items(): + # Delete rules if there is no prior action or if it would predict + # the `...` action + if PREV_PREFIX not in feature_key or action == RULE_SNIPPET_ACTION_NAME: + del updated_lookup[feature_key] + elif RULE_SNIPPET_ACTION_NAME in feature_key: + # If the previous action is `...` -> remove any specific state + # requirements for that state (anything can match this state) + new_feature_key = re.sub( + rf".*{PREV_PREFIX}\.\.\.[^|]*", "", feature_key + ) + + if new_feature_key: + if new_feature_key.startswith("|"): + new_feature_key = new_feature_key[1:] + if new_feature_key.endswith("|"): + new_feature_key = new_feature_key[:-1] + updated_lookup[new_feature_key] = action + + del updated_lookup[feature_key] + + return updated_lookup + + def _create_form_unhappy_lookup_from_states( + self, + trackers_as_states: List[List[Dict]], + trackers_as_actions: List[List[Text]], + ) -> Dict[Text, Text]: + """Creates lookup dictionary from the tracker represented as states. 
+ + Args: + trackers_as_states: representation of the trackers as a list of states + trackers_as_actions: representation of the trackers as a list of actions + + Returns: + lookup dictionary + """ + + lookup = {} + for states, actions in zip(trackers_as_states, trackers_as_actions): + action = actions[0] + active_form = self._get_active_form_name(states[-1]) + # even if there are two identical feature keys + # their form will be the same + # because of `active_form_...` feature + if active_form: + states = self._modified_states(states) + feature_key = self._create_feature_key(states) + + # Since rule snippets and stories inside the form contain + # only unhappy paths, notify the form that + # it was predicted after an answer to a different question and + # therefore it should not validate user input for requested slot + if ( + # form is predicted after action_listen in unhappy path, + # therefore no validation is needed + self._prev_action_listen_in_state(states[-1]) + and action == active_form + ): + lookup[feature_key] = DO_NOT_VALIDATE_FORM + elif ( + # some action other than action_listen and active_form + # is predicted in unhappy path, + # therefore active_form shouldn't be predicted by the rule + not self._prev_action_listen_in_state(states[-1]) + and action not in {ACTION_LISTEN_NAME, active_form} + ): + lookup[feature_key] = DO_NOT_PREDICT_FORM_ACTION + return lookup + + def train( + self, + training_trackers: List[DialogueStateTracker], + domain: Domain, + interpreter: NaturalLanguageInterpreter, + **kwargs: Any, + ) -> None: + + # only consider original trackers (no augmented ones) + training_trackers = [ + t + for t in training_trackers + if not hasattr(t, "is_augmented") or not t.is_augmented + ] + # only use trackers from rule-based training data + rule_trackers = [t for t in training_trackers if t.is_rule_tracker] + ( + rule_trackers_as_states, + rule_trackers_as_actions, + ) = self.featurizer.training_states_and_actions(rule_trackers, domain) + + rules_lookup = self._create_lookup_from_states( + rule_trackers_as_states, rule_trackers_as_actions + ) + + self.lookup[RULES] = self._clean_feature_keys(rules_lookup) + + story_trackers = [t for t in training_trackers if not t.is_rule_tracker] + ( + story_trackers_as_states, + story_trackers_as_actions, + ) = self.featurizer.training_states_and_actions(story_trackers, domain) + + # use all trackers to find negative rules in unhappy paths + trackers_as_states = rule_trackers_as_states + story_trackers_as_states + trackers_as_actions = rule_trackers_as_actions + story_trackers_as_actions + + # negative rules are not anti-rules, they are auxiliary to actual rules + form_unhappy_lookup = self._create_form_unhappy_lookup_from_states( + trackers_as_states, trackers_as_actions + ) + self.lookup[RULES_FOR_FORM_UNHAPPY_PATH] = self._clean_feature_keys( + form_unhappy_lookup + ) + + # TODO use story_trackers and rule_trackers + # to check that stories don't contradict rules + + logger.debug(f"Memorized '{len(self.lookup[RULES])}' unique rules.") + + @staticmethod + def _features_in_state(features: List[Text], state: Dict[Text, float]) -> bool: + + state_slots = defaultdict(set) + for s in state.keys(): + if s.startswith("slot"): + state_slots[s[: s.rfind("_")]].add(s) + + f_slots = defaultdict(set) + for f in features: + # TODO: this is a hack to make a rule know + # that slot or form should not be set; + # `_None` is added inside domain to indicate that + # the feature should not be present + if f.endswith("_None"): + if any(f[: 
f.rfind("_")] in key for key in state.keys()): + return False + elif f not in state: + return False + elif f.startswith("slot"): + f_slots[f[: f.rfind("_")]].add(f) + + for k, v in f_slots.items(): + if state_slots[k] != v: + return False + + return True + + def _rule_is_good( + self, rule_key: Text, turn_index: int, state: Dict[Text, float] + ) -> bool: + """Check if rule is satisfied with current state at turn.""" + + # turn_index goes back in time + rule_turns = list(reversed(rule_key.split("|"))) + + return bool( + # rule is shorter than current turn index + turn_index >= len(rule_turns) + # current rule and state turns are empty + or (not rule_turns[turn_index] and not state) + # check that current rule turn features are present in current state turn + or ( + rule_turns[turn_index] + and state + and self._features_in_state(rule_turns[turn_index].split(), state) + ) + ) + + def _get_possible_keys( + self, lookup: Dict[Text, Text], states: List[Dict[Text, float]] + ) -> Set[Text]: + possible_keys = set(lookup.keys()) + for i, state in enumerate(reversed(states)): + # find rule keys that correspond to current state + possible_keys = set( + filter(lambda _key: self._rule_is_good(_key, i, state), possible_keys) + ) + return possible_keys + + @staticmethod + def _find_action_from_default_actions( + tracker: DialogueStateTracker, + ) -> Optional[Text]: + if ( + not tracker.latest_action_name == ACTION_LISTEN_NAME + or not tracker.latest_message + ): + return None + + default_action_name = DEFAULT_ACTION_MAPPINGS.get( + tracker.latest_message.intent.get("name") + ) + + if default_action_name: + logger.debug(f"Predicted default action '{default_action_name}'.") + + return default_action_name + + @staticmethod + def _find_action_from_form_happy_path( + tracker: DialogueStateTracker, + ) -> Optional[Text]: + + active_form_name = tracker.active_loop_name() + active_form_rejected = tracker.active_loop.get("rejected") + should_predict_form = ( + active_form_name + and not active_form_rejected + and tracker.latest_action_name != active_form_name + ) + should_predict_listen = ( + active_form_name + and not active_form_rejected + and tracker.latest_action_name == active_form_name + ) + + if should_predict_form: + logger.debug(f"Predicted form '{active_form_name}'.") + return active_form_name + + # predict `action_listen` if form action was run successfully + if should_predict_listen: + logger.debug( + f"Predicted '{ACTION_LISTEN_NAME}' after form '{active_form_name}'." 
+ ) + return ACTION_LISTEN_NAME + + def _find_action_from_rules( + self, tracker: DialogueStateTracker, domain: Domain + ) -> Optional[Text]: + tracker_as_states = self.featurizer.prediction_states([tracker], domain) + states = tracker_as_states[0] + + logger.debug(f"Current tracker state: {states}") + + rule_keys = self._get_possible_keys(self.lookup[RULES], states) + predicted_action_name = None + best_rule_key = "" + if rule_keys: + # TODO check that max is correct + # if there are several rules, + # it should mean that some rule is a subset of another rule + best_rule_key = max(rule_keys, key=len) + predicted_action_name = self.lookup[RULES].get(best_rule_key) + + active_form_name = tracker.active_loop_name() + if active_form_name: + # find rules for unhappy path of the form + form_unhappy_keys = self._get_possible_keys( + self.lookup[RULES_FOR_FORM_UNHAPPY_PATH], states + ) + # there could be several unhappy path conditions + unhappy_path_conditions = [ + self.lookup[RULES_FOR_FORM_UNHAPPY_PATH].get(key) + for key in form_unhappy_keys + ] + + # Check if a rule that predicted action_listen + # was applied inside the form. + # Rules might not explicitly switch back to the `Form`. + # Hence, we have to take care of that. + predicted_listen_from_general_rule = ( + predicted_action_name == ACTION_LISTEN_NAME + and ACTIVE_FORM_PREFIX + active_form_name not in best_rule_key + ) + if predicted_listen_from_general_rule: + if DO_NOT_PREDICT_FORM_ACTION not in unhappy_path_conditions: + # negative rules don't contain a key that corresponds to + # the fact that active_form shouldn't be predicted + logger.debug( + f"Predicted form '{active_form_name}' by overwriting " + f"'{ACTION_LISTEN_NAME}' predicted by general rule." + ) + return active_form_name + + # do not predict anything + predicted_action_name = None + + if DO_NOT_VALIDATE_FORM in unhappy_path_conditions: + logger.debug("Added `FormValidation(False)` event.") + tracker.update(FormValidation(False)) + + if predicted_action_name is not None: + logger.debug( + f"There is a rule for the next action '{predicted_action_name}'." + ) + else: + logger.debug("There is no applicable rule.") + + return predicted_action_name + + def predict_action_probabilities( + self, + tracker: DialogueStateTracker, + domain: Domain, + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + **kwargs: Any, + ) -> List[float]: + + result = self._default_predictions(domain) + + # Rasa Open Source default actions overrule anything. If users want to achieve + # the same, they need to write a rule or make sure that their form rejects + # accordingly. + default_action_name = self._find_action_from_default_actions(tracker) + if default_action_name: + return self._prediction_result(default_action_name, tracker, domain) + + # A form has priority over any other rule. + # The rules or any other prediction will be applied only if a form was rejected. + # If we are in a form, and the form didn't run previously or rejected, we can + # simply force predict the form. 
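As a rough, self-contained illustration of the precedence spelled out in the comments above (built-in default actions first, then the happy path of an active form, then matching rules, with the core fallback confidence applying only when nothing matches), the sketch below chains hypothetical finder callables in that order. The finder names are illustrative stand-ins for the private `_find_action_from_*` methods, not the actual API.

def first_matching_action(*finders):
    """Return the first action name produced by the prioritized finders, if any."""
    for find in finders:
        action_name = find()
        if action_name:
            return action_name
    return None  # nothing matched; only the fallback confidence remains in the result


# Example: no default action applies and the active form was rejected,
# so a matching rule decides the next action.
assert (
    first_matching_action(lambda: None, lambda: None, lambda: "utter_ask_cuisine")
    == "utter_ask_cuisine"
)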
+ form_happy_path_action_name = self._find_action_from_form_happy_path(tracker) + if form_happy_path_action_name: + return self._prediction_result(form_happy_path_action_name, tracker, domain) + + rules_action_name = self._find_action_from_rules(tracker, domain) + if rules_action_name: + return self._prediction_result(rules_action_name, tracker, domain) + + return result + + def _default_predictions(self, domain: Domain) -> List[float]: + result = super()._default_predictions(domain) + + if self._enable_fallback_prediction: + result[ + domain.index_for_action(self._fallback_action_name) + ] = self._core_fallback_threshold + return result diff --git a/rasa/core/policies/sklearn_policy.py b/rasa/core/policies/sklearn_policy.py index c4a7d3ad79a2..0c6b358b707e 100644 --- a/rasa/core/policies/sklearn_policy.py +++ b/rasa/core/policies/sklearn_policy.py @@ -1,10 +1,20 @@ -import typing import json import logging -import numpy as np import os import pickle -import warnings +import typing +from typing import Any, Callable, Dict, List, Optional, Text, Tuple + +import numpy as np +import rasa.utils.io +from rasa.core.constants import DEFAULT_POLICY_PRIORITY +from rasa.core.domain import Domain +from rasa.core.featurizers import MaxHistoryTrackerFeaturizer, TrackerFeaturizer +from rasa.core.interpreter import NaturalLanguageInterpreter, RegexInterpreter +from rasa.core.policies.policy import Policy +from rasa.core.trackers import DialogueStateTracker +from rasa.core.training.data import DialogueTrainingData +from rasa.utils.common import raise_warning from sklearn.base import clone from sklearn.linear_model import LogisticRegression from sklearn.model_selection import GridSearchCV @@ -12,15 +22,6 @@ # noinspection PyProtectedMember from sklearn.utils import shuffle as sklearn_shuffle -from typing import Optional, Any, List, Text, Dict, Callable - -import rasa.utils.io -from rasa.core import utils -from rasa.core.domain import Domain -from rasa.core.featurizers import TrackerFeaturizer, MaxHistoryTrackerFeaturizer -from rasa.core.policies.policy import Policy -from rasa.core.trackers import DialogueStateTracker -from rasa.core.constants import DEFAULT_POLICY_PRIORITY logger = logging.getLogger(__name__) @@ -41,7 +42,7 @@ def __init__( scoring: Optional[Text or List or Dict or Callable] = "accuracy", label_encoder: LabelEncoder = LabelEncoder(), shuffle: bool = True, - **kwargs: Any + **kwargs: Any, ) -> None: """Create a new sklearn policy. @@ -68,7 +69,7 @@ def __init__( "MaxHistoryTrackerFeaturizer." 
"".format(type(featurizer).__name__) ) - super(SklearnPolicy, self).__init__(featurizer, priority) + super().__init__(featurizer, priority) self.model = model or self._default_model() self.cv = cv @@ -82,46 +83,44 @@ def __init__( self._train_params = kwargs @staticmethod - def _default_model(): + def _default_model() -> Any: return LogisticRegression(solver="liblinear", multi_class="auto") @property def _state(self): return {attr: getattr(self, attr) for attr in self._pickle_params} - def model_architecture(self, **kwargs): + def model_architecture(self, **kwargs) -> Any: # filter out kwargs that cannot be passed to model train_params = self._get_valid_params(self.model.__init__, **kwargs) return self.model.set_params(**train_params) - def _extract_training_data(self, training_data): + def _extract_training_data( + self, training_data: DialogueTrainingData + ) -> Tuple[np.ndarray, np.ndarray]: # transform y from one-hot to num_classes X, y = training_data.X, training_data.y.argmax(axis=-1) if self.shuffle: X, y = sklearn_shuffle(X, y) return X, y - def _preprocess_data(self, X, y=None): - Xt = X.reshape(X.shape[0], -1) - if y is None: - return Xt - else: - yt = self.label_encoder.transform(y) - return Xt, yt + def _preprocess_data(self, X) -> np.ndarray: + return X.reshape(X.shape[0], -1) - def _search_and_score(self, model, X, y, param_grid): + def _search_and_score(self, model, X, y, param_grid) -> Tuple[Any, Any]: search = GridSearchCV( model, param_grid=param_grid, cv=self.cv, scoring="accuracy", verbose=1 ) search.fit(X, y) - print ("Best params:", search.best_params_) + print("Best params:", search.best_params_) return search.best_estimator_, search.best_score_ def train( self, training_trackers: List[DialogueStateTracker], domain: Domain, - **kwargs: Any + interpreter: NaturalLanguageInterpreter, + **kwargs: Any, ) -> None: training_data = self.featurize_for_training(training_trackers, domain, **kwargs) @@ -133,7 +132,8 @@ def train( # Note: clone is called throughout to avoid mutating default # arguments. self.label_encoder = clone(self.label_encoder).fit(y) - Xt, yt = self._preprocess_data(X, y) + Xt = self._preprocess_data(X) + yt = self.label_encoder.transform(y) if self.cv is None: model = clone(model).fit(Xt, yt) @@ -144,9 +144,9 @@ def train( self.model = model logger.info("Done fitting sklearn policy model") if score is not None: - logger.info("Cross validation score: {:.5f}".format(score)) + logger.info(f"Cross validation score: {score:.5f}") - def _postprocess_prediction(self, y_proba, domain): + def _postprocess_prediction(self, y_proba, domain) -> List[float]: yp = y_proba[0].tolist() # Some classes might not be part of the training labels. 
Since @@ -160,7 +160,11 @@ def _postprocess_prediction(self, y_proba, domain): return y_filled def predict_action_probabilities( - self, tracker: DialogueStateTracker, domain: Domain + self, + tracker: DialogueStateTracker, + domain: Domain, + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + **kwargs: Any, ) -> List[float]: X = self.featurizer.create_X([tracker], domain) Xt = self._preprocess_data(X) @@ -175,13 +179,12 @@ def persist(self, path: Text) -> None: meta = {"priority": self.priority} meta_file = os.path.join(path, "sklearn_policy.json") - utils.dump_obj_as_json_to_file(meta_file, meta) + rasa.utils.io.dump_obj_as_json_to_file(meta_file, meta) filename = os.path.join(path, "sklearn_model.pkl") - with open(filename, "wb") as f: - pickle.dump(self._state, f) + rasa.utils.io.pickle_dump(filename, self._state) else: - warnings.warn( + raise_warning( "Persist called without a trained model present. " "Nothing to persist then!" ) @@ -203,10 +206,11 @@ def load(cls, path: Text) -> Policy: meta_file = os.path.join(path, "sklearn_policy.json") meta = json.loads(rasa.utils.io.read_file(meta_file)) + policy = cls(featurizer=featurizer, priority=meta["priority"]) - with open(filename, "rb") as f: - state = pickle.load(f) + state = rasa.utils.io.pickle_load(filename) + vars(policy).update(state) logger.info("Loaded sklearn model") diff --git a/rasa/core/policies/ted_policy.py b/rasa/core/policies/ted_policy.py new file mode 100644 index 000000000000..cb703cd04800 --- /dev/null +++ b/rasa/core/policies/ted_policy.py @@ -0,0 +1,653 @@ +import copy +import logging +import os +from pathlib import Path + +import numpy as np +import tensorflow as tf +import tensorflow_addons as tfa + +from typing import Any, List, Optional, Text, Dict, Tuple, Union + +import rasa.utils.io as io_utils +from rasa.core.domain import Domain +from rasa.core.featurizers import ( + TrackerFeaturizer, + FullDialogueTrackerFeaturizer, + LabelTokenizerSingleStateFeaturizer, + MaxHistoryTrackerFeaturizer, +) +from rasa.core.interpreter import NaturalLanguageInterpreter, RegexInterpreter +from rasa.core.policies.policy import Policy +from rasa.core.constants import DEFAULT_POLICY_PRIORITY, DIALOGUE +from rasa.core.trackers import DialogueStateTracker +from rasa.utils import train_utils +from rasa.utils.tensorflow import layers +from rasa.utils.tensorflow.transformer import TransformerEncoder +from rasa.utils.tensorflow.models import RasaModel +from rasa.utils.tensorflow.model_data import RasaModelData, FeatureSignature +from rasa.utils.tensorflow.constants import ( + LABEL, + HIDDEN_LAYERS_SIZES, + TRANSFORMER_SIZE, + NUM_TRANSFORMER_LAYERS, + NUM_HEADS, + BATCH_SIZES, + BATCH_STRATEGY, + EPOCHS, + RANDOM_SEED, + RANKING_LENGTH, + LOSS_TYPE, + SIMILARITY_TYPE, + NUM_NEG, + EVAL_NUM_EXAMPLES, + EVAL_NUM_EPOCHS, + NEGATIVE_MARGIN_SCALE, + REGULARIZATION_CONSTANT, + SCALE_LOSS, + USE_MAX_NEG_SIM, + MAX_NEG_SIM, + MAX_POS_SIM, + EMBEDDING_DIMENSION, + DROP_RATE_DIALOGUE, + DROP_RATE_LABEL, + DROP_RATE_ATTENTION, + WEIGHT_SPARSITY, + KEY_RELATIVE_ATTENTION, + VALUE_RELATIVE_ATTENTION, + MAX_RELATIVE_POSITION, + SOFTMAX, + AUTO, + BALANCED, + TENSORBOARD_LOG_DIR, + TENSORBOARD_LOG_LEVEL, +) + + +logger = logging.getLogger(__name__) + +DIALOGUE_FEATURES = f"{DIALOGUE}_features" +LABEL_FEATURES = f"{LABEL}_features" +LABEL_IDS = f"{LABEL}_ids" + +SAVE_MODEL_FILE_NAME = "ted_policy" + + +class TEDPolicy(Policy): + """Transformer Embedding Dialogue (TED) Policy is described in + https://arxiv.org/abs/1910.00486. 
+ + This policy has a pre-defined architecture, which comprises the + following steps: + - concatenate user input (user intent and entities), previous system actions, + slots and active forms for each time step into an input vector to + pre-transformer embedding layer; + - feed it to transformer; + - apply a dense layer to the output of the transformer to get embeddings of a + dialogue for each time step; + - apply a dense layer to create embeddings for system actions for each time + step; + - calculate the similarity between the dialogue embedding and embedded system + actions. This step is based on the StarSpace + (https://arxiv.org/abs/1709.03856) idea. + """ + + SUPPORTS_ONLINE_TRAINING = True + + # please make sure to update the docs when changing a default parameter + defaults = { + # ## Architecture of the used neural network + # Hidden layer sizes for layers before the dialogue and label embedding layers. + # The number of hidden layers is equal to the length of the corresponding + # list. + HIDDEN_LAYERS_SIZES: {DIALOGUE: [], LABEL: []}, + # Number of units in transformer + TRANSFORMER_SIZE: 128, + # Number of transformer layers + NUM_TRANSFORMER_LAYERS: 1, + # Number of attention heads in transformer + NUM_HEADS: 4, + # If 'True' use key relative embeddings in attention + KEY_RELATIVE_ATTENTION: False, + # If 'True' use value relative embeddings in attention + VALUE_RELATIVE_ATTENTION: False, + # Max position for relative embeddings + MAX_RELATIVE_POSITION: None, + # ## Training parameters + # Initial and final batch sizes: + # Batch size will be linearly increased for each epoch. + BATCH_SIZES: [8, 32], + # Strategy used whenc creating batches. + # Can be either 'sequence' or 'balanced'. + BATCH_STRATEGY: BALANCED, + # Number of epochs to train + EPOCHS: 1, + # Set random seed to any 'int' to get reproducible results + RANDOM_SEED: None, + # ## Parameters for embeddings + # Dimension size of embedding vectors + EMBEDDING_DIMENSION: 20, + # The number of incorrect labels. The algorithm will minimize + # their similarity to the user input during training. + NUM_NEG: 20, + # Type of similarity measure to use, either 'auto' or 'cosine' or 'inner'. + SIMILARITY_TYPE: AUTO, + # The type of the loss function, either 'softmax' or 'margin'. + LOSS_TYPE: SOFTMAX, + # Number of top actions to normalize scores for loss type 'softmax'. + # Set to 0 to turn off normalization. + RANKING_LENGTH: 10, + # Indicates how similar the algorithm should try to make embedding vectors + # for correct labels. + # Should be 0.0 < ... < 1.0 for 'cosine' similarity type. + MAX_POS_SIM: 0.8, + # Maximum negative similarity for incorrect labels. + # Should be -1.0 < ... < 1.0 for 'cosine' similarity type. + MAX_NEG_SIM: -0.2, + # If 'True' the algorithm only minimizes maximum similarity over + # incorrect intent labels, used only if 'loss_type' is set to 'margin'. + USE_MAX_NEG_SIM: True, + # If 'True' scale loss inverse proportionally to the confidence + # of the correct prediction + SCALE_LOSS: True, + # ## Regularization parameters + # The scale of regularization + REGULARIZATION_CONSTANT: 0.001, + # The scale of how important is to minimize the maximum similarity + # between embeddings of different labels, + # used only if 'loss_type' is set to 'margin'. + NEGATIVE_MARGIN_SCALE: 0.8, + # Dropout rate for embedding layers of dialogue features. + DROP_RATE_DIALOGUE: 0.1, + # Dropout rate for embedding layers of label, e.g. action, features. + DROP_RATE_LABEL: 0.0, + # Dropout rate for attention. 
+ DROP_RATE_ATTENTION: 0, + # Sparsity of the weights in dense layers + WEIGHT_SPARSITY: 0.8, + # ## Evaluation parameters + # How often calculate validation accuracy. + # Small values may hurt performance, e.g. model accuracy. + EVAL_NUM_EPOCHS: 20, + # How many examples to use for hold out validation set + # Large values may hurt performance, e.g. model accuracy. + EVAL_NUM_EXAMPLES: 0, + # If you want to use tensorboard to visualize training and validation metrics, + # set this option to a valid output directory. + TENSORBOARD_LOG_DIR: None, + # Define when training metrics for tensorboard should be logged. + # Either after every epoch or for every training step. + # Valid values: 'epoch' and 'minibatch' + TENSORBOARD_LOG_LEVEL: "epoch", + } + + @staticmethod + def _standard_featurizer(max_history: Optional[int] = None) -> TrackerFeaturizer: + if max_history is None: + return FullDialogueTrackerFeaturizer(LabelTokenizerSingleStateFeaturizer()) + else: + return MaxHistoryTrackerFeaturizer( + LabelTokenizerSingleStateFeaturizer(), max_history=max_history + ) + + def __init__( + self, + featurizer: Optional[TrackerFeaturizer] = None, + priority: int = DEFAULT_POLICY_PRIORITY, + max_history: Optional[int] = None, + model: Optional[RasaModel] = None, + **kwargs: Any, + ) -> None: + """Declare instance variables with default values.""" + + if not featurizer: + featurizer = self._standard_featurizer(max_history) + + super().__init__(featurizer, priority) + + self._load_params(**kwargs) + + self.model = model + + self._label_data: Optional[RasaModelData] = None + self.data_example: Optional[Dict[Text, List[np.ndarray]]] = None + + def _load_params(self, **kwargs: Dict[Text, Any]) -> None: + self.config = copy.deepcopy(self.defaults) + self.config.update(kwargs) + + self.config = train_utils.check_deprecated_options(self.config) + + self.config = train_utils.update_similarity_type(self.config) + self.config = train_utils.update_evaluation_parameters(self.config) + + # data helpers + # noinspection PyPep8Naming + @staticmethod + def _label_ids_for_Y(data_Y: np.ndarray) -> np.ndarray: + """Prepare Y data for training: extract label_ids. + + label_ids are indices of labels, while `data_Y` contains one-hot encodings. 
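For intuition about the shapes handled here, the snippet below is a hedged toy example (the arrays are made up) showing how a batch of one-hot encoded labels becomes label ids and how those ids then gather rows from a label feature matrix, mirroring what `_label_ids_for_Y` and `_label_features_for_Y` do in the max-history featurizer case.

import numpy as np

data_Y = np.array([[0, 1, 0], [1, 0, 0]])  # one-hot encoded actions for two examples
all_label_features = np.array([[1.0, 0.0], [0.0, 1.0], [0.5, 0.5]])  # one row per action

label_ids = data_Y.argmax(axis=-1)  # -> array([1, 0])
label_features = np.stack([all_label_features[idx] for idx in label_ids])
assert label_features.shape == (2, 2)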
+ """ + + return data_Y.argmax(axis=-1) + + # noinspection PyPep8Naming + def _label_features_for_Y(self, label_ids: np.ndarray) -> np.ndarray: + """Prepare Y data for training: features for label_ids.""" + + all_label_features = self._label_data.get(LABEL_FEATURES)[0] + + is_full_dialogue_featurizer_used = len(label_ids.shape) == 2 + if is_full_dialogue_featurizer_used: + return np.stack( + [ + np.stack( + [all_label_features[label_idx] for label_idx in seq_label_ids] + ) + for seq_label_ids in label_ids + ] + ) + + # max history featurizer is used + return np.stack([all_label_features[label_idx] for label_idx in label_ids]) + + # noinspection PyPep8Naming + def _create_model_data( + self, data_X: np.ndarray, data_Y: Optional[np.ndarray] = None + ) -> RasaModelData: + """Combine all model related data into RasaModelData.""" + + label_ids = np.array([]) + Y = np.array([]) + + if data_Y is not None: + label_ids = self._label_ids_for_Y(data_Y) + Y = self._label_features_for_Y(label_ids) + # explicitly add last dimension to label_ids + # to track correctly dynamic sequences + label_ids = np.expand_dims(label_ids, -1) + + model_data = RasaModelData(label_key=LABEL_IDS) + model_data.add_features(DIALOGUE_FEATURES, [data_X]) + model_data.add_features(LABEL_FEATURES, [Y]) + model_data.add_features(LABEL_IDS, [label_ids]) + + return model_data + + def _create_label_data(self, domain: Domain) -> RasaModelData: + # encode all label_ids with policies' featurizer + state_featurizer = self.featurizer.state_featurizer + all_labels = state_featurizer.create_encoded_all_actions(domain) + all_labels = all_labels.astype(np.float32) + + label_data = RasaModelData() + label_data.add_features(LABEL_FEATURES, [all_labels]) + return label_data + + def train( + self, + training_trackers: List[DialogueStateTracker], + domain: Domain, + interpreter: NaturalLanguageInterpreter, + **kwargs: Any, + ) -> None: + """Train the policy on given training trackers.""" + + # dealing with training data + training_data = self.featurize_for_training(training_trackers, domain, **kwargs) + + self._label_data = self._create_label_data(domain) + + # extract actual training data to feed to model + model_data = self._create_model_data(training_data.X, training_data.y) + if model_data.is_empty(): + logger.error( + f"Can not train '{self.__class__.__name__}'. No data was provided. " + f"Skipping training of the policy." + ) + return + + # keep one example for persisting and loading + self.data_example = model_data.first_data_example() + + self.model = TED( + model_data.get_signature(), + self.config, + isinstance(self.featurizer, MaxHistoryTrackerFeaturizer), + self._label_data, + ) + + self.model.fit( + model_data, + self.config[EPOCHS], + self.config[BATCH_SIZES], + self.config[EVAL_NUM_EXAMPLES], + self.config[EVAL_NUM_EPOCHS], + batch_strategy=self.config[BATCH_STRATEGY], + ) + + def predict_action_probabilities( + self, + tracker: DialogueStateTracker, + domain: Domain, + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + **kwargs: Any, + ) -> List[float]: + """Predict the next action the bot should take. + + Return the list of probabilities for the next actions. 
+ """ + + if self.model is None: + return self._default_predictions(domain) + + # create model data from tracker + data_X = self.featurizer.create_X([tracker], domain) + model_data = self._create_model_data(data_X) + + output = self.model.predict(model_data) + + confidence = output["action_scores"].numpy() + # remove batch dimension and take the last prediction in the sequence + confidence = confidence[0, -1, :] + + if self.config[LOSS_TYPE] == SOFTMAX and self.config[RANKING_LENGTH] > 0: + confidence = train_utils.normalize(confidence, self.config[RANKING_LENGTH]) + + return confidence.tolist() + + def persist(self, path: Text) -> None: + """Persists the policy to a storage.""" + + if self.model is None: + logger.debug( + "Method `persist(...)` was called " + "without a trained model present. " + "Nothing to persist then!" + ) + return + + model_path = Path(path) + tf_model_file = model_path / f"{SAVE_MODEL_FILE_NAME}.tf_model" + + io_utils.create_directory_for_file(tf_model_file) + + self.featurizer.persist(path) + + self.model.save(str(tf_model_file)) + + io_utils.json_pickle( + model_path / f"{SAVE_MODEL_FILE_NAME}.priority.pkl", self.priority + ) + io_utils.pickle_dump( + model_path / f"{SAVE_MODEL_FILE_NAME}.meta.pkl", self.config + ) + io_utils.json_pickle( + model_path / f"{SAVE_MODEL_FILE_NAME}.data_example.pkl", self.data_example + ) + io_utils.json_pickle( + model_path / f"{SAVE_MODEL_FILE_NAME}.label_data.pkl", self._label_data + ) + + @classmethod + def load(cls, path: Text) -> "TEDPolicy": + """Loads a policy from the storage. + + **Needs to load its featurizer** + """ + + if not os.path.exists(path): + raise Exception( + f"Failed to load TED policy model. Path " + f"'{os.path.abspath(path)}' doesn't exist." + ) + + model_path = Path(path) + tf_model_file = model_path / f"{SAVE_MODEL_FILE_NAME}.tf_model" + + featurizer = TrackerFeaturizer.load(path) + + if not (model_path / f"{SAVE_MODEL_FILE_NAME}.data_example.pkl").is_file(): + return cls(featurizer=featurizer) + + loaded_data = io_utils.json_unpickle( + model_path / f"{SAVE_MODEL_FILE_NAME}.data_example.pkl" + ) + label_data = io_utils.json_unpickle( + model_path / f"{SAVE_MODEL_FILE_NAME}.label_data.pkl" + ) + meta = io_utils.pickle_load(model_path / f"{SAVE_MODEL_FILE_NAME}.meta.pkl") + priority = io_utils.json_unpickle( + model_path / f"{SAVE_MODEL_FILE_NAME}.priority.pkl" + ) + + model_data_example = RasaModelData(label_key=LABEL_IDS, data=loaded_data) + meta = train_utils.update_similarity_type(meta) + + model = TED.load( + str(tf_model_file), + model_data_example, + data_signature=model_data_example.get_signature(), + config=meta, + max_history_tracker_featurizer_used=isinstance( + featurizer, MaxHistoryTrackerFeaturizer + ), + label_data=label_data, + ) + + # build the graph for prediction + predict_data_example = RasaModelData( + label_key=LABEL_IDS, + data={ + feature_name: features + for feature_name, features in model_data_example.items() + if DIALOGUE in feature_name + }, + ) + model.build_for_predict(predict_data_example) + + return cls(featurizer=featurizer, priority=priority, model=model, **meta) + + +# accessing _tf_layers with any key results in key-error, disable it +# pytype: disable=key-error + + +class TED(RasaModel): + def __init__( + self, + data_signature: Dict[Text, List[FeatureSignature]], + config: Dict[Text, Any], + max_history_tracker_featurizer_used: bool, + label_data: RasaModelData, + ) -> None: + super().__init__( + name="TED", + random_seed=config[RANDOM_SEED], + 
tensorboard_log_dir=config[TENSORBOARD_LOG_DIR], + tensorboard_log_level=config[TENSORBOARD_LOG_LEVEL], + ) + + self.config = config + self.max_history_tracker_featurizer_used = max_history_tracker_featurizer_used + + # data + self.data_signature = data_signature + self._check_data() + + self.predict_data_signature = { + feature_name: features + for feature_name, features in data_signature.items() + if DIALOGUE in feature_name + } + + # optimizer + self.optimizer = tf.keras.optimizers.Adam() + + self.all_labels_embed = None + + label_batch = label_data.prepare_batch() + self.tf_label_data = self.batch_to_model_data_format( + label_batch, label_data.get_signature() + ) + + # metrics + self.action_loss = tf.keras.metrics.Mean(name="loss") + self.action_acc = tf.keras.metrics.Mean(name="acc") + self.metrics_to_log += ["loss", "acc"] + + # set up tf layers + self._tf_layers: Dict[Text : tf.keras.layers.Layer] = {} + self._prepare_layers() + + def _check_data(self) -> None: + if DIALOGUE_FEATURES not in self.data_signature: + raise ValueError( + f"No text features specified. " + f"Cannot train '{self.__class__.__name__}' model." + ) + if LABEL_FEATURES not in self.data_signature: + raise ValueError( + f"No label features specified. " + f"Cannot train '{self.__class__.__name__}' model." + ) + + def _prepare_layers(self) -> None: + self._tf_layers[f"loss.{LABEL}"] = layers.DotProductLoss( + self.config[NUM_NEG], + self.config[LOSS_TYPE], + self.config[MAX_POS_SIM], + self.config[MAX_NEG_SIM], + self.config[USE_MAX_NEG_SIM], + self.config[NEGATIVE_MARGIN_SCALE], + self.config[SCALE_LOSS], + # set to 1 to get deterministic behaviour + parallel_iterations=1 if self.random_seed is not None else 1000, + ) + self._tf_layers[f"ffnn.{DIALOGUE}"] = layers.Ffnn( + self.config[HIDDEN_LAYERS_SIZES][DIALOGUE], + self.config[DROP_RATE_DIALOGUE], + self.config[REGULARIZATION_CONSTANT], + self.config[WEIGHT_SPARSITY], + layer_name_suffix=DIALOGUE, + ) + self._tf_layers[f"ffnn.{LABEL}"] = layers.Ffnn( + self.config[HIDDEN_LAYERS_SIZES][LABEL], + self.config[DROP_RATE_LABEL], + self.config[REGULARIZATION_CONSTANT], + self.config[WEIGHT_SPARSITY], + layer_name_suffix=LABEL, + ) + self._tf_layers["transformer"] = TransformerEncoder( + self.config[NUM_TRANSFORMER_LAYERS], + self.config[TRANSFORMER_SIZE], + self.config[NUM_HEADS], + self.config[TRANSFORMER_SIZE] * 4, + self.config[REGULARIZATION_CONSTANT], + dropout_rate=self.config[DROP_RATE_DIALOGUE], + attention_dropout_rate=self.config[DROP_RATE_ATTENTION], + sparsity=self.config[WEIGHT_SPARSITY], + unidirectional=True, + use_key_relative_position=self.config[KEY_RELATIVE_ATTENTION], + use_value_relative_position=self.config[VALUE_RELATIVE_ATTENTION], + max_relative_position=self.config[MAX_RELATIVE_POSITION], + name=DIALOGUE + "_encoder", + ) + self._tf_layers[f"embed.{DIALOGUE}"] = layers.Embed( + self.config[EMBEDDING_DIMENSION], + self.config[REGULARIZATION_CONSTANT], + DIALOGUE, + self.config[SIMILARITY_TYPE], + ) + self._tf_layers[f"embed.{LABEL}"] = layers.Embed( + self.config[EMBEDDING_DIMENSION], + self.config[REGULARIZATION_CONSTANT], + LABEL, + self.config[SIMILARITY_TYPE], + ) + + def _create_all_labels_embed(self) -> Tuple[tf.Tensor, tf.Tensor]: + all_labels = self.tf_label_data[LABEL_FEATURES][0] + all_labels_embed = self._embed_label(all_labels) + + return all_labels, all_labels_embed + + def _emebed_dialogue(self, dialogue_in: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]: + """Create dialogue level embedding and mask.""" + + # mask different length 
sequences + # if there is at least one `-1` it should be masked + mask = tf.sign(tf.reduce_max(dialogue_in, axis=-1) + 1) + + dialogue = self._tf_layers[f"ffnn.{DIALOGUE}"](dialogue_in, self._training) + dialogue_transformed = self._tf_layers["transformer"]( + dialogue, 1 - tf.expand_dims(mask, axis=-1), self._training + ) + dialogue_transformed = tfa.activations.gelu(dialogue_transformed) + + if self.max_history_tracker_featurizer_used: + # pick last label if max history featurizer is used + dialogue_transformed = dialogue_transformed[:, -1:, :] + mask = mask[:, -1:] + + dialogue_embed = self._tf_layers[f"embed.{DIALOGUE}"](dialogue_transformed) + + return dialogue_embed, mask + + def _embed_label(self, label_in: Union[tf.Tensor, np.ndarray]) -> tf.Tensor: + label = self._tf_layers[f"ffnn.{LABEL}"](label_in, self._training) + return self._tf_layers[f"embed.{LABEL}"](label) + + def batch_loss( + self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]] + ) -> tf.Tensor: + batch = self.batch_to_model_data_format(batch_in, self.data_signature) + + dialogue_in = batch[DIALOGUE_FEATURES][0] + label_in = batch[LABEL_FEATURES][0] + + if self.max_history_tracker_featurizer_used: + # add time dimension if max history featurizer is used + label_in = label_in[:, tf.newaxis, :] + + all_labels, all_labels_embed = self._create_all_labels_embed() + + dialogue_embed, mask = self._emebed_dialogue(dialogue_in) + label_embed = self._embed_label(label_in) + + loss, acc = self._tf_layers[f"loss.{LABEL}"]( + dialogue_embed, label_embed, label_in, all_labels_embed, all_labels, mask + ) + + self.action_loss.update_state(loss) + self.action_acc.update_state(acc) + + return loss + + def batch_predict( + self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]] + ) -> Dict[Text, tf.Tensor]: + batch = self.batch_to_model_data_format(batch_in, self.predict_data_signature) + + dialogue_in = batch[DIALOGUE_FEATURES][0] + + if self.all_labels_embed is None: + _, self.all_labels_embed = self._create_all_labels_embed() + + dialogue_embed, mask = self._emebed_dialogue(dialogue_in) + + sim_all = self._tf_layers[f"loss.{LABEL}"].sim( + dialogue_embed[:, :, tf.newaxis, :], + self.all_labels_embed[tf.newaxis, tf.newaxis, :, :], + mask, + ) + + scores = self._tf_layers[f"loss.{LABEL}"].confidence_from_sim( + sim_all, self.config[SIMILARITY_TYPE] + ) + + return {"action_scores": scores} + + +# pytype: enable=key-error diff --git a/rasa/core/policies/two_stage_fallback.py b/rasa/core/policies/two_stage_fallback.py index cbbfc1a3a680..3ba31c13386f 100644 --- a/rasa/core/policies/two_stage_fallback.py +++ b/rasa/core/policies/two_stage_fallback.py @@ -2,10 +2,9 @@ import logging import os import typing -from typing import List, Text, Optional +from typing import List, Text, Optional, Any import rasa.utils.io -from rasa.core import utils from rasa.core.actions.action import ( ACTION_REVERT_FALLBACK_EVENTS_NAME, ACTION_DEFAULT_FALLBACK_NAME, @@ -13,12 +12,17 @@ ACTION_DEFAULT_ASK_AFFIRMATION_NAME, ACTION_LISTEN_NAME, ) + +from rasa.core.events import UserUttered, ActionExecuted + from rasa.core.constants import USER_INTENT_OUT_OF_SCOPE from rasa.core.domain import Domain, InvalidDomain +from rasa.core.interpreter import NaturalLanguageInterpreter, RegexInterpreter from rasa.core.policies.fallback import FallbackPolicy from rasa.core.policies.policy import confidence_scores_for from rasa.core.trackers import DialogueStateTracker from rasa.core.constants import FALLBACK_POLICY_PRIORITY +from rasa.nlu.constants import INTENT_NAME_KEY 
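To make the scoring step in `TED.batch_predict` above more concrete, here is a hedged, numpy-only sketch of turning dot-product similarities between one dialogue embedding and all candidate action embeddings into normalized confidences. The real policy delegates this to the `DotProductLoss` layer (`sim` and `confidence_from_sim`), and the exact normalization depends on the configured loss and similarity types, so this shows only the general idea.

import numpy as np

def toy_action_confidences(dialogue_embed: np.ndarray, label_embeds: np.ndarray) -> np.ndarray:
    """Dot-product similarity per candidate action, squashed with a stable softmax."""
    sims = label_embeds @ dialogue_embed  # shape: (num_actions,)
    exps = np.exp(sims - sims.max())
    return exps / exps.sum()

confidences = toy_action_confidences(
    np.array([1.0, 0.0]),                 # embedded dialogue state
    np.array([[0.9, 0.1], [-0.2, 0.8]]),  # embedded candidate actions
)
assert abs(confidences.sum() - 1.0) < 1e-6  # behaves like a probability distribution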
if typing.TYPE_CHECKING: from rasa.core.policies.ensemble import PolicyEnsemble @@ -78,7 +82,7 @@ def __init__( deny_suggestion_intent_name: The name of the intent which is used to detect that the user denies the suggested intents. """ - super(TwoStageFallbackPolicy, self).__init__( + super().__init__( priority, nlu_threshold, ambiguity_threshold, @@ -97,25 +101,28 @@ def validate_against_domain( return for p in ensemble.policies: - if isinstance(p, cls): - fallback_intent = getattr(p, "deny_suggestion_intent_name") - if domain is None or fallback_intent not in domain.intents: - raise InvalidDomain( - "The intent '{0}' must be present in the " - "domain file to use TwoStageFallbackPolicy. " - "Either include the intent '{0}' in your domain " - "or exclude the TwoStageFallbackPolicy from your " - "policy configuration".format(fallback_intent) - ) + if not isinstance(p, TwoStageFallbackPolicy): + continue + if domain is None or p.deny_suggestion_intent_name not in domain.intents: + raise InvalidDomain( + "The intent '{0}' must be present in the " + "domain file to use TwoStageFallbackPolicy. " + "Either include the intent '{0}' in your domain " + "or exclude the TwoStageFallbackPolicy from your " + "policy configuration".format(p.deny_suggestion_intent_name) + ) def predict_action_probabilities( - self, tracker: DialogueStateTracker, domain: Domain + self, + tracker: DialogueStateTracker, + domain: Domain, + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + **kwargs: Any, ) -> List[float]: - """Predicts the next action if NLU confidence is low. - """ + """Predicts the next action if NLU confidence is low.""" nlu_data = tracker.latest_message.parse_data - last_intent_name = nlu_data["intent"].get("name", None) + last_intent_name = nlu_data["intent"].get(INTENT_NAME_KEY, None) should_nlu_fallback = self.should_nlu_fallback( nlu_data, tracker.latest_action_name ) @@ -124,9 +131,7 @@ def predict_action_probabilities( if self._is_user_input_expected(tracker): result = confidence_scores_for(ACTION_LISTEN_NAME, 1.0, domain) elif self._has_user_denied(last_intent_name, tracker): - logger.debug( - "User '{}' denied suggested intents.".format(tracker.sender_id) - ) + logger.debug(f"User '{tracker.sender_id}' denied suggested intents.") result = self._results_for_user_denied(tracker, domain) elif user_rephrased and should_nlu_fallback: logger.debug( @@ -137,7 +142,7 @@ def predict_action_probabilities( ACTION_DEFAULT_ASK_AFFIRMATION_NAME, 1.0, domain ) elif user_rephrased: - logger.debug("User '{}' rephrased intent".format(tracker.sender_id)) + logger.debug(f"User '{tracker.sender_id}' rephrased intent") result = confidence_scores_for( ACTION_REVERT_FALLBACK_EVENTS_NAME, 1.0, domain ) @@ -175,11 +180,19 @@ def predict_action_probabilities( return result def _is_user_input_expected(self, tracker: DialogueStateTracker) -> bool: - return tracker.latest_action_name in [ + action_requires_input = tracker.latest_action_name in [ ACTION_DEFAULT_ASK_AFFIRMATION_NAME, ACTION_DEFAULT_ASK_REPHRASE_NAME, self.fallback_action_name, ] + try: + last_utterance_time = tracker.get_last_event_for(UserUttered).timestamp + last_action_time = tracker.get_last_event_for(ActionExecuted).timestamp + input_given = last_action_time < last_utterance_time + except AttributeError: + input_given = False + + return action_requires_input and not input_given def _has_user_denied( self, last_intent: Text, tracker: DialogueStateTracker @@ -214,7 +227,7 @@ def persist(self, path: Text) -> None: "deny_suggestion_intent_name": 
self.deny_suggestion_intent_name, } rasa.utils.io.create_directory_for_file(config_file) - utils.dump_obj_as_json_to_file(config_file, meta) + rasa.utils.io.dump_obj_as_json_to_file(config_file, meta) @classmethod def load(cls, path: Text) -> "FallbackPolicy": diff --git a/rasa/core/processor.py b/rasa/core/processor.py index d02e502d8791..1926f2c88672 100644 --- a/rasa/core/processor.py +++ b/rasa/core/processor.py @@ -1,35 +1,35 @@ -import json import logging import os +import time from types import LambdaType -from typing import Any, Dict, List, Optional, Text, Tuple +from typing import Any, Dict, List, Optional, Text, Tuple, Union import numpy as np -import time +from rasa.constants import DOCS_URL_POLICIES, DOCS_URL_DOMAINS from rasa.core import jobs -from rasa.core.actions.action import Action -from rasa.core.actions.action import ACTION_LISTEN_NAME, ActionExecutionRejection +from rasa.core.actions.action import ( + ACTION_LISTEN_NAME, + ACTION_SESSION_START_NAME, + Action, + ActionExecutionRejection, +) from rasa.core.channels.channel import ( CollectingOutputChannel, - UserMessage, OutputChannel, + UserMessage, ) -from rasa.core.constants import ( - ACTION_NAME_SENDER_ID_CONNECTOR_STR, - USER_INTENT_RESTART, - UTTER_PREFIX, -) +from rasa.core.constants import USER_INTENT_RESTART, UTTER_PREFIX, REQUESTED_SLOT from rasa.core.domain import Domain from rasa.core.events import ( ActionExecuted, ActionExecutionRejected, + BotUttered, Event, ReminderCancelled, ReminderScheduled, SlotSet, UserUttered, - BotUttered, ) from rasa.core.interpreter import ( INTENT_MESSAGE_PREFIX, @@ -40,15 +40,16 @@ from rasa.core.policies.ensemble import PolicyEnsemble from rasa.core.tracker_store import TrackerStore from rasa.core.trackers import DialogueStateTracker, EventVerbosity +from rasa.nlu.constants import INTENT_NAME_KEY +from rasa.utils import common as common_utils from rasa.utils.endpoints import EndpointConfig logger = logging.getLogger(__name__) - MAX_NUMBER_OF_PREDICTIONS = int(os.environ.get("MAX_NUMBER_OF_PREDICTIONS", "10")) -class MessageProcessor(object): +class MessageProcessor: def __init__( self, interpreter: NaturalLanguageInterpreter, @@ -84,13 +85,15 @@ async def handle_message( if not self.policy_ensemble or not self.domain: # save tracker state to continue conversation from this state self._save_tracker(tracker) - logger.warning( + common_utils.raise_warning( "No policy ensemble or domain set. Skipping action prediction " - "and execution." + "and execution.", + docs=DOCS_URL_POLICIES, ) return None - await self._predict_and_execute_next_action(message, tracker) + await self._predict_and_execute_next_action(message.output_channel, tracker) + # save tracker state to continue conversation from this state self._save_tracker(tracker) @@ -99,22 +102,23 @@ async def handle_message( else: return None - def predict_next(self, sender_id: Text) -> Optional[Dict[Text, Any]]: + async def predict_next(self, sender_id: Text) -> Optional[Dict[Text, Any]]: # we have a Tracker instance for each user # which maintains conversation state - tracker = self._get_tracker(sender_id) + tracker = await self.get_tracker_with_session_start(sender_id) if not tracker: logger.warning( - "Failed to retrieve or create tracker for sender " - "'{}'.".format(sender_id) + f"Failed to retrieve or create tracker for sender '{sender_id}'." ) return None if not self.policy_ensemble or not self.domain: # save tracker state to continue conversation from this state - logger.warning( - "No policy ensemble or domain set. 
Skipping action prediction " + common_utils.raise_warning( + "No policy ensemble or domain set. Skipping action prediction." + "You should set a policy before training a model.", + docs=DOCS_URL_POLICIES, ) return None @@ -132,6 +136,84 @@ def predict_next(self, sender_id: Text) -> Optional[Dict[Text, Any]]: "tracker": tracker.current_state(EventVerbosity.AFTER_RESTART), } + async def _update_tracker_session( + self, + tracker: DialogueStateTracker, + output_channel: OutputChannel, + metadata: Optional[Dict] = None, + ) -> None: + """Check the current session in `tracker` and update it if expired. + + An 'action_session_start' is run if the latest tracker session has expired, + or if the tracker does not yet contain any events (only those after the last + restart are considered). + + Args: + metadata: Data sent from client associated with the incoming user message. + tracker: Tracker to inspect. + output_channel: Output channel for potential utterances in a custom + `ActionSessionStart`. + """ + if not tracker.applied_events() or self._has_session_expired(tracker): + logger.debug( + f"Starting a new session for conversation ID '{tracker.sender_id}'." + ) + + await self._run_action( + action=self._get_action(ACTION_SESSION_START_NAME), + tracker=tracker, + output_channel=output_channel, + nlg=self.nlg, + metadata=metadata, + ) + + async def get_tracker_with_session_start( + self, + sender_id: Text, + output_channel: Optional[OutputChannel] = None, + metadata: Optional[Dict] = None, + ) -> Optional[DialogueStateTracker]: + """Get tracker for `sender_id` or create a new tracker for `sender_id`. + + If a new tracker is created, `action_session_start` is run. + + Args: + metadata: Data sent from client associated with the incoming user message. + output_channel: Output channel associated with the incoming user message. + sender_id: Conversation ID for which to fetch the tracker. + + Returns: + Tracker for `sender_id` if available, `None` otherwise. + """ + + tracker = self.get_tracker(sender_id) + if not tracker: + return None + + await self._update_tracker_session(tracker, output_channel, metadata) + + return tracker + + def get_tracker(self, conversation_id: Text) -> Optional[DialogueStateTracker]: + """Get the tracker for a conversation. + + In contrast to `get_tracker_with_session_start` this does not add any + `action_session_start` or `session_start` events at the beginning of a + conversation. + + Args: + conversation_id: The ID of the conversation for which the history should be + retrieved. + + Returns: + Tracker for the conversation. Creates an empty tracker in case it's a new + conversation. 
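The retrieval flow described above boils down to: fetch (or create) the tracker, then run `action_session_start` when the conversation has no applied events yet or the previous session has expired. A hedged, framework-free sketch of that decision, with made-up argument names, purely for illustration:

def should_start_new_session(applied_events: list, session_has_expired: bool) -> bool:
    """Mirrors the condition in `_update_tracker_session`: a brand-new conversation
    (no events after the last restart) or an expired session both trigger
    `action_session_start`."""
    return not applied_events or session_has_expired

assert should_start_new_session([], False)       # fresh conversation
assert should_start_new_session(["user"], True)  # expired session
assert not should_start_new_session(["user"], False)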
+ """ + conversation_id = conversation_id or UserMessage.DEFAULT_SENDER_ID + return self.tracker_store.get_or_create_tracker( + conversation_id, append_action_listen=False + ) + async def log_message( self, message: UserMessage, should_save_tracker: bool = True ) -> Optional[DialogueStateTracker]: @@ -147,7 +229,10 @@ async def log_message( message.text = self.message_preprocessor(message.text) # we have a Tracker instance for each user # which maintains conversation state - tracker = self._get_tracker(message.sender_id) + tracker = await self.get_tracker_with_session_start( + message.sender_id, message.output_channel, message.metadata + ) + if tracker: await self._handle_message_with_tracker(message, tracker) @@ -156,8 +241,8 @@ async def log_message( self._save_tracker(tracker) else: logger.warning( - "Failed to retrieve or create tracker for sender " - "'{}'.".format(message.sender_id) + f"Failed to retrieve or create tracker for conversation ID " + f"'{message.sender_id}'." ) return tracker @@ -173,7 +258,7 @@ async def execute_action( # we have a Tracker instance for each user # which maintains conversation state - tracker = self._get_tracker(sender_id) + tracker = await self.get_tracker_with_session_start(sender_id, output_channel) if tracker: action = self._get_action(action_name) await self._run_action( @@ -184,8 +269,8 @@ async def execute_action( self._save_tracker(tracker) else: logger.warning( - "Failed to retrieve or create tracker for sender " - "'{}'.".format(sender_id) + f"Failed to retrieve or create tracker for conversation ID " + f"'{sender_id}'." ) return tracker @@ -203,11 +288,12 @@ def predict_next_action( action = self.domain.action_for_index( max_confidence_index, self.action_endpoint ) + logger.debug( - "Predicted next action '{}' with confidence {:.2f}.".format( - action.name(), action_confidences[max_confidence_index] - ) + f"Predicted next action '{action.name()}' with confidence " + f"{action_confidences[max_confidence_index]:.2f}." ) + return action, policy, action_confidences[max_confidence_index] @staticmethod @@ -247,12 +333,11 @@ async def handle_reminder( ) -> None: """Handle a reminder that is triggered asynchronously.""" - tracker = self._get_tracker(sender_id) + tracker = await self.get_tracker_with_session_start(sender_id, output_channel) if not tracker: logger.warning( - "Failed to retrieve or create tracker for sender " - "'{}'.".format(sender_id) + f"Failed to retrieve tracker for conversation ID '{sender_id}'." ) return None @@ -262,35 +347,98 @@ async def handle_reminder( or not self._is_reminder_still_valid(tracker, reminder_event) ): logger.debug( - "Canceled reminder because it is outdated. " - "(event: {} id: {})".format( - reminder_event.action_name, reminder_event.name - ) + f"Canceled reminder because it is outdated ({reminder_event})." 
) else: - # necessary for proper featurization, otherwise the previous - # unrelated message would influence featurization - tracker.update(UserUttered.empty()) - action = self._get_action(reminder_event.action_name) - should_continue = await self._run_action( - action, tracker, output_channel, nlg + intent = reminder_event.intent + entities = reminder_event.entities or {} + await self.trigger_external_user_uttered( + intent, entities, tracker, output_channel ) - if should_continue: - user_msg = UserMessage(None, output_channel, sender_id) - await self._predict_and_execute_next_action(user_msg, tracker) - # save tracker state to continue conversation from this state - self._save_tracker(tracker) + + async def trigger_external_user_uttered( + self, + intent_name: Text, + entities: Optional[Union[List[Dict[Text, Any]], Dict[Text, Text]]], + tracker: DialogueStateTracker, + output_channel: OutputChannel, + ) -> None: + """Triggers an external message. + + Triggers an external message (like a user message, but invisible; + used, e.g., by a reminder or the trigger_intent endpoint). + + Args: + intent_name: Name of the intent to be triggered. + entities: Entities to be passed on. + tracker: The tracker to which the event should be added. + output_channel: The output channel. + """ + if isinstance(entities, list): + entity_list = entities + elif isinstance(entities, dict): + # Allow for a short-hand notation {"ent1": "val1", "ent2": "val2", ...}. + # Useful if properties like 'start', 'end', or 'extractor' are not given, + # e.g. for external events. + entity_list = [ + {"entity": ent, "value": val} for ent, val in entities.items() + ] + elif not entities: + entity_list = [] + else: + common_utils.raise_warning( + f"Invalid entity specification: {entities}. Assuming no entities." + ) + entity_list = [] + tracker.update(UserUttered.create_external(intent_name, entity_list)) + await self._predict_and_execute_next_action(output_channel, tracker) + # save tracker state to continue conversation from this state + self._save_tracker(tracker) @staticmethod - def _log_slots(tracker): + def _log_slots(tracker) -> None: # Log currently set slots slot_values = "\n".join( - ["\t{}: {}".format(s.name, s.value) for s in tracker.slots.values()] + [f"\t{s.name}: {s.value}" for s in tracker.slots.values()] ) if slot_values.strip(): - logger.debug("Current slot values: \n{}".format(slot_values)) + logger.debug(f"Current slot values: \n{slot_values}") + + def _check_for_unseen_features(self, parse_data: Dict[Text, Any]) -> None: + """Warns the user if the NLU parse data contains unrecognized features. - def _get_action(self, action_name): + Checks intents and entities picked up by the NLU interpreter + against the domain and warns the user of those that don't match. + Also considers a list of default intents that are valid but don't + need to be listed in the domain. + + Args: + parse_data: NLUInterpreter parse data to check against the domain. + """ + if not self.domain or self.domain.is_empty(): + return + + intent = parse_data["intent"][INTENT_NAME_KEY] + if intent and intent not in self.domain.intents: + common_utils.raise_warning( + f"Interpreter parsed an intent '{intent}' " + f"which is not defined in the domain. 
" + f"Please make sure all intents are listed in the domain.", + docs=DOCS_URL_DOMAINS, + ) + + entities = parse_data["entities"] or [] + for element in entities: + entity = element["entity"] + if entity and entity not in self.domain.entities: + common_utils.raise_warning( + f"Interpreter parsed an entity '{entity}' " + f"which is not defined in the domain. " + f"Please make sure all entities are listed in the domain.", + docs=DOCS_URL_DOMAINS, + ) + + def _get_action(self, action_name) -> Optional[Action]: return self.domain.action_for_name(action_name, self.action_endpoint) async def _parse_message(self, message, tracker: DialogueStateTracker = None): @@ -312,6 +460,9 @@ async def _parse_message(self, message, tracker: DialogueStateTracker = None): message.text, parse_data["intent"], parse_data["entities"] ) ) + + self._check_for_unseen_features(parse_data) + return parse_data async def _handle_message_with_tracker( @@ -342,30 +493,41 @@ async def _handle_message_with_tracker( self._log_slots(tracker) logger.debug( - "Logged UserUtterance - " - "tracker now has {} events".format(len(tracker.events)) + f"Logged UserUtterance - tracker now has {len(tracker.events)} events." ) @staticmethod def _should_handle_message(tracker: DialogueStateTracker): return ( not tracker.is_paused() - or tracker.latest_message.intent.get("name") == USER_INTENT_RESTART + or tracker.latest_message.intent.get(INTENT_NAME_KEY) == USER_INTENT_RESTART + ) + + def is_action_limit_reached( + self, num_predicted_actions: int, should_predict_another_action: bool + ) -> bool: + """Check whether the maximum number of predictions has been met. + + Args: + num_predictes_actions: Number of predicted actions. + should_predict_another_action: Whether the last executed action allows + for more actions to be predicted or not. + + Returns: + `True` if the limit of actions to predict has been reached. + """ + return ( + num_predicted_actions >= self.max_number_of_predictions + and should_predict_another_action ) async def _predict_and_execute_next_action( - self, message: UserMessage, tracker: DialogueStateTracker + self, output_channel: OutputChannel, tracker: DialogueStateTracker ): # keep taking actions decided by the policy until it chooses to 'listen' should_predict_another_action = True num_predicted_actions = 0 - def is_action_limit_reached(): - return ( - num_predicted_actions == self.max_number_of_predictions - and should_predict_another_action - ) - # action loop. predicts actions until we hit action listen while ( should_predict_another_action @@ -376,25 +538,35 @@ def is_action_limit_reached(): action, policy, confidence = self.predict_next_action(tracker) should_predict_another_action = await self._run_action( - action, tracker, message.output_channel, self.nlg, policy, confidence + action, tracker, output_channel, self.nlg, policy, confidence ) num_predicted_actions += 1 - if is_action_limit_reached(): + if self.is_action_limit_reached( + num_predicted_actions, should_predict_another_action + ): # circuit breaker was tripped logger.warning( "Circuit breaker tripped. Stopped predicting " - "more actions for sender '{}'".format(tracker.sender_id) + f"more actions for sender '{tracker.sender_id}'." 
) if self.on_circuit_break: # call a registered callback - self.on_circuit_break(tracker, message.output_channel, self.nlg) + self.on_circuit_break(tracker, output_channel, self.nlg) - # noinspection PyUnusedLocal @staticmethod - def should_predict_another_action(action_name, events): - is_listen_action = action_name == ACTION_LISTEN_NAME - return not is_listen_action + def should_predict_another_action(action_name: Text) -> bool: + """Determine whether the processor should predict another action. + + Args: + action_name: Name of the latest executed action. + + Returns: + `False` if `action_name` is `ACTION_LISTEN_NAME` or + `ACTION_SESSION_START_NAME`, otherwise `True`. + """ + + return action_name not in (ACTION_LISTEN_NAME, ACTION_SESSION_START_NAME) @staticmethod async def _send_bot_messages( @@ -433,49 +605,53 @@ async def _schedule_reminders( args=[e, tracker.sender_id, output_channel, nlg], id=e.name, replace_existing=True, - name=( - str(e.action_name) - + ACTION_NAME_SENDER_ID_CONNECTOR_STR - + tracker.sender_id - ), + name=e.scheduled_job_name(tracker.sender_id), ) @staticmethod async def _cancel_reminders( events: List[Event], tracker: DialogueStateTracker ) -> None: - """Cancel reminders by action_name""" + """Cancel reminders that match the `ReminderCancelled` event.""" - # All Reminders with the same action name will be cancelled - for e in events: - if isinstance(e, ReminderCancelled): - name_to_check = ( - str(e.action_name) - + ACTION_NAME_SENDER_ID_CONNECTOR_STR - + tracker.sender_id - ) + # All Reminders specified by ReminderCancelled events will be cancelled + for event in events: + if isinstance(event, ReminderCancelled): scheduler = await jobs.scheduler() - for j in scheduler.get_jobs(): - if j.name == name_to_check: - scheduler.remove_job(j.id) + for scheduled_job in scheduler.get_jobs(): + if event.cancels_job_with_name( + scheduled_job.name, tracker.sender_id + ): + scheduler.remove_job(scheduled_job.id) async def _run_action( - self, action, tracker, output_channel, nlg, policy=None, confidence=None - ): + self, + action, + tracker, + output_channel, + nlg, + policy=None, + confidence=None, + metadata: Optional[Dict[Text, Any]] = None, + ) -> bool: # events and return values are used to update # the tracker state after an action has been taken try: + # Here we set optional metadata to the ActionSessionStart, which will then + # be passed to the SessionStart event. Otherwise the metadata will be lost. + if action.name() == ACTION_SESSION_START_NAME: + action.metadata = metadata events = await action.run(output_channel, nlg, tracker, self.domain) except ActionExecutionRejection: events = [ActionExecutionRejected(action.name(), policy, confidence)] tracker.update(events[0]) - return self.should_predict_another_action(action.name(), events) + return self.should_predict_another_action(action.name()) except Exception as e: logger.error( - "Encountered an exception while running action '{}'. " + f"Encountered an exception while running action '{action.name()}'. " "Bot will continue, but the actions events are lost. " "Please check the logs of your action server for " - "more information.".format(action.name()) + "more information." 
) logger.debug(e, exc_info=True) events = [] @@ -490,12 +666,15 @@ async def _run_action( await self._schedule_reminders(events, tracker, output_channel, nlg) await self._cancel_reminders(events, tracker) - return self.should_predict_another_action(action.name(), events) + return self.should_predict_another_action(action.name()) - def _warn_about_new_slots(self, tracker, action_name, events): + def _warn_about_new_slots(self, tracker, action_name, events) -> None: # these are the events from that action we have seen during training - if action_name not in self.policy_ensemble.action_fingerprints: + if ( + not self.policy_ensemble + or action_name not in self.policy_ensemble.action_fingerprints + ): return fp = self.policy_ensemble.action_fingerprints[action_name] @@ -504,23 +683,24 @@ def _warn_about_new_slots(self, tracker, action_name, events): if isinstance(e, SlotSet) and e.key not in slots_seen_during_train: s = tracker.slots.get(e.key) if s and s.has_features(): - if e.key == "requested_slot" and tracker.active_form: + if e.key == REQUESTED_SLOT and tracker.active_loop: pass else: - logger.warning( - "Action '{0}' set a slot type '{1}' that " - "it never set during the training. This " - "can throw of the prediction. Make sure to " - "include training examples in your stories " - "for the different types of slots this " - "action can return. Remember: you need to " - "set the slots manually in the stories by " - "adding '- slot{{\"{1}\": {2}}}' " - "after the action." - "".format(action_name, e.key, json.dumps(e.value)) + common_utils.raise_warning( + f"Action '{action_name}' set a slot type '{e.key}' which " + f"it never set during the training. This " + f"can throw off the prediction. Make sure to " + f"include training examples in your stories " + f"for the different types of slots this " + f"action can return. Remember: you need to " + f"set the slots manually in the stories by " + f"adding '- slot{{\"{e.key}\": {e.value}}}' " + f"after the action." ) - def _log_action_on_tracker(self, tracker, action_name, events, policy, confidence): + def _log_action_on_tracker( + self, tracker, action_name, events, policy, confidence + ) -> None: # Ensures that the code still works even if a lazy programmer missed # to type `return []` at the end of an action or the run method # returns `None` for some other reason. @@ -528,9 +708,7 @@ def _log_action_on_tracker(self, tracker, action_name, events, policy, confidenc events = [] logger.debug( - "Action '{}' ended with events '{}'".format( - action_name, ["{}".format(e) for e in events] - ) + f"Action '{action_name}' ended with events '{[e for e in events]}'." ) self._warn_about_new_slots(tracker, action_name, events) @@ -547,11 +725,43 @@ def _log_action_on_tracker(self, tracker, action_name, events, policy, confidenc e.timestamp = time.time() tracker.update(e, self.domain) - def _get_tracker(self, sender_id: Text) -> Optional[DialogueStateTracker]: - sender_id = sender_id or UserMessage.DEFAULT_SENDER_ID - return self.tracker_store.get_or_create_tracker(sender_id) + def _has_session_expired(self, tracker: DialogueStateTracker) -> bool: + """Determine whether the latest session in `tracker` has expired. + + Args: + tracker: Tracker to inspect. + + Returns: + `True` if the session in `tracker` has expired, `False` otherwise. 
+ """ + + if not self.domain.session_config.are_sessions_enabled(): + # tracker has never expired if sessions are disabled + return False + + user_uttered_event: Optional[UserUttered] = tracker.get_last_event_for( + UserUttered + ) + + if not user_uttered_event: + # there is no user event so far so the session should not be considered + # expired + return False - def _save_tracker(self, tracker): + time_delta_in_seconds = time.time() - user_uttered_event.timestamp + has_expired = ( + time_delta_in_seconds / 60 + > self.domain.session_config.session_expiration_time + ) + if has_expired: + logger.debug( + f"The latest session for conversation ID '{tracker.sender_id}' has " + f"expired." + ) + + return has_expired + + def _save_tracker(self, tracker: DialogueStateTracker) -> None: self.tracker_store.save(tracker) def _prob_array_for_action( @@ -568,8 +778,7 @@ def _prob_array_for_action( def _get_next_action_probabilities( self, tracker: DialogueStateTracker ) -> Tuple[Optional[List[float]], Optional[Text]]: - """Collect predictions from ensemble and return action and predictions. - """ + """Collect predictions from ensemble and return action and predictions.""" followup_action = tracker.followup_action if followup_action: @@ -579,11 +788,11 @@ def _get_next_action_probabilities( return result else: logger.error( - "Trying to run unknown follow up action '{}'!" + f"Trying to run unknown follow-up action '{followup_action}'!" "Instead of running that, we will ignore the action " - "and predict the next action.".format(followup_action) + "and predict the next action." ) return self.policy_ensemble.probabilities_using_best_policy( - tracker, self.domain + tracker, self.domain, self.interpreter ) diff --git a/rasa/core/registry.py b/rasa/core/registry.py index f9dbbd612599..26ed019d4cb7 100644 --- a/rasa/core/registry.py +++ b/rasa/core/registry.py @@ -3,9 +3,9 @@ import logging import typing -from typing import Text, Type +from typing import Text, Type, TYPE_CHECKING -if typing.TYPE_CHECKING: +if TYPE_CHECKING: from rasa.core.policies.policy import Policy from rasa.core.featurizers import TrackerFeaturizer @@ -17,9 +17,11 @@ def policy_from_module_path(module_path: Text) -> Type["Policy"]: from rasa.utils.common import class_from_module_path try: - return class_from_module_path(module_path, lookup_path="rasa.core.policies") + return class_from_module_path( + module_path, lookup_path="rasa.core.policies.registry" + ) except ImportError: - raise ImportError("Cannot retrieve policy from path '{}'".format(module_path)) + raise ImportError(f"Cannot retrieve policy from path '{module_path}'") def featurizer_from_module_path(module_path: Text) -> Type["TrackerFeaturizer"]: @@ -29,6 +31,4 @@ def featurizer_from_module_path(module_path: Text) -> Type["TrackerFeaturizer"]: try: return class_from_module_path(module_path, lookup_path="rasa.core.featurizers") except ImportError: - raise ImportError( - "Cannot retrieve featurizer from path '{}'".format(module_path) - ) + raise ImportError(f"Cannot retrieve featurizer from path '{module_path}'") diff --git a/rasa/core/restore.py b/rasa/core/restore.py index 145a3a508290..13f8f4a53a4f 100644 --- a/rasa/core/restore.py +++ b/rasa/core/restore.py @@ -1,20 +1,19 @@ import json import logging -import warnings +import typing from difflib import SequenceMatcher +from typing import List, Text, Tuple import rasa.cli.utils import rasa.utils.io -import typing -from typing import List, Text, Tuple - from rasa.cli import utils as cli_utils from rasa.core.actions.action 
import ACTION_LISTEN_NAME from rasa.core.channels import console -from rasa.core.channels.channel import UserMessage, CollectingOutputChannel +from rasa.core.channels.channel import CollectingOutputChannel, UserMessage from rasa.core.domain import Domain from rasa.core.events import ActionExecuted, UserUttered from rasa.core.trackers import DialogueStateTracker +from rasa.utils.common import raise_warning if typing.TYPE_CHECKING: from rasa.core.agent import Agent @@ -29,10 +28,10 @@ def _check_prediction_aligns_with_story( p, a = align_lists(last_prediction, actions_between_utterances) if p != a: - warnings.warn( - "Model predicted different actions than the " - "model used to create the story! Expected: " - "{} but got {}.".format(p, a) + raise_warning( + f"The model predicted different actions than the " + f"model used to create the story! Expected: " + f"{p} but got {a}." ) diff --git a/rasa/core/run.py b/rasa/core/run.py index 1a6daef9b4e5..a06f6d9930dc 100644 --- a/rasa/core/run.py +++ b/rasa/core/run.py @@ -1,26 +1,28 @@ import asyncio import logging +import uuid +import os import shutil from functools import partial -from typing import List, Optional, Text, Union +from typing import Any, List, Optional, Text, Union -from sanic import Sanic -from sanic_cors import CORS - -import rasa.core +import rasa.core.utils import rasa.utils +import rasa.utils.common import rasa.utils.io -from rasa.core import constants, utils -from rasa.core.agent import load_agent, Agent +from rasa import model, server +from rasa.constants import ENV_SANIC_BACKLOG +from rasa.core import agent, channels, constants +from rasa.core.agent import Agent +from rasa.core.brokers.broker import EventBroker from rasa.core.channels import console from rasa.core.channels.channel import InputChannel from rasa.core.interpreter import NaturalLanguageInterpreter from rasa.core.lock_store import LockStore from rasa.core.tracker_store import TrackerStore -from rasa.core.utils import AvailableEndpoints, configure_file_logging -from rasa.model import get_model_subdirectories, get_model -from rasa.utils.common import update_sanic_log_level, class_from_module_path -from rasa.server import add_root_route +from rasa.core.utils import AvailableEndpoints +from rasa.utils.common import raise_warning +from sanic import Sanic logger = logging.getLogger() # get the root logger @@ -48,7 +50,7 @@ def create_http_input_channels( return [_create_single_channel(c, k) for c, k in all_credentials.items()] -def _create_single_channel(channel, credentials): +def _create_single_channel(channel, credentials) -> Any: from rasa.core.channels import BUILTIN_CHANNELS if channel in BUILTIN_CHANNELS: @@ -56,7 +58,7 @@ def _create_single_channel(channel, credentials): else: # try to load channel based on class name try: - input_channel_class = class_from_module_path(channel) + input_channel_class = rasa.utils.common.class_from_module_path(channel) return input_channel_class.from_credentials(credentials) except (AttributeError, ImportError): raise Exception( @@ -70,32 +72,35 @@ def _create_single_channel(channel, credentials): def _create_app_without_api(cors: Optional[Union[Text, List[Text]]] = None): app = Sanic(__name__, configure_logging=False) - add_root_route(app) - CORS(app, resources={r"/*": {"origins": cors or ""}}, automatic_options=True) + server.add_root_route(app) + server.configure_cors(app, cors) return app def configure_app( input_channels: Optional[List["InputChannel"]] = None, - cors: Optional[Union[Text, List[Text]]] = None, + cors: 
Optional[Union[Text, List[Text], None]] = None, auth_token: Optional[Text] = None, enable_api: bool = True, + response_timeout: int = constants.DEFAULT_RESPONSE_TIMEOUT, jwt_secret: Optional[Text] = None, jwt_method: Optional[Text] = None, route: Optional[Text] = "/webhooks/", port: int = constants.DEFAULT_SERVER_PORT, endpoints: Optional[AvailableEndpoints] = None, log_file: Optional[Text] = None, + conversation_id: Optional[Text] = uuid.uuid4().hex, ): """Run the agent.""" from rasa import server - configure_file_logging(logger, log_file) + rasa.core.utils.configure_file_logging(logger, log_file) if enable_api: app = server.create_app( cors_origins=cors, auth_token=auth_token, + response_timeout=response_timeout, jwt_secret=jwt_secret, jwt_method=jwt_method, endpoints=endpoints, @@ -104,12 +109,12 @@ def configure_app( app = _create_app_without_api(cors) if input_channels: - rasa.core.channels.channel.register(input_channels, app, route=route) + channels.channel.register(input_channels, app, route=route) else: input_channels = [] if logger.isEnabledFor(logging.DEBUG): - utils.list_routes(app) + rasa.core.utils.list_routes(app) # configure async loop logging async def configure_async_logging(): @@ -123,12 +128,14 @@ async def configure_async_logging(): async def run_cmdline_io(running_app: Sanic): """Small wrapper to shut down the server once cmd io is done.""" await asyncio.sleep(1) # allow server to start + await console.record_messages( - server_url=constants.DEFAULT_SERVER_FORMAT.format("http", port) + server_url=constants.DEFAULT_SERVER_FORMAT.format("http", port), + sender_id=conversation_id, ) logger.info("Killing Sanic server now.") - running_app.stop() # kill the sanic serverx + running_app.stop() # kill the sanic server app.add_task(run_cmdline_io) @@ -143,6 +150,7 @@ def serve_application( cors: Optional[Union[Text, List[Text]]] = None, auth_token: Optional[Text] = None, enable_api: bool = True, + response_timeout: int = constants.DEFAULT_RESPONSE_TIMEOUT, jwt_secret: Optional[Text] = None, jwt_method: Optional[Text] = None, endpoints: Optional[AvailableEndpoints] = None, @@ -150,8 +158,11 @@ def serve_application( log_file: Optional[Text] = None, ssl_certificate: Optional[Text] = None, ssl_keyfile: Optional[Text] = None, + ssl_ca_file: Optional[Text] = None, ssl_password: Optional[Text] = None, + conversation_id: Optional[Text] = uuid.uuid4().hex, ): + """Run the API entrypoint.""" from rasa import server if not channel and not credentials: @@ -164,19 +175,22 @@ def serve_application( cors, auth_token, enable_api, + response_timeout, jwt_secret, jwt_method, port=port, endpoints=endpoints, log_file=log_file, + conversation_id=conversation_id, ) - ssl_context = server.create_ssl_context(ssl_certificate, ssl_keyfile, ssl_password) + ssl_context = server.create_ssl_context( + ssl_certificate, ssl_keyfile, ssl_ca_file, ssl_password + ) protocol = "https" if ssl_context else "http" logger.info( - "Starting Rasa server on " - "{}".format(constants.DEFAULT_SERVER_FORMAT.format(protocol, port)) + f"Starting Rasa server on {constants.DEFAULT_SERVER_FORMAT.format(protocol, port)}" ) app.register_listener( @@ -184,15 +198,24 @@ def serve_application( "before_server_start", ) - async def clear_model_files(app: Sanic, _loop: Text) -> None: + # noinspection PyUnresolvedReferences + async def clear_model_files(_app: Sanic, _loop: Text) -> None: if app.agent.model_directory: - shutil.rmtree(app.agent.model_directory) + shutil.rmtree(_app.agent.model_directory) 
app.register_listener(clear_model_files, "after_server_stop") - update_sanic_log_level(log_file) + rasa.utils.common.update_sanic_log_level(log_file) - app.run(host="0.0.0.0", port=port, ssl=ssl_context) + app.run( + host="0.0.0.0", + port=port, + ssl=ssl_context, + backlog=int(os.environ.get(ENV_SANIC_BACKLOG, "100")), + workers=rasa.core.utils.number_of_sanic_workers( + endpoints.lock_store if endpoints else None + ), + ) # noinspection PyUnusedLocal @@ -207,25 +230,23 @@ async def load_agent_on_start( Used to be scheduled on server start (hence the `app` and `loop` arguments).""" - import rasa.core.brokers.utils as broker_utils + # noinspection PyBroadException try: - with get_model(model_path) as unpacked_model: - _, nlu_model = get_model_subdirectories(unpacked_model) - _interpreter = NaturalLanguageInterpreter.create(nlu_model, endpoints.nlu) + with model.get_model(model_path) as unpacked_model: + _, nlu_model = model.get_model_subdirectories(unpacked_model) + _interpreter = NaturalLanguageInterpreter.create(endpoints.nlu or nlu_model) except Exception: - logger.debug("Could not load interpreter from '{}'.".format(model_path)) + logger.debug(f"Could not load interpreter from '{model_path}'.") _interpreter = None - _broker = broker_utils.from_endpoint_config(endpoints.event_broker) - _tracker_store = TrackerStore.find_tracker_store( - None, endpoints.tracker_store, _broker - ) - _lock_store = LockStore.find_lock_store(endpoints.lock_store) + _broker = EventBroker.create(endpoints.event_broker) + _tracker_store = TrackerStore.create(endpoints.tracker_store, event_broker=_broker) + _lock_store = LockStore.create(endpoints.lock_store) model_server = endpoints.model if endpoints and endpoints.model else None - app.agent = await load_agent( + app.agent = await agent.load_agent( model_path, model_server=model_server, remote_storage=remote_storage, @@ -237,7 +258,7 @@ async def load_agent_on_start( ) if not app.agent: - logger.warning( + raise_warning( "Agent could not be loaded with the provided configuration. " "Load default agent without any model." 
) @@ -250,6 +271,7 @@ async def load_agent_on_start( remote_storage=remote_storage, ) + logger.info("Rasa server is up and running.") return app.agent diff --git a/rasa/core/schemas/domain.yml b/rasa/core/schemas/domain.yml index b6c7d6a23574..4428c93e0fba 100644 --- a/rasa/core/schemas/domain.yml +++ b/rasa/core/schemas/domain.yml @@ -1,5 +1,9 @@ allowempty: True mapping: + version: + type: "str" + required: False + allowempty: False intents: type: "seq" sequence: @@ -20,7 +24,7 @@ mapping: sequence: - type: "str" required: True - templates: + responses: type: "map" allowempty: True slots: @@ -30,4 +34,22 @@ mapping: type: "seq" sequence: - type: "str" + - type: "map" + allowempty: True + allowempty: True + config: + type: "map" + allowempty: True + mapping: + store_entities_as_slots: + type: "bool" + session_config: + type: "map" allowempty: True + mapping: + session_expiration_time: + type: "number" + range: + min: 0 + carry_over_slots_to_new_session: + type: "bool" diff --git a/rasa/core/schemas/stories.yml b/rasa/core/schemas/stories.yml new file mode 100644 index 000000000000..6fa74c67e973 --- /dev/null +++ b/rasa/core/schemas/stories.yml @@ -0,0 +1,126 @@ +allowempty: True +mapping: + version: + type: "str" + required: False + allowempty: False + stories: + type: "seq" + matching: "any" + sequence: + - type: "map" + mapping: + story: + type: "str" + allowempty: False + metadata: + type: "any" + required: False + steps: + type: "seq" + matching: "any" + sequence: + - type: "map" + mapping: &intent_and_entities + intent: + type: "str" + required: True + allowempty: False + entities: + type: "seq" + matching: "any" + sequence: + - type: "map" + mapping: + regex;(.*): + type: "text" + - type: "str" + - type: "map" + mapping: &active_loop + active_loop: + type: "str" + allowempty: False + - type: "map" + mapping: &action + action: + type: "str" + allowempty: False + - type: "map" + mapping: &slot_was_set_seq + slot_was_set: &slot_was_set_seq_value + type: "seq" + matching: "any" + sequence: + - type: "map" + mapping: + regex;(.*): + type: "text" + - type: "map" + mapping: + regex;(.*): + type: "bool" + - type: "map" + mapping: + regex;(.*): + type: "seq" + matching: "any" + sequence: + - type: "map" + mapping: + regex;(.*): + type: "text" + - type: "map" + mapping: + regex;(.*): + type: "bool" + - type: "str" + - type: "map" + matching-rule: 'any' + mapping: + checkpoint: + type: "str" + allowempty: False + slot_was_set: *slot_was_set_seq_value + - type: "map" + mapping: + or: + type: "seq" + matching: "any" + sequence: + - type: "map" + mapping: *intent_and_entities + rules: + type: "seq" + matching: "any" + sequence: + - type: "map" + mapping: + rule: + type: "str" + allowempty: False + steps: + type: "seq" + matching: "any" + sequence: + - type: "map" + mapping: *intent_and_entities + - type: "map" + mapping: *action + - type: "map" + mapping: *active_loop + - type: "map" + mapping: *slot_was_set_seq + condition: + type: "seq" + matching: "any" + sequence: + - type: "map" + mapping: *active_loop + - type: "map" + mapping: *slot_was_set_seq + conversation_start: + type: "bool" + allowempty: False + wait_for_user_input: + type: "bool" + allowempty: False diff --git a/rasa/core/slots.py b/rasa/core/slots.py index 08b8cd85f38e..3c530bd497f3 100644 --- a/rasa/core/slots.py +++ b/rasa/core/slots.py @@ -1,42 +1,55 @@ import logging +from typing import Any, Dict, List, NoReturn, Optional, Text, Type from rasa.core import utils -from rasa.utils.common import class_from_module_path +from typing 
import Any, Dict, List, NoReturn, Optional, Text, Type +from rasa.core.constants import DEFAULT_CATEGORICAL_SLOT_VALUE +from rasa.utils.common import class_from_module_path, raise_warning logger = logging.getLogger(__name__) -class Slot(object): +class Slot: type_name = None def __init__( - self, name, initial_value=None, value_reset_delay=None, auto_fill=True - ): + self, + name: Text, + initial_value: Any = None, + value_reset_delay: Optional[int] = None, + auto_fill: bool = True, + ) -> None: self.name = name self.value = initial_value self.initial_value = initial_value self._value_reset_delay = value_reset_delay self.auto_fill = auto_fill - def feature_dimensionality(self): + def feature_dimensionality(self) -> int: """How many features this single slot creates. The dimensionality of the array returned by `as_feature` needs to correspond to this value.""" return 1 - def has_features(self): + def add_default_value(self) -> None: + """Add a default value to a slot's user-defined values.""" + raise NotImplementedError( + "Each slot type needs to specify its own " "default value to add, if any." + ) + + def has_features(self) -> bool: """Indicate if the slot creates any features.""" return self.feature_dimensionality() != 0 - def value_reset_delay(self): + def value_reset_delay(self) -> Optional[int]: """After how many turns the slot should be reset to the initial_value. If the delay is set to `None`, the slot will keep its value forever.""" # TODO: FUTURE this needs to be implemented - slots are not reset yet return self._value_reset_delay - def as_feature(self): + def as_feature(self) -> NoReturn: raise NotImplementedError( "Each slot type needs to specify how its " "value can be converted to a feature. Slot " @@ -49,17 +62,17 @@ def as_feature(self): "".format(self.name) ) - def reset(self): + def reset(self) -> None: self.value = self.initial_value - def __str__(self): - return "{}({}: {})".format(self.__class__.__name__, self.name, self.value) + def __str__(self) -> Text: + return f"{self.__class__.__name__}({self.name}: {self.value})" - def __repr__(self): - return "<{}({}: {})>".format(self.__class__.__name__, self.name, self.value) + def __repr__(self) -> Text: + return f"<{self.__class__.__name__}({self.name}: {self.value})>" @staticmethod - def resolve_by_type(type_name): + def resolve_by_type(type_name) -> Type["Slot"]: """Returns a slots class by its type name.""" for cls in utils.all_subclasses(Slot): if cls.type_name == type_name: @@ -73,7 +86,7 @@ def resolve_by_type(type_name): "sure its module path is correct.".format(type_name) ) - def persistence_info(self): + def persistence_info(self) -> Dict[str, Any]: return { "type": utils.module_path_from_instance(self), "initial_value": self.initial_value, @@ -86,16 +99,14 @@ class FloatSlot(Slot): def __init__( self, - name, - initial_value=None, - value_reset_delay=None, - auto_fill=True, - max_value=1.0, - min_value=0.0, - ): - super(FloatSlot, self).__init__( - name, initial_value, value_reset_delay, auto_fill - ) + name: Text, + initial_value: Optional[float] = None, + value_reset_delay: Optional[int] = None, + auto_fill: bool = True, + max_value: float = 1.0, + min_value: float = 0.0, + ) -> None: + super().__init__(name, initial_value, value_reset_delay, auto_fill) self.max_value = max_value self.min_value = min_value @@ -108,13 +119,13 @@ def __init__( ) if initial_value is not None and not (min_value <= initial_value <= max_value): - logger.warning( - "Float slot ('{}') created with an initial value {}" - "outside of 
configured min ({}) and max ({}) values." - "".format(self.name, self.value, self.min_value, self.max_value) + raise_warning( + f"Float slot ('{self.name}') created with an initial value " + f"{self.value}. This value is outside of the configured min " + f"({self.min_value}) and max ({self.max_value}) values." ) - def as_feature(self): + def as_feature(self) -> List[float]: try: capped_value = max(self.min_value, min(self.max_value, float(self.value))) if abs(self.max_value - self.min_value) > 0: @@ -125,8 +136,8 @@ def as_feature(self): except (TypeError, ValueError): return [0.0] - def persistence_info(self): - d = super(FloatSlot, self).persistence_info() + def persistence_info(self) -> Dict[Text, Any]: + d = super().persistence_info() d["max_value"] = self.max_value d["min_value"] = self.min_value return d @@ -135,7 +146,7 @@ def persistence_info(self): class BooleanSlot(Slot): type_name = "bool" - def as_feature(self): + def as_feature(self) -> List[float]: try: if self.value is not None: return [1.0, float(float(self.value) != 0.0)] @@ -145,21 +156,21 @@ def as_feature(self): # we couldn't convert the value to float - using default value return [0.0, 0.0] - def feature_dimensionality(self): + def feature_dimensionality(self) -> int: return len(self.as_feature()) class TextSlot(Slot): type_name = "text" - def as_feature(self): + def as_feature(self) -> List[float]: return [1.0 if self.value is not None else 0.0] class ListSlot(Slot): type_name = "list" - def as_feature(self): + def as_feature(self) -> List[float]: try: if self.value is not None and len(self.value) > 0: return [1.0] @@ -173,10 +184,10 @@ def as_feature(self): class UnfeaturizedSlot(Slot): type_name = "unfeaturized" - def as_feature(self): + def as_feature(self) -> List[float]: return [] - def feature_dimensionality(self): + def feature_dimensionality(self) -> int: return 0 @@ -185,23 +196,26 @@ class CategoricalSlot(Slot): def __init__( self, - name, - values=None, - initial_value=None, - value_reset_delay=None, - auto_fill=True, - ): - super(CategoricalSlot, self).__init__( - name, initial_value, value_reset_delay, auto_fill - ) + name: Text, + values: Optional[List[Any]] = None, + initial_value: Any = None, + value_reset_delay: Optional[int] = None, + auto_fill: bool = True, + ) -> None: + super().__init__(name, initial_value, value_reset_delay, auto_fill) self.values = [str(v).lower() for v in values] if values else [] - def persistence_info(self): - d = super(CategoricalSlot, self).persistence_info() + def add_default_value(self) -> None: + values = set(self.values) + if DEFAULT_CATEGORICAL_SLOT_VALUE not in values: + self.values.append(DEFAULT_CATEGORICAL_SLOT_VALUE) + + def persistence_info(self) -> Dict[Text, Any]: + d = super().persistence_info() d["values"] = self.values return d - def as_feature(self): + def as_feature(self) -> List[float]: r = [0.0] * self.feature_dimensionality() try: @@ -211,31 +225,39 @@ def as_feature(self): break else: if self.value is not None: - logger.warning( - "Categorical slot '{}' is set to a value ('{}') " - "that is not specified in the domain. " - "Value will be ignored and the slot will " - "behave as if no value is set. " - "Make sure to add all values a categorical " - "slot should store to the domain." 
- "".format(self.name, self.value) - ) + if DEFAULT_CATEGORICAL_SLOT_VALUE in self.values: + i = self.values.index(DEFAULT_CATEGORICAL_SLOT_VALUE) + r[i] = 1.0 + else: + raise_warning( + f"Categorical slot '{self.name}' is set to a value " + f"('{self.value}') " + "that is not specified in the domain. " + "Value will be ignored and the slot will " + "behave as if no value is set. " + "Make sure to add all values a categorical " + "slot should store to the domain." + ) except (TypeError, ValueError): logger.exception("Failed to featurize categorical slot.") return r return r - def feature_dimensionality(self): + def feature_dimensionality(self) -> int: return len(self.values) class DataSlot(Slot): - def __init__(self, name, initial_value=None, value_reset_delay=1, auto_fill=True): - super(DataSlot, self).__init__( - name, initial_value, value_reset_delay, auto_fill - ) + def __init__( + self, + name: Text, + initial_value: Any = None, + value_reset_delay: Optional[int] = 1, + auto_fill: bool = True, + ): + super().__init__(name, initial_value, value_reset_delay, auto_fill) - def as_feature(self): + def as_feature(self) -> List[float]: raise NotImplementedError( "Each slot type needs to specify how its " "value can be converted to a feature." diff --git a/rasa/core/test.py b/rasa/core/test.py index 0f7133fbc892..abfd665144fe 100644 --- a/rasa/core/test.py +++ b/rasa/core/test.py @@ -3,38 +3,60 @@ import warnings import typing from collections import defaultdict, namedtuple -from typing import Any, Dict, List, Optional, Text, Tuple - +from typing import Any, Dict, List, Optional, Text, Tuple, Union + +import rasa.utils.io as io_utils +from rasa.core.domain import Domain +from rasa.nlu.constants import ( + EXTRACTOR, + ENTITY_ATTRIBUTE_VALUE, + ENTITY_ATTRIBUTE_TEXT, + ENTITY_ATTRIBUTE_START, + ENTITY_ATTRIBUTE_END, + ENTITY_ATTRIBUTE_TYPE, +) from rasa.constants import RESULTS_FILE, PERCENTAGE_KEY from rasa.core.utils import pad_lists_to_size from rasa.core.events import ActionExecuted, UserUttered from rasa.nlu.training_data.formats.markdown import MarkdownWriter from rasa.core.trackers import DialogueStateTracker +from rasa.nlu.training_data.formats.readerwriter import TrainingDataWriter +from rasa.utils.io import DEFAULT_ENCODING if typing.TYPE_CHECKING: from rasa.core.agent import Agent + from rasa.core.processor import MessageProcessor + -import matplotlib +CONFUSION_MATRIX_STORIES_FILE = "story_confusion_matrix.png" +REPORT_STORIES_FILE = "story_report.json" +FAILED_STORIES_FILE = "failed_stories.md" +SUCCESSFUL_STORIES_FILE = "successful_stories.md" -matplotlib.use("TkAgg") logger = logging.getLogger(__name__) -StoryEvalution = namedtuple( +StoryEvaluation = namedtuple( "StoryEvaluation", - "evaluation_store failed_stories action_list in_training_data_fraction", + [ + "evaluation_store", + "failed_stories", + "successful_stories", + "action_list", + "in_training_data_fraction", + ], ) -class EvaluationStore(object): +class EvaluationStore: """Class storing action, intent and entity predictions and targets.""" def __init__( self, - action_predictions: Optional[List[str]] = None, - action_targets: Optional[List[str]] = None, - intent_predictions: Optional[List[str]] = None, - intent_targets: Optional[List[str]] = None, + action_predictions: Optional[List[Text]] = None, + action_targets: Optional[List[Text]] = None, + intent_predictions: Optional[List[Text]] = None, + intent_targets: Optional[List[Text]] = None, entity_predictions: Optional[List[Dict[Text, Any]]] = None, entity_targets: 
Optional[List[Dict[Text, Any]]] = None, ) -> None: @@ -47,10 +69,10 @@ def __init__( def add_to_store( self, - action_predictions: Optional[List[str]] = None, - action_targets: Optional[List[str]] = None, - intent_predictions: Optional[List[str]] = None, - intent_targets: Optional[List[str]] = None, + action_predictions: Optional[Union[Text, List[Text]]] = None, + action_targets: Optional[Union[Text, List[Text]]] = None, + intent_predictions: Optional[Union[Text, List[Text]]] = None, + intent_targets: Optional[Union[Text, List[Text]]] = None, entity_predictions: Optional[List[Dict[Text, Any]]] = None, entity_targets: Optional[List[Dict[Text, Any]]] = None, ) -> None: @@ -74,7 +96,7 @@ def merge_store(self, other: "EvaluationStore") -> None: entity_targets=other.entity_targets, ) - def has_prediction_target_mismatch(self): + def has_prediction_target_mismatch(self) -> bool: return ( self.intent_predictions != self.intent_targets or self.entity_predictions != self.entity_targets @@ -88,7 +110,7 @@ def serialise(self) -> Tuple[List[Text], List[Text]]: self.action_targets + self.intent_targets + [ - MarkdownWriter._generate_entity_md(gold.get("text"), gold) + TrainingDataWriter.generate_entity(gold.get("text"), gold) for gold in self.entity_targets ] ) @@ -96,7 +118,7 @@ def serialise(self) -> Tuple[List[Text], List[Text]]: self.action_predictions + self.intent_predictions + [ - MarkdownWriter._generate_entity_md(predicted.get("text"), predicted) + TrainingDataWriter.generate_entity(predicted.get("text"), predicted) for predicted in self.entity_predictions ] ) @@ -115,16 +137,12 @@ class WronglyPredictedAction(ActionExecuted): def __init__( self, correct_action, predicted_action, policy, confidence, timestamp=None - ): + ) -> None: self.predicted_action = predicted_action - super(WronglyPredictedAction, self).__init__( - correct_action, policy, confidence, timestamp=timestamp - ) + super().__init__(correct_action, policy, confidence, timestamp=timestamp) - def as_story_string(self): - return "{} <!-- predicted: {} -->".format( - self.action_name, self.predicted_action - ) + def as_story_string(self) -> Text: + return f"{self.action_name} <!-- predicted: {self.predicted_action} -->" class EndToEndUserUtterance(UserUttered): @@ -133,8 +151,8 @@ class EndToEndUserUtterance(UserUttered): Mostly used to print the full end-to-end user message in the `failed_stories.md` output file.""" - def as_story_string(self, e2e=True): - return super(EndToEndUserUtterance, self).as_story_string(e2e=True) + def as_story_string(self, e2e: bool = True) -> Text: + return super().as_story_string(e2e=True) class WronglyClassifiedUserUtterance(UserUttered): @@ -145,7 +163,7 @@ class WronglyClassifiedUserUtterance(UserUttered): type_name = "wrong_utterance" - def __init__(self, event: UserUttered, eval_store: EvaluationStore): + def __init__(self, event: UserUttered, eval_store: EvaluationStore) -> None: if not eval_store.intent_predictions: self.predicted_intent = None @@ -155,7 +173,7 @@ def __init__(self, event: UserUttered, eval_store: EvaluationStore): intent = {"name": eval_store.intent_targets[0]} - super(WronglyClassifiedUserUtterance, self).__init__( + super().__init__( event.text, intent, eval_store.entity_targets, @@ -164,22 +182,25 @@ def __init__(self, event: UserUttered, eval_store: EvaluationStore): event.input_channel, ) - def as_story_string(self, e2e=True): + def as_story_string(self, e2e: bool = True) -> Text: from rasa.core.events import md_format_message correct_message = 
md_format_message(self.text, self.intent, self.entities) predicted_message = md_format_message( self.text, self.predicted_intent, self.predicted_entities ) - return "{}: {} <!-- predicted: {}: {} -->".format( - self.intent.get("name"), - correct_message, - self.predicted_intent, - predicted_message, + return ( + f"{self.intent.get('name')}: {correct_message} <!-- predicted: " + f"{self.predicted_intent}: {predicted_message} -->" ) -async def _generate_trackers(resource_name, agent, max_stories=None, use_e2e=False): +async def _generate_trackers( + resource_name: Text, + agent: "Agent", + max_stories: Optional[int] = None, + use_e2e: bool = False, +) -> List[Any]: from rasa.core.training.generator import TrainingDataGenerator from rasa.core import training @@ -204,9 +225,18 @@ def _clean_entity_results( cleaned_entities = [] for r in tuple(entity_results): - cleaned_entity = {"text": text} - for k in ("start", "end", "entity", "value"): + cleaned_entity = {ENTITY_ATTRIBUTE_TEXT: text} + for k in ( + ENTITY_ATTRIBUTE_START, + ENTITY_ATTRIBUTE_END, + ENTITY_ATTRIBUTE_TYPE, + ENTITY_ATTRIBUTE_VALUE, + ): if k in set(r): + if k == ENTITY_ATTRIBUTE_VALUE and EXTRACTOR in set(r): + # convert values to strings for evaluation as + # target values are all of type string + r[k] = str(r[k]) cleaned_entity[k] = r[k] cleaned_entities.append(cleaned_entity) @@ -257,44 +287,52 @@ def _collect_user_uttered_predictions( return user_uttered_eval_store -def _emulate_form_rejection(processor, partial_tracker): - from rasa.core.policies.form_policy import FormPolicy +def _emulate_form_rejection(partial_tracker: DialogueStateTracker) -> None: from rasa.core.events import ActionExecutionRejected - if partial_tracker.active_form.get("name"): - for p in processor.policy_ensemble.policies: - if isinstance(p, FormPolicy): - # emulate form rejection - partial_tracker.update( - ActionExecutionRejected(partial_tracker.active_form["name"]) - ) - # check if unhappy path is covered by the train stories - if not p.state_is_unhappy(partial_tracker, processor.domain): - # this state is not covered by the stories - del partial_tracker.events[-1] - partial_tracker.active_form["rejected"] = False + rejected_action_name: Text = partial_tracker.active_loop["name"] + partial_tracker.update(ActionExecutionRejected(rejected_action_name)) def _collect_action_executed_predictions( - processor, partial_tracker, event, fail_on_prediction_errors -): + processor: "MessageProcessor", + partial_tracker: DialogueStateTracker, + event: ActionExecuted, + fail_on_prediction_errors: bool, + circuit_breaker_tripped: bool, +) -> Tuple[EvaluationStore, Optional[Text], Optional[float]]: from rasa.core.policies.form_policy import FormPolicy action_executed_eval_store = EvaluationStore() gold = event.action_name - action, policy, confidence = processor.predict_next_action(partial_tracker) - predicted = action.name() - - if policy and predicted != gold and FormPolicy.__name__ in policy: - # FormPolicy predicted wrong action - # but it might be Ok if form action is rejected - _emulate_form_rejection(processor, partial_tracker) - # try again + if circuit_breaker_tripped: + predicted = "circuit breaker tripped" + policy = None + confidence = None + else: action, policy, confidence = processor.predict_next_action(partial_tracker) predicted = action.name() + if ( + policy + and predicted != gold + and _form_might_have_been_rejected( + processor.domain, partial_tracker, predicted + ) + ): + # Wrong action was predicted, + # but it might be Ok if form action 
is rejected. + _emulate_form_rejection(partial_tracker) + # try again + action, policy, confidence = processor.predict_next_action(partial_tracker) + + # Even if the prediction is also wrong, we don't have to undo the emulation + # of the action rejection as we know that the user explicitly specified + # that something else than the form was supposed to run. + predicted = action.name() + action_executed_eval_store.add_to_store( action_predictions=predicted, action_targets=gold ) @@ -325,10 +363,21 @@ def _collect_action_executed_predictions( return action_executed_eval_store, policy, confidence +def _form_might_have_been_rejected( + domain: Domain, tracker: DialogueStateTracker, predicted_action_name: Text +) -> bool: + return ( + tracker.active_loop.get("name") == predicted_action_name + and predicted_action_name in domain.form_names + ) + + def _predict_tracker_actions( - tracker, agent: "Agent", fail_on_prediction_errors=False, use_e2e=False -): - from rasa.core.trackers import DialogueStateTracker + tracker: DialogueStateTracker, + agent: "Agent", + fail_on_prediction_errors: bool = False, + use_e2e: bool = False, +) -> Tuple[EvaluationStore, DialogueStateTracker, List[Dict[Text, Any]]]: processor = agent.create_processor() tracker_eval_store = EvaluationStore() @@ -336,15 +385,31 @@ def _predict_tracker_actions( events = list(tracker.events) partial_tracker = DialogueStateTracker.from_events( - tracker.sender_id, events[:1], agent.domain.slots + tracker.sender_id, + events[:1], + agent.domain.slots, + sender_source=tracker.sender_source, ) tracker_actions = [] + should_predict_another_action = True + num_predicted_actions = 0 for event in events[1:]: if isinstance(event, ActionExecuted): - action_executed_result, policy, confidence = _collect_action_executed_predictions( - processor, partial_tracker, event, fail_on_prediction_errors + circuit_breaker_tripped = processor.is_action_limit_reached( + num_predicted_actions, should_predict_another_action + ) + ( + action_executed_result, + policy, + confidence, + ) = _collect_action_executed_predictions( + processor, + partial_tracker, + event, + fail_on_prediction_errors, + circuit_breaker_tripped, ) tracker_eval_store.merge_store(action_executed_result) tracker_actions.append( @@ -355,6 +420,11 @@ def _predict_tracker_actions( "confidence": confidence, } ) + should_predict_another_action = processor.should_predict_another_action( + action_executed_result.action_predictions[0] + ) + num_predicted_actions += 1 + elif use_e2e and isinstance(event, UserUttered): user_uttered_result = _collect_user_uttered_predictions( event, partial_tracker, fail_on_prediction_errors @@ -363,11 +433,13 @@ def _predict_tracker_actions( tracker_eval_store.merge_store(user_uttered_result) else: partial_tracker.update(event) + if isinstance(event, UserUttered): + num_predicted_actions = 0 return tracker_eval_store, partial_tracker, tracker_actions -def _in_training_data_fraction(action_list): +def _in_training_data_fraction(action_list: List[Dict[Text, Any]]) -> float: """Given a list of action items, returns the fraction of actions that were predicted using one of the Memoization policies.""" @@ -382,22 +454,23 @@ def _in_training_data_fraction(action_list): return len(in_training_data) / len(action_list) -def collect_story_predictions( +def _collect_story_predictions( completed_trackers: List["DialogueStateTracker"], agent: "Agent", fail_on_prediction_errors: bool = False, use_e2e: bool = False, -) -> Tuple[StoryEvalution, int]: +) -> Tuple[StoryEvaluation, 
int]: """Test the stories from a file, running them through the stored model.""" - from rasa.nlu.test import get_evaluation_metrics + from rasa.test import get_evaluation_metrics from tqdm import tqdm story_eval_store = EvaluationStore() failed = [] + success = [] correct_dialogues = [] number_of_stories = len(completed_trackers) - logger.info("Evaluating {} stories\nProgress:".format(number_of_stories)) + logger.info(f"Evaluating {number_of_stories} stories\nProgress:") action_list = [] @@ -416,6 +489,7 @@ def collect_story_predictions( correct_dialogues.append(0) else: correct_dialogues.append(1) + success.append(predicted_tracker) logger.info("Finished collecting predictions.") with warnings.catch_warnings(): @@ -428,7 +502,7 @@ def collect_story_predictions( in_training_data_fraction = _in_training_data_fraction(action_list) - log_evaluation_table( + _log_evaluation_table( [1] * len(completed_trackers), "END-TO-END" if use_e2e else "CONVERSATION", report, @@ -440,9 +514,10 @@ def collect_story_predictions( ) return ( - StoryEvalution( + StoryEvaluation( evaluation_store=story_eval_store, failed_stories=failed, + successful_stories=success, action_list=action_list, in_training_data_fraction=in_training_data_fraction, ), @@ -450,19 +525,22 @@ def collect_story_predictions( ) -def log_failed_stories(failed, out_directory): - """Take stories as a list of dicts.""" +def _log_stories( + stories: List[DialogueStateTracker], filename: Text, out_directory: Text +) -> None: + """Write given stories to the given file.""" if not out_directory: return + with open( - os.path.join(out_directory, "failed_stories.md"), "w", encoding="utf-8" + os.path.join(out_directory, filename), "w", encoding=DEFAULT_ENCODING ) as f: - if len(failed) == 0: - f.write("<!-- All stories passed -->") - else: - for failure in failed: - f.write(failure.export_stories()) - f.write("\n\n") + if not stories: + f.write("<!-- No stories found. -->") + + for story in stories: + f.write(story.export_stories(include_source=True)) + f.write("\n\n") async def test( @@ -472,13 +550,33 @@ async def test( out_directory: Optional[Text] = None, fail_on_prediction_errors: bool = False, e2e: bool = False, -): - """Run the evaluation of the stories, optionally plot the results.""" - from rasa.nlu.test import get_evaluation_metrics + disable_plotting: bool = False, + successes: bool = False, + errors: bool = True, +) -> Dict[Text, Any]: + """Run the evaluation of the stories, optionally plot the results. + + Args: + stories: the stories to evaluate on + agent: the agent + max_stories: maximum number of stories to consider + out_directory: path to directory to results to + fail_on_prediction_errors: boolean indicating whether to fail on prediction + errors or not + e2e: boolean indicating whether to use end to end evaluation or not + disable_plotting: boolean indicating whether to disable plotting or not + successes: boolean indicating whether to write down successful predictions or + not + errors: boolean indicating whether to write down incorrect predictions or not + + Returns: + Evaluation summary. 
+ """ + from rasa.test import get_evaluation_metrics completed_trackers = await _generate_trackers(stories, agent, max_stories, e2e) - story_evaluation, _ = collect_story_predictions( + story_evaluation, _ = _collect_story_predictions( completed_trackers, agent, fail_on_prediction_errors, e2e ) @@ -490,21 +588,46 @@ async def test( warnings.simplefilter("ignore", UndefinedMetricWarning) targets, predictions = evaluation_store.serialise() - report, precision, f1, accuracy = get_evaluation_metrics(targets, predictions) - if out_directory: - plot_story_evaluation( + if out_directory: + report, precision, f1, accuracy = get_evaluation_metrics( + targets, predictions, output_dict=True + ) + + report_filename = os.path.join(out_directory, REPORT_STORIES_FILE) + io_utils.dump_obj_as_json_to_file(report_filename, report) + logger.info(f"Stories report saved to {report_filename}.") + else: + report, precision, f1, accuracy = get_evaluation_metrics( + targets, predictions, output_dict=True + ) + + _log_evaluation_table( + evaluation_store.action_targets, + "ACTION", + report, + precision, + f1, + accuracy, + story_evaluation.in_training_data_fraction, + include_report=False, + ) + + if not disable_plotting: + _plot_story_evaluation( evaluation_store.action_targets, evaluation_store.action_predictions, - report, - precision, - f1, - accuracy, - story_evaluation.in_training_data_fraction, out_directory, ) - log_failed_stories(story_evaluation.failed_stories, out_directory) + if errors: + _log_stories( + story_evaluation.failed_stories, FAILED_STORIES_FILE, out_directory + ) + if successes: + _log_stories( + story_evaluation.successful_stories, SUCCESSFUL_STORIES_FILE, out_directory + ) return { "report": report, @@ -517,77 +640,62 @@ async def test( } -def log_evaluation_table( - golds, - name, - report, - precision, - f1, - accuracy, - in_training_data_fraction, - include_report=True, -): # pragma: no cover +def _log_evaluation_table( + golds: List[Any], + name: Text, + report: Dict[Text, Any], + precision: float, + f1: float, + accuracy: float, + in_training_data_fraction: float, + include_report: bool = True, +) -> None: # pragma: no cover """Log the sklearn evaluation metrics.""" - logger.info("Evaluation Results on {} level:".format(name)) - logger.info( - "\tCorrect: {} / {}".format(int(len(golds) * accuracy), len(golds)) - ) - logger.info("\tF1-Score: {:.3f}".format(f1)) - logger.info("\tPrecision: {:.3f}".format(precision)) - logger.info("\tAccuracy: {:.3f}".format(accuracy)) - logger.info("\tIn-data fraction: {:.3g}".format(in_training_data_fraction)) + logger.info(f"Evaluation Results on {name} level:") + logger.info(f"\tCorrect: {int(len(golds) * accuracy)} / {len(golds)}") + logger.info(f"\tF1-Score: {f1:.3f}") + logger.info(f"\tPrecision: {precision:.3f}") + logger.info(f"\tAccuracy: {accuracy:.3f}") + logger.info(f"\tIn-data fraction: {in_training_data_fraction:.3g}") if include_report: - logger.info("\tClassification report: \n{}".format(report)) - - -def plot_story_evaluation( - test_y, - predictions, - report, - precision, - f1, - accuracy, - in_training_data_fraction, - out_directory, -): - """Plot the results of story evaluation""" + logger.info(f"\tClassification report: \n{report}") + + +def _plot_story_evaluation( + targets: List[Text], predictions: List[Text], output_directory: Optional[Text] +) -> None: + """Plot a confusion matrix of story evaluation.""" from sklearn.metrics import confusion_matrix from sklearn.utils.multiclass import unique_labels - import matplotlib.pyplot 
as plt - from rasa.nlu.test import plot_confusion_matrix + from rasa.utils.plotting import plot_confusion_matrix - log_evaluation_table( - test_y, - "ACTION", - report, - precision, - f1, - accuracy, - in_training_data_fraction, - include_report=True, - ) + confusion_matrix_filename = CONFUSION_MATRIX_STORIES_FILE + if output_directory: + confusion_matrix_filename = os.path.join( + output_directory, confusion_matrix_filename + ) - cnf_matrix = confusion_matrix(test_y, predictions) + cnf_matrix = confusion_matrix(targets, predictions) plot_confusion_matrix( cnf_matrix, - classes=unique_labels(test_y, predictions), + classes=unique_labels(targets, predictions), title="Action Confusion matrix", + output_file=confusion_matrix_filename, ) - fig = plt.gcf() - fig.set_size_inches(int(20), int(20)) - fig.savefig(os.path.join(out_directory, "story_confmat.pdf"), bbox_inches="tight") - async def compare_models_in_dir( model_dir: Text, stories_file: Text, output: Text ) -> None: - """Evaluates multiple trained models in a directory on a test set.""" - from rasa.core import utils - import rasa.utils.io as io_utils + """Evaluate multiple trained models in a directory on a test set. + Args: + model_dir: path to directory that contains the models to evaluate + stories_file: path to the story file + output: output directory to store results to + """ number_correct = defaultdict(list) for run in io_utils.list_subdirectories(model_dir): @@ -606,112 +714,44 @@ async def compare_models_in_dir( for k, v in number_correct_in_run.items(): number_correct[k].append(v) - utils.dump_obj_as_json_to_file(os.path.join(output, RESULTS_FILE), number_correct) + io_utils.dump_obj_as_json_to_file( + os.path.join(output, RESULTS_FILE), number_correct + ) async def compare_models(models: List[Text], stories_file: Text, output: Text) -> None: - """Evaluates provided trained models on a test set.""" - from rasa.core import utils + """Evaluate provided trained models on a test set. 
+ Args: + models: list of trained model paths + stories_file: path to the story file + output: output directory to store results to + """ number_correct = defaultdict(list) for model in models: number_of_correct_stories = await _evaluate_core_model(model, stories_file) number_correct[os.path.basename(model)].append(number_of_correct_stories) - utils.dump_obj_as_json_to_file(os.path.join(output, RESULTS_FILE), number_correct) + io_utils.dump_obj_as_json_to_file( + os.path.join(output, RESULTS_FILE), number_correct + ) async def _evaluate_core_model(model: Text, stories_file: Text) -> int: from rasa.core.agent import Agent - logger.info("Evaluating model '{}'".format(model)) + logger.info(f"Evaluating model '{model}'") agent = Agent.load(model) completed_trackers = await _generate_trackers(stories_file, agent) - story_eval_store, number_of_stories = collect_story_predictions( + story_eval_store, number_of_stories = _collect_story_predictions( completed_trackers, agent ) failed_stories = story_eval_store.failed_stories return number_of_stories - len(failed_stories) -def plot_nlu_results(output: Text, number_of_examples: List[int]) -> None: - - graph_path = os.path.join(output, "nlu_model_comparison_graph.pdf") - - _plot_curve( - output, - number_of_examples, - x_label_text="Number of intent examples present during training", - y_label_text="Label-weighted average F1 score on test set", - graph_path=graph_path, - ) - - -def plot_core_results(output: Text, number_of_examples: List[int]) -> None: - - graph_path = os.path.join(output, "core_model_comparison_graph.pdf") - - _plot_curve( - output, - number_of_examples, - x_label_text="Number of stories present during training", - y_label_text="Number of correct test stories", - graph_path=graph_path, - ) - - -def _plot_curve( - output: Text, - number_of_examples: List[int], - x_label_text: Text, - y_label_text: Text, - graph_path: Text, -) -> None: - """Plot the results from a model comparison. - - Args: - output: Output directory to save resulting plots to - number_of_examples: Number of examples per run - x_label_text: text for the x axis - y_label_text: text for the y axis - graph_path: output path of the plot - """ - import matplotlib.pyplot as plt - import numpy as np - import rasa.utils.io - - ax = plt.gca() - - # load results from file - data = rasa.utils.io.read_json_file(os.path.join(output, RESULTS_FILE)) - x = number_of_examples - - # compute mean of all the runs for different configs - for label in data.keys(): - if len(data[label]) == 0: - continue - mean = np.mean(data[label], axis=0) - std = np.std(data[label], axis=0) - ax.plot(x, mean, label=label, marker=".") - ax.fill_between( - x, - [m - s for m, s in zip(mean, std)], - [m + s for m, s in zip(mean, std)], - color="#6b2def", - alpha=0.2, - ) - ax.legend(loc=4) - - ax.set_xlabel(x_label_text) - ax.set_ylabel(y_label_text) - - plt.savefig(graph_path, format="pdf") - - logger.info("Comparison graph saved to '{}'.".format(graph_path)) - - if __name__ == "__main__": raise RuntimeError( "Calling `rasa.core.test` directly is no longer supported. 
Please use " diff --git a/rasa/core/tracker_store.py b/rasa/core/tracker_store.py index b95a9c9c4e3d..f1668a423a8e 100644 --- a/rasa/core/tracker_store.py +++ b/rasa/core/tracker_store.py @@ -1,114 +1,168 @@ import contextlib +import itertools import json import logging import os import pickle -import typing -from typing import Iterator, Optional, Text, Iterable, Union, Dict - -import itertools +from datetime import datetime, timezone -# noinspection PyPep8Naming from time import sleep - +from typing import ( + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Text, + Union, + TYPE_CHECKING, +) + +from boto3.dynamodb.conditions import Key +import rasa.core.utils as core_utils from rasa.core.actions.action import ACTION_LISTEN_NAME -from rasa.core.brokers.event_channel import EventChannel +from rasa.core.brokers.broker import EventBroker +from rasa.core.constants import ( + POSTGRESQL_SCHEMA, + POSTGRESQL_MAX_OVERFLOW, + POSTGRESQL_POOL_SIZE, +) +from rasa.core.conversation import Dialogue from rasa.core.domain import Domain +from rasa.core.events import SessionStarted from rasa.core.trackers import ActionExecuted, DialogueStateTracker, EventVerbosity -from rasa.utils.common import class_from_module_path - -if typing.TYPE_CHECKING: +import rasa.cli.utils as rasa_cli_utils +from rasa.nlu.constants import INTENT_NAME_KEY +from rasa.utils.common import class_from_module_path, raise_warning, arguments_of +from rasa.utils.endpoints import EndpointConfig +import sqlalchemy as sa + +if TYPE_CHECKING: + import boto3.resources.factory.dynamodb.Table from sqlalchemy.engine.url import URL from sqlalchemy.engine.base import Engine - from sqlalchemy.orm import Session + from sqlalchemy.orm.session import Session + from sqlalchemy import Sequence + from sqlalchemy.orm.query import Query logger = logging.getLogger(__name__) +# default values of PostgreSQL pool size and max overflow +POSTGRESQL_DEFAULT_MAX_OVERFLOW = 100 +POSTGRESQL_DEFAULT_POOL_SIZE = 50 + + +class TrackerStore: + """Class to hold all of the TrackerStore classes""" -class TrackerStore(object): def __init__( - self, domain: Optional[Domain], event_broker: Optional[EventChannel] = None + self, + domain: Optional[Domain], + event_broker: Optional[EventBroker] = None, + retrieve_events_from_previous_conversation_sessions: bool = False, ) -> None: + """Create a TrackerStore. + + Args: + domain: The `Domain` to initialize the `DialogueStateTracker`. + event_broker: An event broker to publish any new events to another + destination. + retrieve_events_from_previous_conversation_sessions: If `True`, `retrieve` + will return all events (even if they are from a previous conversation + session). This setting only applies to `TrackerStore`s which usually + would only return events for the latest session. 
+ """ self.domain = domain self.event_broker = event_broker self.max_event_history = None + self.load_events_from_previous_conversation_sessions = ( + retrieve_events_from_previous_conversation_sessions + ) @staticmethod - def find_tracker_store(domain, store=None, event_broker=None): - if store is None or store.type is None: - tracker_store = InMemoryTrackerStore(domain, event_broker=event_broker) - elif store.type == "redis": - tracker_store = RedisTrackerStore( - domain=domain, host=store.url, event_broker=event_broker, **store.kwargs - ) - elif store.type == "mongod": - tracker_store = MongoTrackerStore( - domain=domain, host=store.url, event_broker=event_broker, **store.kwargs - ) - elif store.type.lower() == "sql": - tracker_store = SQLTrackerStore( - domain=domain, host=store.url, event_broker=event_broker, **store.kwargs - ) - else: - tracker_store = TrackerStore.load_tracker_from_module_string(domain, store) - - logger.debug("Connected to {}.".format(tracker_store.__class__.__name__)) - return tracker_store - - @staticmethod - def load_tracker_from_module_string(domain, store): - custom_tracker = None - try: - custom_tracker = class_from_module_path(store.type) - except (AttributeError, ImportError): - logger.warning( - "Store type '{}' not found. " - "Using InMemoryTrackerStore instead".format(store.type) - ) + def create( + obj: Union["TrackerStore", EndpointConfig, None], + domain: Optional[Domain] = None, + event_broker: Optional[EventBroker] = None, + ) -> "TrackerStore": + """Factory to create a tracker store.""" - if custom_tracker: - return custom_tracker(domain=domain, url=store.url, **store.kwargs) + if isinstance(obj, TrackerStore): + return obj else: - return InMemoryTrackerStore(domain) + return _create_from_endpoint_config(obj, domain, event_broker) + + def get_or_create_tracker( + self, + sender_id: Text, + max_event_history: Optional[int] = None, + append_action_listen: bool = True, + ) -> "DialogueStateTracker": + """Returns tracker or creates one if the retrieval returns None. - def get_or_create_tracker(self, sender_id, max_event_history=None): + Args: + sender_id: Conversation ID associated with the requested tracker. + max_event_history: Value to update the tracker store's max event history to. + append_action_listen: Whether or not to append an initial `action_listen`. + """ tracker = self.retrieve(sender_id) self.max_event_history = max_event_history if tracker is None: - tracker = self.create_tracker(sender_id) + tracker = self.create_tracker( + sender_id, append_action_listen=append_action_listen + ) return tracker - def init_tracker(self, sender_id): + def init_tracker(self, sender_id: Text) -> "DialogueStateTracker": + """Returns a Dialogue State Tracker""" return DialogueStateTracker( sender_id, self.domain.slots if self.domain else None, max_event_history=self.max_event_history, ) - def create_tracker(self, sender_id, append_action_listen=True): - """Creates a new tracker for the sender_id. + def create_tracker( + self, sender_id: Text, append_action_listen: bool = True + ) -> DialogueStateTracker: + """Creates a new tracker for `sender_id`. + + The tracker begins with a `SessionStarted` event and is initially listening. + + Args: + sender_id: Conversation ID associated with the tracker. + append_action_listen: Whether or not to append an initial `action_listen`. + + Returns: + The newly created tracker for `sender_id`. 
- The tracker is initially listening.""" + """ tracker = self.init_tracker(sender_id) + if tracker: if append_action_listen: tracker.update(ActionExecuted(ACTION_LISTEN_NAME)) + self.save(tracker) + return tracker def save(self, tracker): + """Save method that will be overridden by specific tracker""" raise NotImplementedError() def retrieve(self, sender_id: Text) -> Optional[DialogueStateTracker]: + """Retrieve method that will be overridden by specific tracker""" raise NotImplementedError() def stream_events(self, tracker: DialogueStateTracker) -> None: + """Streams events to a message broker""" offset = self.number_of_existing_events(tracker.sender_id) - evts = tracker.events - for evt in list(itertools.islice(evts, offset, len(evts))): + events = tracker.events + for event in list(itertools.islice(events, offset, len(events))): body = {"sender_id": tracker.sender_id} - body.update(evt.as_dict()) + body.update(event.as_dict()) self.event_broker.publish(body) def number_of_existing_events(self, sender_id: Text) -> int: @@ -117,51 +171,88 @@ def number_of_existing_events(self, sender_id: Text) -> int: return len(old_tracker.events) if old_tracker else 0 def keys(self) -> Iterable[Text]: + """Returns the set of values for the tracker store's primary key""" raise NotImplementedError() @staticmethod - def serialise_tracker(tracker): + def serialise_tracker(tracker: DialogueStateTracker) -> Text: + """Serializes the tracker, returns representation of the tracker.""" dialogue = tracker.as_dialogue() - return pickle.dumps(dialogue) - def deserialise_tracker(self, sender_id, _json) -> Optional[DialogueStateTracker]: - dialogue = pickle.loads(_json) + return json.dumps(dialogue.as_dict()) + + @staticmethod + def _deserialise_dialogue_from_pickle( + sender_id: Text, serialised_tracker: bytes + ) -> Dialogue: + + logger.warning( + f"Found pickled tracker for " + f"conversation ID '{sender_id}'. Deserialisation of pickled " + f"trackers will be deprecated in version 2.0. Rasa will perform any " + f"future save operations of this tracker using json serialisation." 
+ ) + return pickle.loads(serialised_tracker) + + def deserialise_tracker( + self, sender_id: Text, serialised_tracker: Union[Text, bytes] + ) -> Optional[DialogueStateTracker]: + """Deserializes the tracker and returns it.""" + tracker = self.init_tracker(sender_id) - if tracker: - tracker.recreate_from_dialogue(dialogue) - return tracker - else: + if not tracker: return None + try: + dialogue = Dialogue.from_parameters(json.loads(serialised_tracker)) + except UnicodeDecodeError: + dialogue = self._deserialise_dialogue_from_pickle( + sender_id, serialised_tracker + ) + + tracker.recreate_from_dialogue(dialogue) + + return tracker + class InMemoryTrackerStore(TrackerStore): + """Stores conversation history in memory""" + def __init__( - self, domain: Domain, event_broker: Optional[EventChannel] = None + self, domain: Domain, event_broker: Optional[EventBroker] = None ) -> None: self.store = {} - super(InMemoryTrackerStore, self).__init__(domain, event_broker) + super().__init__(domain, event_broker) def save(self, tracker: DialogueStateTracker) -> None: + """Updates and saves the current conversation state""" if self.event_broker: self.stream_events(tracker) serialised = InMemoryTrackerStore.serialise_tracker(tracker) self.store[tracker.sender_id] = serialised def retrieve(self, sender_id: Text) -> Optional[DialogueStateTracker]: + """ + Args: + sender_id: the message owner ID + + Returns: + DialogueStateTracker + """ if sender_id in self.store: - logger.debug("Recreating tracker for id '{}'".format(sender_id)) + logger.debug(f"Recreating tracker for id '{sender_id}'") return self.deserialise_tracker(sender_id, self.store[sender_id]) else: - logger.debug("Creating a new tracker for id '{}'.".format(sender_id)) + logger.debug(f"Creating a new tracker for id '{sender_id}'.") return None def keys(self) -> Iterable[Text]: + """Returns sender_ids of the Tracker Store in memory""" return self.store.keys() class RedisTrackerStore(TrackerStore): - def keys(self) -> Iterable[Text]: - return self.red.keys() + """Stores conversation history in Redis""" def __init__( self, @@ -169,18 +260,21 @@ def __init__( host="localhost", port=6379, db=0, - password=None, - event_broker=None, - record_exp=None, + password: Optional[Text] = None, + event_broker: Optional[EventBroker] = None, + record_exp: Optional[float] = None, + use_ssl: bool = False, ): - import redis - self.red = redis.StrictRedis(host=host, port=port, db=db, password=password) + self.red = redis.StrictRedis( + host=host, port=port, db=db, password=password, ssl=use_ssl + ) self.record_exp = record_exp - super(RedisTrackerStore, self).__init__(domain, event_broker) + super().__init__(domain, event_broker) def save(self, tracker, timeout=None): + """Saves the current conversation state""" if self.event_broker: self.stream_events(tracker) @@ -191,24 +285,145 @@ def save(self, tracker, timeout=None): self.red.set(tracker.sender_id, serialised_tracker, ex=timeout) def retrieve(self, sender_id): + """ + Args: + sender_id: the message owner ID + + Returns: + DialogueStateTracker + """ stored = self.red.get(sender_id) if stored is not None: return self.deserialise_tracker(sender_id, stored) else: return None + def keys(self) -> Iterable[Text]: + """Returns keys of the Redis Tracker Store""" + return self.red.keys() + + +class DynamoTrackerStore(TrackerStore): + """Stores conversation history in DynamoDB""" + + def __init__( + self, + domain: Domain, + table_name: Text = "states", + region: Text = "us-east-1", + event_broker: Optional[EndpointConfig] = 
None, + ): + """Initialize `DynamoTrackerStore`. + + Args: + domain: Domain associated with this tracker store. + table_name: The name of the DynamoDB table, does not need to be present a + priori. + region: The name of the region associated with the client. + A client is associated with a single region. + event_broker: An event broker used to publish events. + """ + import boto3 + + self.client = boto3.client("dynamodb", region_name=region) + self.region = region + self.table_name = table_name + self.db = self.get_or_create_table(table_name) + super().__init__(domain, event_broker) + + def get_or_create_table( + self, table_name: Text + ) -> "boto3.resources.factory.dynamodb.Table": + """Returns table or creates one if the table name is not in the table list""" + import boto3 + + dynamo = boto3.resource("dynamodb", region_name=self.region) + try: + self.client.describe_table(TableName=table_name) + except self.client.exceptions.ResourceNotFoundException: + table = dynamo.create_table( + TableName=self.table_name, + KeySchema=[ + {"AttributeName": "sender_id", "KeyType": "HASH"}, + {"AttributeName": "session_date", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "sender_id", "AttributeType": "S"}, + {"AttributeName": "session_date", "AttributeType": "N"}, + ], + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + + # Wait until the table exists. + table.meta.client.get_waiter("table_exists").wait(TableName=table_name) + return dynamo.Table(table_name) + + def save(self, tracker): + """Saves the current conversation state""" + if self.event_broker: + self.stream_events(tracker) + self.db.put_item(Item=self.serialise_tracker(tracker)) + + def serialise_tracker(self, tracker: "DialogueStateTracker") -> Dict: + """Serializes the tracker, returns object with decimal types""" + d = tracker.as_dialogue().as_dict() + d.update( + { + "sender_id": tracker.sender_id, + "session_date": int(datetime.now(tz=timezone.utc).timestamp()), + } + ) + # DynamoDB cannot store `float`s, so we'll convert them to `Decimal`s + return core_utils.replace_floats_with_decimals(d) + + def retrieve(self, sender_id: Text) -> Optional[DialogueStateTracker]: + """Create a tracker from all previously stored events.""" + # Retrieve dialogues for a sender_id in reverse-chronological order based on + # the session_date sort key + dialogues = self.db.query( + KeyConditionExpression=Key("sender_id").eq(sender_id), + Limit=1, + ScanIndexForward=False, + )["Items"] + + if not dialogues: + return None + + events = dialogues[0].get("events", []) + + # `float`s are stored as `Decimal` objects - we need to convert them back + events_with_floats = core_utils.replace_decimals_with_floats(events) + + return DialogueStateTracker.from_dict( + sender_id, events_with_floats, self.domain.slots + ) + + def keys(self) -> Iterable[Text]: + """Returns sender_ids of the DynamoTrackerStore""" + return [ + i["sender_id"] + for i in self.db.scan(ProjectionExpression="sender_id")["Items"] + ] + class MongoTrackerStore(TrackerStore): + """ + Stores conversation history in Mongo + + Property methods: + conversations: returns the current conversation + """ + def __init__( self, - domain, - host="mongodb://localhost:27017", - db="rasa", - username=None, - password=None, - auth_source="admin", - collection="conversations", - event_broker=None, + domain: Domain, + host: Optional[Text] = "mongodb://localhost:27017", + db: Optional[Text] = "rasa", + username: Optional[Text] = None, + password: Optional[Text] = 
None, + auth_source: Optional[Text] = "admin", + collection: Optional[Text] = "conversations", + event_broker: Optional[EventBroker] = None, ): from pymongo.database import Database from pymongo import MongoClient @@ -224,33 +439,105 @@ def __init__( self.db = Database(self.client, db) self.collection = collection - super(MongoTrackerStore, self).__init__(domain, event_broker) + super().__init__(domain, event_broker) self._ensure_indices() @property def conversations(self): + """Returns the current conversation""" return self.db[self.collection] def _ensure_indices(self): + """Create an index on the sender_id""" self.conversations.create_index("sender_id") + @staticmethod + def _current_tracker_state_without_events(tracker: DialogueStateTracker) -> Dict: + # get current tracker state and remove `events` key from state + # since events are pushed separately in the `update_one()` operation + state = tracker.current_state(EventVerbosity.ALL) + state.pop("events", None) + + return state + def save(self, tracker, timeout=None): + """Saves the current conversation state""" if self.event_broker: self.stream_events(tracker) - state = tracker.current_state(EventVerbosity.ALL) + additional_events = self._additional_events(tracker) self.conversations.update_one( - {"sender_id": tracker.sender_id}, {"$set": state}, upsert=True + {"sender_id": tracker.sender_id}, + { + "$set": self._current_tracker_state_without_events(tracker), + "$push": { + "events": {"$each": [e.as_dict() for e in additional_events]} + }, + }, + upsert=True, ) - def retrieve(self, sender_id): + def _additional_events(self, tracker: DialogueStateTracker) -> Iterator: + """Return events from the tracker which aren't currently stored. + + Args: + tracker: Tracker to inspect. + + Returns: + List of serialised events that aren't currently stored. + + """ + + stored = self.conversations.find_one({"sender_id": tracker.sender_id}) or {} + all_events = self._events_from_serialized_tracker(stored) + number_events_since_last_session = len( + self._events_since_last_session_start(all_events) + ) + + return itertools.islice( + tracker.events, number_events_since_last_session, len(tracker.events) + ) + + @staticmethod + def _events_from_serialized_tracker(serialised: Dict) -> List[Dict]: + return serialised.get("events", []) + + @staticmethod + def _events_since_last_session_start(events: List[Dict]) -> List[Dict]: + """Retrieve events since and including the latest `SessionStart` event. + + Args: + events: All events for a conversation ID. + + Returns: + List of serialised events since and including the latest `SessionStarted` + event. Returns all events if no such event is found. + + """ + + events_after_session_start = [] + for event in reversed(events): + events_after_session_start.append(event) + if event["event"] == SessionStarted.type_name: + break + + return list(reversed(events_after_session_start)) + + def retrieve(self, sender_id: Text) -> Optional[DialogueStateTracker]: + """ + Args: + sender_id: the message owner ID + + Returns: + `DialogueStateTracker` + """ stored = self.conversations.find_one({"sender_id": sender_id}) # look for conversations which have used an `int` sender_id in the past # and update them. 
- if stored is None and sender_id.isdigit(): + if not stored and sender_id.isdigit(): from pymongo import ReturnDocument stored = self.conversations.find_one_and_update( @@ -259,25 +546,113 @@ def retrieve(self, sender_id): return_document=ReturnDocument.AFTER, ) - if stored is not None: - if self.domain: - return DialogueStateTracker.from_dict( - sender_id, stored.get("events"), self.domain.slots - ) - else: - logger.warning( - "Can't recreate tracker from mongo storage " - "because no domain is set. Returning `None` " - "instead." - ) - return None - else: - return None + if not stored: + return + + events = self._events_from_serialized_tracker(stored) + if not self.load_events_from_previous_conversation_sessions: + events = self._events_since_last_session_start(events) + + return DialogueStateTracker.from_dict(sender_id, events, self.domain.slots) def keys(self) -> Iterable[Text]: + """Returns sender_ids of the Mongo Tracker Store""" return [c["sender_id"] for c in self.conversations.find()] +def _create_sequence(table_name: Text) -> "Sequence": + """Creates a sequence object for a specific table name. + + If using Oracle you will need to create a sequence in your database, + as described here: https://rasa.com/docs/rasa/api/tracker-stores/#sqltrackerstore + Args: + table_name: The name of the table, which gets a Sequence assigned + + Returns: A `Sequence` object + """ + + from sqlalchemy.ext.declarative import declarative_base + + sequence_name = f"{table_name}_seq" + Base = declarative_base() + return sa.Sequence(sequence_name, metadata=Base.metadata, optional=True) + + +def is_postgresql_url(url: Union[Text, "URL"]) -> bool: + """Determine whether `url` configures a PostgreSQL connection. + + Args: + url: SQL connection URL. + + Returns: + `True` if `url` is a PostgreSQL connection URL. + """ + if isinstance(url, str): + return "postgresql" in url + + return url.drivername == "postgresql" + + +def create_engine_kwargs(url: Union[Text, "URL"]) -> Dict[Text, Union[Text, int]]: + """Get `sqlalchemy.create_engine()` kwargs. + + Args: + url: SQL connection URL. + + Returns: + kwargs to be passed into `sqlalchemy.create_engine()`. + """ + if not is_postgresql_url(url): + return {} + + kwargs = {} + + schema_name = os.environ.get(POSTGRESQL_SCHEMA) + + if schema_name: + logger.debug(f"Using PostgreSQL schema '{schema_name}'.") + kwargs["connect_args"] = {"options": f"-csearch_path={schema_name}"} + + # pool_size and max_overflow can be set to control the number of + # connections that are kept in the connection pool. Not available + # for SQLite, and only tested for PostgreSQL. See + # https://docs.sqlalchemy.org/en/13/core/pooling.html#sqlalchemy.pool.QueuePool + kwargs["pool_size"] = int( + os.environ.get(POSTGRESQL_POOL_SIZE, POSTGRESQL_DEFAULT_POOL_SIZE) + ) + kwargs["max_overflow"] = int( + os.environ.get(POSTGRESQL_MAX_OVERFLOW, POSTGRESQL_DEFAULT_MAX_OVERFLOW) + ) + + return kwargs + + +def ensure_schema_exists(session: "Session") -> None: + """Ensure that the requested PostgreSQL schema exists in the database. + + Args: + session: Session used to inspect the database. + + Raises: + `ValueError` if the requested schema does not exist. 
+ """ + schema_name = os.environ.get(POSTGRESQL_SCHEMA) + + if not schema_name: + return + + engine = session.get_bind() + + if is_postgresql_url(engine.url): + query = sa.exists( + sa.select([(sa.text("schema_name"))]) + .select_from(sa.text("information_schema.schemata")) + .where(sa.text(f"schema_name = '{schema_name}'")) + ) + if not session.query(query).scalar(): + raise ValueError(schema_name) + + class SQLTrackerStore(TrackerStore): """Store which can save and retrieve trackers from an SQL database.""" @@ -286,17 +661,19 @@ class SQLTrackerStore(TrackerStore): Base = declarative_base() class SQLEvent(Base): - from sqlalchemy import Column, Integer, String, Float, Text + """Represents an event in the SQL Tracker Store""" __tablename__ = "events" - id = Column(Integer, primary_key=True) - sender_id = Column(String(255), nullable=False, index=True) - type_name = Column(String(255), nullable=False) - timestamp = Column(Float) - intent_name = Column(String(255)) - action_name = Column(String(255)) - data = Column(Text) + # `create_sequence` is needed to create a sequence for databases that + # don't autoincrement Integer primary keys (e.g. Oracle) + id = sa.Column(sa.Integer, _create_sequence(__tablename__), primary_key=True) + sender_id = sa.Column(sa.String(255), nullable=False, index=True) + type_name = sa.Column(sa.String(255), nullable=False) + timestamp = sa.Column(sa.Float) + intent_name = sa.Column(sa.String(255)) + action_name = sa.Column(sa.String(255)) + data = sa.Column(sa.Text) def __init__( self, @@ -307,37 +684,27 @@ def __init__( db: Text = "rasa.db", username: Text = None, password: Text = None, - event_broker: Optional[EventChannel] = None, + event_broker: Optional[EventBroker] = None, login_db: Optional[Text] = None, query: Optional[Dict] = None, ) -> None: - from sqlalchemy.orm import sessionmaker - from sqlalchemy import create_engine import sqlalchemy.exc engine_url = self.get_db_url( dialect, host, port, db, username, password, login_db, query ) + + self.engine = sa.engine.create_engine( + engine_url, **create_engine_kwargs(engine_url) + ) + logger.debug( - "Attempting to connect to database via '{}'.".format(repr(engine_url)) + f"Attempting to connect to database via '{repr(self.engine.url)}'." ) # Database might take a while to come up while True: try: - # pool_size and max_overflow can be set to control the number of - # connections that are kept in the connection pool. Not available - # for SQLite, and only tested for postgresql. See - # https://docs.sqlalchemy.org/en/13/core/pooling.html#sqlalchemy.pool.QueuePool - if dialect == "postgresql": - self.engine = create_engine( - engine_url, - pool_size=int(os.environ.get("SQL_POOL_SIZE", "50")), - max_overflow=int(os.environ.get("SQL_MAX_OVERFLOW", "100")), - ) - else: - self.engine = create_engine(engine_url) - # if `login_db` has been provided, use current channel with # that database to create working database `db` if login_db: @@ -352,21 +719,21 @@ def __init__( # Several Rasa services started in parallel may attempt to # create tables at the same time. That is okay so long as # the first services finishes the table creation. 
- logger.error("Could not create tables: {}".format(e)) + logger.error(f"Could not create tables: {e}") - self.sessionmaker = sessionmaker(bind=self.engine) + self.sessionmaker = sa.orm.session.sessionmaker(bind=self.engine) break except ( sqlalchemy.exc.OperationalError, sqlalchemy.exc.IntegrityError, - ) as e: + ) as error: - logger.warning(e) + logger.warning(error) sleep(5) - logger.debug("Connection to SQL database '{}' successful.".format(db)) + logger.debug(f"Connection to SQL database '{db}' successful.") - super(SQLTrackerStore, self).__init__(domain, event_broker) + super().__init__(domain, event_broker) @staticmethod def get_db_url( @@ -379,7 +746,7 @@ def get_db_url( login_db: Optional[Text] = None, query: Optional[Dict] = None, ) -> Union[Text, "URL"]: - """Builds an SQLAlchemy `URL` object representing the parameters needed + """Build an SQLAlchemy `URL` object representing the parameters needed to connect to an SQL database. Args: @@ -396,25 +763,23 @@ def get_db_url( Returns: URL ready to be used with an SQLAlchemy `Engine` object. - """ - from urllib.parse import urlsplit - from sqlalchemy.engine.url import URL + from urllib import parse # Users might specify a url in the host - parsed = urlsplit(host or "") - if parsed.scheme: + if host and "://" in host: + # assumes this is a complete database host name including + # e.g. `postgres://...` return host - - if host: + elif host: # add fake scheme to properly parse components - parsed = urlsplit("schema://" + host) + parsed = parse.urlsplit(f"scheme://{host}") # users might include the port in the url port = parsed.port or port host = parsed.hostname or host - return URL( + return sa.engine.url.URL( dialect, username, password, @@ -425,8 +790,7 @@ def get_db_url( ) def _create_database_and_update_engine(self, db: Text, engine_url: "URL"): - """Create databse `db` and update engine to reflect the updated - `engine_url`.""" + """Create databse `db` and update engine to reflect the updated `engine_url`.""" from sqlalchemy import create_engine @@ -444,15 +808,13 @@ def _create_database(engine: "Engine", db: Text): cursor = conn.connection.cursor() cursor.execute("COMMIT") - cursor.execute( - ("SELECT 1 FROM pg_catalog.pg_database WHERE datname = '{}'".format(db)) - ) + cursor.execute(f"SELECT 1 FROM pg_catalog.pg_database WHERE datname = '{db}'") exists = cursor.fetchone() if not exists: try: - cursor.execute("CREATE DATABASE {}".format(db)) + cursor.execute(f"CREATE DATABASE {db}") except psycopg2.IntegrityError as e: - logger.error("Could not create database '{}': {}".format(db, e)) + logger.error(f"Could not create database '{db}': {e}") cursor.close() conn.close() @@ -462,11 +824,20 @@ def session_scope(self): """Provide a transactional scope around a series of operations.""" session = self.sessionmaker() try: + ensure_schema_exists(session) yield session + except ValueError as e: + rasa_cli_utils.print_error_and_exit( + f"Requested PostgreSQL schema '{e}' was not found in the database. To " + f"continue, please create the schema by running 'CREATE DATABASE {e};' " + f"or unset the '{POSTGRESQL_SCHEMA}' environment variable in order to " + f"use the default schema. Exiting application." 
+ ) finally: session.close() def keys(self) -> Iterable[Text]: + """Returns sender_ids of the SQLTrackerStore""" with self.session_scope() as session: sender_ids = session.query(self.SQLEvent.sender_id).distinct().all() return [sender_id for (sender_id,) in sender_ids] @@ -475,28 +846,59 @@ def retrieve(self, sender_id: Text) -> Optional[DialogueStateTracker]: """Create a tracker from all previously stored events.""" with self.session_scope() as session: - query = session.query(self.SQLEvent) - result = ( - query.filter_by(sender_id=sender_id) - .order_by(self.SQLEvent.timestamp) - .all() - ) - events = [json.loads(event.data) for event in result] + serialised_events = self._event_query(session, sender_id).all() + + events = [json.loads(event.data) for event in serialised_events] if self.domain and len(events) > 0: - logger.debug("Recreating tracker from sender id '{}'".format(sender_id)) + logger.debug(f"Recreating tracker from sender id '{sender_id}'") return DialogueStateTracker.from_dict( sender_id, events, self.domain.slots ) else: logger.debug( - "Can't retrieve tracker matching " - "sender id '{}' from SQL storage. " - "Returning `None` instead.".format(sender_id) + f"Can't retrieve tracker matching " + f"sender id '{sender_id}' from SQL storage. " + f"Returning `None` instead." ) return None + def _event_query(self, session: "Session", sender_id: Text) -> "Query": + """Provide the query to retrieve the conversation events for a specific sender. + + Args: + session: Current database session. + sender_id: Sender id whose conversation events should be retrieved. + + Returns: + Query to get the conversation events. + """ + # Subquery to find the timestamp of the latest `SessionStarted` event + session_start_sub_query = ( + session.query(sa.func.max(self.SQLEvent.timestamp).label("session_start")) + .filter( + self.SQLEvent.sender_id == sender_id, + self.SQLEvent.type_name == SessionStarted.type_name, + ) + .subquery() + ) + + event_query = session.query(self.SQLEvent).filter( + self.SQLEvent.sender_id == sender_id + ) + if not self.load_events_from_previous_conversation_sessions: + event_query = event_query.filter( + # Find events after the latest `SessionStarted` event or return all + # events + sa.or_( + self.SQLEvent.timestamp >= session_start_sub_query.c.session_start, + session_start_sub_query.c.session_start.is_(None), + ) + ) + + return event_query.order_by(self.SQLEvent.timestamp) + def save(self, tracker: DialogueStateTracker) -> None: """Update database with events from the current conversation.""" @@ -509,8 +911,9 @@ def save(self, tracker: DialogueStateTracker) -> None: for event in events: data = event.as_dict() - - intent = data.get("parse_data", {}).get("intent", {}).get("name") + intent = ( + data.get("parse_data", {}).get("intent", {}).get(INTENT_NAME_KEY) + ) action = data.get("name") timestamp = data.get("timestamp") @@ -527,21 +930,180 @@ def save(self, tracker: DialogueStateTracker) -> None: ) session.commit() - logger.debug( - "Tracker with sender_id '{}' " - "stored to database".format(tracker.sender_id) - ) + logger.debug(f"Tracker with sender_id '{tracker.sender_id}' stored to database") def _additional_events( self, session: "Session", tracker: DialogueStateTracker ) -> Iterator: """Return events from the tracker which aren't currently stored.""" - n_events = ( - session.query(self.SQLEvent.sender_id) - .filter_by(sender_id=tracker.sender_id) - .count() - or 0 + number_of_events_since_last_session = self._event_query( + session, tracker.sender_id + ).count() + 
return itertools.islice( + tracker.events, number_of_events_since_last_session, len(tracker.events) ) - return itertools.islice(tracker.events, n_events, len(tracker.events)) + +class FailSafeTrackerStore(TrackerStore): + """Wraps a tracker store so that we can fallback to a different tracker store in + case of errors.""" + + def __init__( + self, + tracker_store: TrackerStore, + on_tracker_store_error: Optional[Callable[[Exception], None]] = None, + fallback_tracker_store: Optional[TrackerStore] = None, + ) -> None: + """Create a `FailSafeTrackerStore`. + + Args: + tracker_store: Primary tracker store. + on_tracker_store_error: Callback which is called when there is an error + in the primary tracker store. + """ + + self._fallback_tracker_store: Optional[TrackerStore] = fallback_tracker_store + self._tracker_store = tracker_store + self._on_tracker_store_error = on_tracker_store_error + + super().__init__(tracker_store.domain, tracker_store.event_broker) + + @property + def domain(self) -> Optional[Domain]: + return self._tracker_store.domain + + @domain.setter + def domain(self, domain: Optional[Domain]) -> None: + self._tracker_store.domain = domain + + if self._fallback_tracker_store: + self._fallback_tracker_store.domain = domain + + @property + def fallback_tracker_store(self) -> TrackerStore: + if not self._fallback_tracker_store: + self._fallback_tracker_store = InMemoryTrackerStore( + self._tracker_store.domain, self._tracker_store.event_broker + ) + + return self._fallback_tracker_store + + def on_tracker_store_error(self, error: Exception) -> None: + if self._on_tracker_store_error: + self._on_tracker_store_error(error) + else: + logger.error( + f"Error happened when trying to save conversation tracker to " + f"'{self._tracker_store.__class__.__name__}'. Falling back to use " + f"the '{InMemoryTrackerStore.__name__}'. Please " + f"investigate the following error: {error}." 
+ ) + + def retrieve(self, sender_id: Text) -> Optional[DialogueStateTracker]: + try: + return self._tracker_store.retrieve(sender_id) + except Exception as e: + self.on_tracker_store_error(e) + return None + + def keys(self) -> Iterable[Text]: + try: + return self._tracker_store.keys() + except Exception as e: + self.on_tracker_store_error(e) + return [] + + def save(self, tracker: DialogueStateTracker) -> None: + try: + self._tracker_store.save(tracker) + except Exception as e: + self.on_tracker_store_error(e) + self.fallback_tracker_store.save(tracker) + + +def _create_from_endpoint_config( + endpoint_config: Optional[EndpointConfig] = None, + domain: Optional[Domain] = None, + event_broker: Optional[EventBroker] = None, +) -> "TrackerStore": + """Given an endpoint configuration, create a proper tracker store object.""" + + domain = domain or Domain.empty() + + if endpoint_config is None or endpoint_config.type is None: + # default tracker store if no type is set + tracker_store = InMemoryTrackerStore(domain, event_broker) + elif endpoint_config.type.lower() == "redis": + tracker_store = RedisTrackerStore( + domain=domain, + host=endpoint_config.url, + event_broker=event_broker, + **endpoint_config.kwargs, + ) + elif endpoint_config.type.lower() == "mongod": + tracker_store = MongoTrackerStore( + domain=domain, + host=endpoint_config.url, + event_broker=event_broker, + **endpoint_config.kwargs, + ) + elif endpoint_config.type.lower() == "sql": + tracker_store = SQLTrackerStore( + domain=domain, + host=endpoint_config.url, + event_broker=event_broker, + **endpoint_config.kwargs, + ) + elif endpoint_config.type.lower() == "dynamo": + tracker_store = DynamoTrackerStore( + domain=domain, event_broker=event_broker, **endpoint_config.kwargs + ) + else: + tracker_store = _load_from_module_name_in_endpoint_config( + domain, endpoint_config, event_broker + ) + + logger.debug(f"Connected to {tracker_store.__class__.__name__}.") + + return tracker_store + + +def _load_from_module_name_in_endpoint_config( + domain: Domain, store: EndpointConfig, event_broker: Optional[EventBroker] = None +) -> "TrackerStore": + """Initializes a custom tracker. + + Defaults to the InMemoryTrackerStore if the module path can not be found. + + Args: + domain: defines the universe in which the assistant operates + store: the specific tracker store + event_broker: an event broker to publish events + + Returns: + a tracker store from a specified type in a stores endpoint configuration + """ + + try: + tracker_store_class = class_from_module_path(store.type) + init_args = arguments_of(tracker_store_class.__init__) + if "url" in init_args and "host" not in init_args: + # DEPRECATION EXCEPTION - remove in 2.1 + raise Exception( + "The `url` initialization argument for custom tracker stores has " + "been removed. Your custom tracker store should take a `host` " + "argument in its `__init__()` instead." + ) + else: + store.kwargs["host"] = store.url + + return tracker_store_class( + domain=domain, event_broker=event_broker, **store.kwargs + ) + except (AttributeError, ImportError): + raise_warning( + f"Tracker store with type '{store.type}' not found. " + f"Using `InMemoryTrackerStore` instead." 
+ ) + return InMemoryTrackerStore(domain) diff --git a/rasa/core/trackers.py b/rasa/core/trackers.py index c2061110e060..547087971ee1 100644 --- a/rasa/core/trackers.py +++ b/rasa/core/trackers.py @@ -2,8 +2,26 @@ import logging from collections import deque from enum import Enum -from typing import Dict, Text, Any, Optional, Iterator, Generator, Type, List +from typing import ( + Dict, + Text, + Any, + Optional, + Iterator, + Generator, + Type, + List, + Deque, + Iterable, + Union, +) +from rasa.nlu.constants import ( + ENTITY_ATTRIBUTE_VALUE, + ENTITY_ATTRIBUTE_TYPE, + ENTITY_ATTRIBUTE_ROLE, + ENTITY_ATTRIBUTE_GROUP, +) from rasa.core import events # pytype: disable=pyi-error from rasa.core.actions.action import ACTION_LISTEN_NAME # pytype: disable=pyi-error from rasa.core.conversation import Dialogue # pytype: disable=pyi-error @@ -16,14 +34,20 @@ ActionReverted, UserUtteranceReverted, BotUttered, - Form, + ActiveLoop, + SessionStarted, + ActionExecutionRejected, ) from rasa.core.domain import Domain # pytype: disable=pyi-error from rasa.core.slots import Slot +from rasa.utils import common as common_utils logger = logging.getLogger(__name__) +ACTIVE_LOOP_KEY = "active_loop" + + class EventVerbosity(Enum): """Filter on which events to include in tracker dumps.""" @@ -49,15 +73,15 @@ class AnySlotDict(dict): This only uses the generic slot type! This means certain functionality wont work, e.g. properly featurizing the slot.""" - def __missing__(self, key): + def __missing__(self, key) -> Slot: value = self[key] = Slot(key) return value - def __contains__(self, key): + def __contains__(self, key) -> bool: return True -class DialogueStateTracker(object): +class DialogueStateTracker: """Maintains the state of a conversation. The field max_event_history will only give you these last events, @@ -86,13 +110,21 @@ def from_events( evts: List[Event], slots: Optional[List[Slot]] = None, max_event_history: Optional[int] = None, + sender_source: Optional[Text] = None, ): - tracker = cls(sender_id, slots, max_event_history) + tracker = cls(sender_id, slots, max_event_history, sender_source) for e in evts: tracker.update(e) return tracker - def __init__(self, sender_id, slots, max_event_history=None): + def __init__( + self, + sender_id: Text, + slots: Optional[Iterable[Slot]], + max_event_history: Optional[int] = None, + sender_source: Optional[Text] = None, + is_rule_tracker: bool = False, + ) -> None: """Initialize the tracker. 
A set of events can be stored externally, and we will run through all @@ -107,9 +139,13 @@ def __init__(self, sender_id, slots, max_event_history=None): self.sender_id = sender_id # slots that can be filled in this domain if slots is not None: - self.slots = {slot.name: copy.deepcopy(slot) for slot in slots} + self.slots = {slot.name: copy.copy(slot) for slot in slots} else: self.slots = AnySlotDict() + # file source of the messages + self.sender_source = sender_source + # whether the tracker belongs to a rule-based data + self.is_rule_tracker = is_rule_tracker ### # current state of the tracker - MUST be re-creatable by processing @@ -125,7 +161,7 @@ def __init__(self, sender_id, slots, max_event_history=None): self.latest_message = None self.latest_bot_utterance = None self._reset() - self.active_form = {} + self.active_loop: Dict[Text, Union[Text, bool, Dict, None]] = {} ### # Public tracker interface @@ -135,15 +171,9 @@ def current_state( ) -> Dict[Text, Any]: """Return the current tracker state as an object.""" - if event_verbosity == EventVerbosity.ALL: - evts = [e.as_dict() for e in self.events] - elif event_verbosity == EventVerbosity.AFTER_RESTART: - evts = [e.as_dict() for e in self.events_after_latest_restart()] - elif event_verbosity == EventVerbosity.APPLIED: - evts = [e.as_dict() for e in self.applied_events()] - else: - evts = None - + _events = self._events_for_verbosity(event_verbosity) + if _events: + _events = [e.as_dict() for e in _events] latest_event_time = None if len(self.events) > 0: latest_event_time = self.events[-1].timestamp @@ -155,50 +185,75 @@ def current_state( "latest_event_time": latest_event_time, "followup_action": self.followup_action, "paused": self.is_paused(), - "events": evts, + "events": _events, "latest_input_channel": self.get_latest_input_channel(), - "active_form": self.active_form, + ACTIVE_LOOP_KEY: self.active_loop, "latest_action_name": self.latest_action_name, } + def _events_for_verbosity( + self, event_verbosity: EventVerbosity + ) -> Optional[List[Event]]: + if event_verbosity == EventVerbosity.ALL: + return list(self.events) + if event_verbosity == EventVerbosity.AFTER_RESTART: + return self.events_after_latest_restart() + if event_verbosity == EventVerbosity.APPLIED: + return self.applied_events() + + return None + def past_states(self, domain) -> deque: """Generate the past states of this tracker based on the history.""" generated_states = domain.states_for_tracker_history(self) - return deque((frozenset(s.items()) for s in generated_states)) + return deque(frozenset(s.items()) for s in generated_states) - def change_form_to(self, form_name: Text) -> None: - """Activate or deactivate a form""" - if form_name is not None: - self.active_form = { - "name": form_name, + def change_loop_to(self, loop_name: Text) -> None: + """Set the currently active loop. + + Args: + loop_name: The name of loop which should be marked as active. + """ + if loop_name is not None: + self.active_loop = { + "name": loop_name, "validate": True, "rejected": False, "trigger_message": self.latest_message.parse_data, } else: - self.active_form = {} + self.active_loop = {} + + def change_form_to(self, form_name: Text) -> None: + common_utils.raise_warning( + "`change_form_to` is deprecated and will be removed " + "in future versions. 
Please use `change_loop_to` " + "instead.", + category=DeprecationWarning, + ) + self.change_loop_to(form_name) def set_form_validation(self, validate: bool) -> None: """Toggle form validation""" - self.active_form["validate"] = validate + self.active_loop["validate"] = validate def reject_action(self, action_name: Text) -> None: - """Notify active form that it was rejected""" - if action_name == self.active_form.get("name"): - self.active_form["rejected"] = True + """Notify active loop that it was rejected""" + if action_name == self.active_loop.get("name"): + self.active_loop["rejected"] = True def set_latest_action_name(self, action_name: Text) -> None: """Set latest action name and reset form validation and rejection parameters """ self.latest_action_name = action_name - if self.active_form.get("name"): - # reset form validation if some form is active - self.active_form["validate"] = True - if action_name == self.active_form.get("name"): - # reset form rejection if it was predicted again - self.active_form["rejected"] = False + if self.active_loop.get("name"): + # reset form validation if some loop is active + self.active_loop["validate"] = True + if action_name == self.active_loop.get("name"): + # reset loop rejection if it was predicted again + self.active_loop["rejected"] = False def current_slot_values(self) -> Dict[Text, Any]: """Return the currently set values of the slots""" @@ -210,20 +265,37 @@ def get_slot(self, key: Text) -> Optional[Any]: if key in self.slots: return self.slots[key].value else: - logger.info("Tried to access non existent slot '{}'".format(key)) + logger.info(f"Tried to access non existent slot '{key}'") return None - def get_latest_entity_values(self, entity_type: Text) -> Iterator[Text]: - """Get entity values found for the passed entity name in latest msg. + def get_latest_entity_values( + self, + entity_type: Text, + entity_role: Optional[Text] = None, + entity_group: Optional[Text] = None, + ) -> Iterator[Text]: + """Get entity values found for the passed entity type and optional role and + group in latest message. If you are only interested in the first entity of a given type use `next(tracker.get_latest_entity_values("my_entity_name"), None)`. - If no entity is found `None` is the default result.""" + If no entity is found `None` is the default result. + + Args: + entity_type: the entity type of interest + entity_role: optional entity role of interest + entity_group: optional entity group of interest + + Returns: + Entity values. + """ return ( - x.get("value") + x.get(ENTITY_ATTRIBUTE_VALUE) for x in self.latest_message.entities - if x.get("entity") == entity_type + if x.get(ENTITY_ATTRIBUTE_TYPE) == entity_type + and (entity_group is None or x.get(ENTITY_ATTRIBUTE_GROUP) == entity_group) + and (entity_role is None or x.get(ENTITY_ATTRIBUTE_ROLE) == entity_role) ) def get_latest_input_channel(self) -> Optional[Text]: @@ -262,105 +334,134 @@ def init_copy(self) -> "DialogueStateTracker": ) def generate_all_prior_trackers( - self + self, ) -> Generator["DialogueStateTracker", None, None]: """Returns a generator of the previous trackers of this tracker. 
- The resulting array is representing - the trackers before each action.""" + The resulting array is representing the trackers before each action.""" tracker = self.init_copy() - ignored_trackers = [] - latest_message = tracker.latest_message - - for i, event in enumerate(self.applied_events()): - if isinstance(event, UserUttered): - if tracker.active_form.get("name") is None: - # store latest user message before the form - latest_message = event - - elif isinstance(event, Form): - # form got either activated or deactivated, so override - # tracker's latest message - tracker.latest_message = latest_message - - elif isinstance(event, ActionExecuted): - # yields the intermediate state - if tracker.active_form.get("name") is None: - yield tracker - - elif tracker.active_form.get("rejected"): - for tr in ignored_trackers: - yield tr - ignored_trackers = [] - - if not tracker.active_form.get( - "validate" - ) or event.action_name != tracker.active_form.get("name"): - # persist latest user message - # that was rejected by the form - latest_message = tracker.latest_message - else: - # form was called with validation, so - # override tracker's latest message - tracker.latest_message = latest_message - - yield tracker - - elif event.action_name != tracker.active_form.get("name"): - # it is not known whether the form will be - # successfully executed, so store this tracker for later - tr = tracker.copy() - # form was called with validation, so - # override tracker's latest message - tr.latest_message = latest_message - ignored_trackers.append(tr) - - if event.action_name == tracker.active_form.get("name"): - # the form was successfully executed, so - # remove all stored trackers - ignored_trackers = [] + for event in self.applied_events(): + + if isinstance(event, ActionExecuted): + yield tracker tracker.update(event) - # yields the final state - if tracker.active_form.get("name") is None: - yield tracker - elif tracker.active_form.get("rejected"): - for tr in ignored_trackers: - yield tr - yield tracker + yield tracker def applied_events(self) -> List[Event]: """Returns all actions that should be applied - w/o reverted events.""" - def undo_till_previous(event_type, done_events): - """Removes events from `done_events` until the first - occurrence `event_type` is found which is also removed.""" - # list gets modified - hence we need to copy events! - for e in reversed(done_events[:]): - del done_events[-1] - if isinstance(e, event_type): - break + loop_names = [ + event.name + for event in self.events + if isinstance(event, ActiveLoop) and event.name + ] applied_events = [] + for event in self.events: - if isinstance(event, Restarted): + if isinstance(event, (Restarted, SessionStarted)): applied_events = [] elif isinstance(event, ActionReverted): - undo_till_previous(ActionExecuted, applied_events) + self._undo_till_previous(ActionExecuted, applied_events) elif isinstance(event, UserUtteranceReverted): # Seeing a user uttered event automatically implies there was # a listen event right before it, so we'll first rewind the # user utterance, then get the action right before it (also removes # the `action_listen` action right before it). 
- undo_till_previous(UserUttered, applied_events) - undo_till_previous(ActionExecuted, applied_events) + self._undo_till_previous(UserUttered, applied_events) + self._undo_till_previous(ActionExecuted, applied_events) + elif ( + isinstance(event, ActionExecuted) + and event.action_name in loop_names + and not self._first_loop_execution_or_unhappy_path( + event.action_name, applied_events + ) + ): + self._undo_till_previous_loop_execution( + event.action_name, applied_events + ) else: applied_events.append(event) + return applied_events + @staticmethod + def _undo_till_previous(event_type: Type[Event], done_events: List[Event]) -> None: + """Removes events from `done_events` until the first occurrence `event_type` + is found which is also removed.""" + # list gets modified - hence we need to copy events! + for e in reversed(done_events[:]): + del done_events[-1] + if isinstance(e, event_type): + break + + def _first_loop_execution_or_unhappy_path( + self, loop_action_name: Text, applied_events: List[Event] + ) -> bool: + next_action: Optional[Text] = None + + for event in reversed(applied_events): + # Stop looking for a previous loop execution if there is a loop deactivation + # event because it means that the current loop is running for the first + # time and previous loop events belong to different loops. + if isinstance(event, ActiveLoop) and event.name is None: + return True + + if self._is_within_unhappy_path(loop_action_name, event, next_action): + return True + + if isinstance(event, ActionExecuted): + # We found a previous execution of the loop and we are not within an + # unhappy path. + if event.action_name == loop_action_name: + return False + + # Remember the action as we need that to check whether we might be + # within an unhappy path. + next_action = event.action_name + + return True + + @staticmethod + def _is_within_unhappy_path( + loop_action_name: Text, event: Event, next_action_in_the_future: Optional[Text] + ) -> bool: + # When actual users are talking to the action has to return an + # `ActionExecutionRejected` in order to enter an unhappy path. + loop_was_rejected_previously = ( + isinstance(event, ActionExecutionRejected) + and event.action_name == loop_action_name + ) + # During the policy training there are no `ActionExecutionRejected` events + # which let us see whether we are within an unhappy path. Hence, we check if a + # different action was executed instead of the loop after last user utterance. + other_action_after_latest_user_utterance = ( + isinstance(event, UserUttered) + and next_action_in_the_future is not None + and next_action_in_the_future != loop_action_name + ) + + return loop_was_rejected_previously or other_action_after_latest_user_utterance + + @staticmethod + def _undo_till_previous_loop_execution( + loop_action_name: Text, done_events: List[Event] + ) -> None: + offset = 0 + for e in reversed(done_events[:]): + if isinstance(e, ActionExecuted) and e.action_name == loop_action_name: + break + + if isinstance(e, (ActionExecuted, UserUttered)): + del done_events[-1 - offset] + else: + # Remember events which aren't unfeaturized to get the index right + offset += 1 + def replay_events(self) -> None: """Update the tracker based on a list of events.""" @@ -377,15 +478,15 @@ def recreate_from_dialogue(self, dialogue: Dialogue) -> None: if not isinstance(dialogue, Dialogue): raise ValueError( - "story {0} is not of type Dialogue. " - "Have you deserialized it?".format(dialogue) + f"story {dialogue} is not of type Dialogue. 
" + f"Have you deserialized it?" ) self._reset() self.events.extend(dialogue.events) self.replay_events() - def copy(self): + def copy(self) -> "DialogueStateTracker": """Creates a duplicate of this tracker""" return self.travel_back_in_time(float("inf")) @@ -427,26 +528,34 @@ def update(self, event: Event, domain: Optional[Domain] = None) -> None: for e in domain.slots_for_entities(event.parse_data["entities"]): self.update(e) - def export_stories(self, e2e=False) -> Text: + def export_stories(self, e2e: bool = False, include_source: bool = False) -> Text: """Dump the tracker as a story in the Rasa Core story format. Returns the dumped tracker as a string.""" from rasa.core.training.structures import Story - story = Story.from_events(self.applied_events(), self.sender_id) + story_name = ( + f"{self.sender_id} ({self.sender_source})" + if include_source + else self.sender_id + ) + story = Story.from_events(self.applied_events(), story_name) return story.as_story_string(flat=True, e2e=e2e) def export_stories_to_file(self, export_path: Text = "debug.md") -> None: """Dump the tracker as a story to a file.""" + import rasa.utils.io - with open(export_path, "a", encoding="utf-8") as f: - f.write(self.export_stories() + "\n") + rasa.utils.io.write_text_file( + self.export_stories() + "\n", export_path, append=True + ) def get_last_event_for( self, event_type: Type[Event], action_names_to_exclude: List[Text] = None, skip: int = 0, + event_verbosity: EventVerbosity = EventVerbosity.APPLIED, ) -> Optional[Event]: """Gets the last event of a given type which was actually applied. @@ -456,6 +565,7 @@ def get_last_event_for( should be excluded from the results. Can be used to skip `action_listen` events. skip: Skips n possible results before return an event. + event_verbosity: Which `EventVerbosity` should be used to search for events. Returns: event which matched the query or `None` if no event matched. @@ -466,16 +576,18 @@ def get_last_event_for( def filter_function(e: Event): has_instance = isinstance(e, event_type) excluded = isinstance(e, ActionExecuted) and e.action_name in to_exclude - return has_instance and not excluded - filtered = filter(filter_function, reversed(self.applied_events())) + filtered = filter( + filter_function, reversed(self._events_for_verbosity(event_verbosity) or []) + ) + for i in range(skip): next(filtered, None) return next(filtered, None) - def last_executed_action_has(self, name: Text, skip=0) -> bool: + def last_executed_action_has(self, name: Text, skip: int = 0) -> bool: """Returns whether last `ActionExecuted` event had a specific name. Args: @@ -486,7 +598,7 @@ def last_executed_action_has(self, name: Text, skip=0) -> bool: `True` if last executed action had name `name`, otherwise `False`. """ - last = self.get_last_event_for( + last: Optional[ActionExecuted] = self.get_last_event_for( ActionExecuted, action_names_to_exclude=[ACTION_LISTEN_NAME], skip=skip ) return last is not None and last.action_name == name @@ -505,7 +617,7 @@ def _reset(self) -> None: self.latest_message = UserUttered.empty() self.latest_bot_utterance = BotUttered.empty() self.followup_action = ACTION_LISTEN_NAME - self.active_form = {} + self.active_loop = {} def _reset_slots(self) -> None: """Set all the slots to their initial value.""" @@ -520,24 +632,23 @@ def _set_slot(self, key: Text, value: Any) -> None: self.slots[key].value = value else: logger.error( - "Tried to set non existent slot '{}'. Make sure you " - "added all your slots to your domain file." 
- "".format(key) + f"Tried to set non existent slot '{key}'. Make sure you " + f"added all your slots to your domain file." ) - def _create_events(self, evts: List[Event]) -> deque: + def _create_events(self, evts: List[Event]) -> Deque[Event]: if evts and not isinstance(evts[0], Event): # pragma: no cover raise ValueError("events, if given, must be a list of events") return deque(evts, self._max_event_history) - def __eq__(self, other): + def __eq__(self, other) -> bool: if isinstance(self, type(other)): return other.events == self.events and self.sender_id == other.sender_id else: return False - def __ne__(self, other): + def __ne__(self, other) -> bool: return not self.__eq__(other) def trigger_followup_action(self, action: Text) -> None: @@ -566,3 +677,13 @@ def _merge_slots( if e["entity"] in self.slots.keys() ] return new_slots + + def active_loop_name(self) -> Optional[Text]: + """Get the name of the currently active loop. + + Returns: `None` if no active loop or the name of the currently active loop. + """ + if not self.active_loop: + return None + + return self.active_loop.get("name") diff --git a/rasa/core/train.py b/rasa/core/train.py index 9fe1c7c83a2f..cac846c66b76 100644 --- a/rasa/core/train.py +++ b/rasa/core/train.py @@ -1,18 +1,20 @@ import argparse +import asyncio import logging import os import tempfile import typing from typing import Dict, Optional, Text, Union, List +import rasa.utils.io from rasa.constants import NUMBER_OF_TRAINING_STORIES_FILE, PERCENTAGE_KEY from rasa.core.domain import Domain +from rasa.importers.importer import TrainingDataImporter from rasa.utils.common import TempDirectoryPath if typing.TYPE_CHECKING: from rasa.core.interpreter import NaturalLanguageInterpreter from rasa.core.utils import AvailableEndpoints - from rasa.importers.importer import TrainingDataImporter logger = logging.getLogger(__name__) @@ -24,10 +26,9 @@ async def train( output_path: Text, interpreter: Optional["NaturalLanguageInterpreter"] = None, endpoints: "AvailableEndpoints" = None, - dump_stories: bool = False, policy_config: Optional[Union[Text, Dict]] = None, - exclusion_percentage: int = None, - kwargs: Optional[Dict] = None, + exclusion_percentage: Optional[int] = None, + additional_arguments: Optional[Dict] = None, ): from rasa.core.agent import Agent from rasa.core import config, utils @@ -36,8 +37,8 @@ async def train( if not endpoints: endpoints = AvailableEndpoints() - if not kwargs: - kwargs = {} + if not additional_arguments: + additional_arguments = {} policies = config.load(policy_config) @@ -49,8 +50,8 @@ async def train( policies=policies, ) - data_load_args, kwargs = utils.extract_args( - kwargs, + data_load_args, additional_arguments = utils.extract_args( + additional_arguments, { "use_story_concatenation", "unique_last_num_states", @@ -59,12 +60,11 @@ async def train( "debug_plots", }, ) - training_data = await agent.load_data( training_resource, exclusion_percentage=exclusion_percentage, **data_load_args ) - agent.train(training_data, **kwargs) - agent.persist(output_path, dump_stories) + agent.train(training_data, **additional_arguments) + agent.persist(output_path) return agent @@ -76,8 +76,7 @@ async def train_comparison_models( exclusion_percentages: Optional[List] = None, policy_configs: Optional[List] = None, runs: int = 1, - dump_stories: bool = False, - kwargs: Optional[Dict] = None, + additional_arguments: Optional[Dict] = None, ): """Train multiple models for comparison of policies""" from rasa import model @@ -106,18 +105,18 @@ async def 
train_comparison_models( ) with TempDirectoryPath(tempfile.mkdtemp()) as train_path: - await train( - domain, - file_importer, - train_path, - policy_config=policy_config, - exclusion_percentage=percentage, - kwargs=kwargs, - dump_stories=dump_stories, + _, new_fingerprint = await asyncio.gather( + train( + domain, + file_importer, + train_path, + policy_config=policy_config, + exclusion_percentage=percentage, + additional_arguments=additional_arguments, + ), + model.model_fingerprint(file_importer), ) - new_fingerprint = await model.model_fingerprint(file_importer) - output_dir = os.path.join(output_path, "run_" + str(r + 1)) model_name = config_name + PERCENTAGE_KEY + str(percentage) model.package_model( @@ -131,10 +130,10 @@ async def train_comparison_models( async def get_no_of_stories(story_file: Text, domain: Text) -> int: """Get number of stories in a file.""" from rasa.core.domain import TemplateDomain - from rasa.core.training.dsl import StoryFileReader + from rasa.core.training import loading - stories = await StoryFileReader.read_from_folder( - story_file, TemplateDomain.load(domain) + stories = await loading.load_data_from_files( + [story_file], TemplateDomain.load(domain) ) return len(stories) @@ -144,21 +143,19 @@ async def do_compare_training( story_file: Text, additional_arguments: Optional[Dict] = None, ): - from rasa.core import utils - - await train_comparison_models( - story_file, - args.domain, - args.out, - args.percentages, - args.config, - args.runs, - args.dump_stories, - additional_arguments, + _, no_stories = await asyncio.gather( + train_comparison_models( + story_file=story_file, + domain=args.domain, + output_path=args.out, + exclusion_percentages=args.percentages, + policy_configs=args.config, + runs=args.runs, + additional_arguments=additional_arguments, + ), + get_no_of_stories(args.stories, args.domain), ) - no_stories = await get_no_of_stories(args.stories, args.domain) - # store the list of the number of stories present at each exclusion # percentage story_range = [ @@ -168,21 +165,19 @@ async def do_compare_training( training_stories_per_model_file = os.path.join( args.out, NUMBER_OF_TRAINING_STORIES_FILE ) - utils.dump_obj_as_json_to_file(training_stories_per_model_file, story_range) + rasa.utils.io.dump_obj_as_json_to_file(training_stories_per_model_file, story_range) def do_interactive_learning( - args: argparse.Namespace, - stories: Optional[Text] = None, - additional_arguments: Dict[Text, typing.Any] = None, + args: argparse.Namespace, file_importer: TrainingDataImporter ): from rasa.core.training import interactive interactive.run_interactive_learning( - stories, + file_importer=file_importer, skip_visualization=args.skip_visualization, + conversation_id=args.conversation_id, server_args=args.__dict__, - additional_arguments=additional_arguments, ) diff --git a/rasa/core/training/__init__.py b/rasa/core/training/__init__.py index b9a9df22fcf7..3b77dc45606b 100644 --- a/rasa/core/training/__init__.py +++ b/rasa/core/training/__init__.py @@ -1,7 +1,6 @@ -import typing -from typing import Text, List, Optional, Union +from typing import Text, List, Optional, Union, TYPE_CHECKING -if typing.TYPE_CHECKING: +if TYPE_CHECKING: from rasa.core.domain import Domain from rasa.core.interpreter import NaturalLanguageInterpreter from rasa.core.trackers import DialogueStateTracker @@ -9,7 +8,7 @@ from rasa.importers.importer import TrainingDataImporter -async def extract_story_graph( +async def extract_rule_data( resource_name: Text, domain: "Domain", 
interpreter: Optional["NaturalLanguageInterpreter"] = None, @@ -17,12 +16,35 @@ async def extract_story_graph( exclusion_percentage: int = None, ) -> "StoryGraph": from rasa.core.interpreter import RegexInterpreter - from rasa.core.training.dsl import StoryFileReader + from rasa.core.training import loading + from rasa.core.training.structures import StoryGraph + + if not interpreter: + interpreter = RegexInterpreter() + story_steps = await loading.load_data_from_resource( + resource_name, + domain, + interpreter, + use_e2e=use_e2e, + exclusion_percentage=exclusion_percentage, + ) + return StoryGraph(story_steps) + + +async def extract_story_graph( + resource_name: Text, + domain: "Domain", + interpreter: Optional["NaturalLanguageInterpreter"] = None, + use_e2e: bool = False, + exclusion_percentage: Optional[int] = None, +) -> "StoryGraph": + from rasa.core.interpreter import RegexInterpreter from rasa.core.training.structures import StoryGraph + import rasa.core.training.loading as core_loading if not interpreter: interpreter = RegexInterpreter() - story_steps = await StoryFileReader.read_from_folder( + story_steps = await core_loading.load_data_from_resource( resource_name, domain, interpreter, @@ -41,7 +63,7 @@ async def load_data( tracker_limit: Optional[int] = None, use_story_concatenation: bool = True, debug_plots=False, - exclusion_percentage: int = None, + exclusion_percentage: Optional[int] = None, ) -> List["DialogueStateTracker"]: from rasa.core.training.generator import TrainingDataGenerator from rasa.importers.importer import TrainingDataImporter diff --git a/rasa/core/training/data.py b/rasa/core/training/data.py index 8ad45e36c2a3..dbb5aedceb38 100644 --- a/rasa/core/training/data.py +++ b/rasa/core/training/data.py @@ -1,26 +1,35 @@ +from typing import List, Optional, Tuple + +import numpy as np + + # noinspection PyPep8Naming -class DialogueTrainingData(object): - def __init__(self, X, y, true_length=None): +class DialogueTrainingData: + def __init__( + self, X: np.ndarray, y: np.ndarray, true_length: Optional[List[int]] = None + ) -> None: self.X = X self.y = y self.true_length = true_length - def limit_training_data_to(self, max_samples): + def limit_training_data_to(self, max_samples: int) -> None: self.X = self.X[:max_samples] self.y = self.y[:max_samples] - self.true_length = self.true_length[:max_samples] + self.true_length = ( + self.true_length[:max_samples] if self.true_length is not None else None + ) - def is_empty(self): + def is_empty(self) -> bool: """Check if the training matrix does contain training samples.""" return self.X.shape[0] == 0 - def max_history(self): + def max_history(self) -> int: return self.X.shape[1] - def num_examples(self): + def num_examples(self) -> int: return len(self.y) - def shuffled_X_y(self): + def shuffled_X_y(self) -> Tuple[np.ndarray, np.ndarray]: import numpy as np idx = np.arange(self.num_examples()) diff --git a/rasa/core/training/dsl.py b/rasa/core/training/dsl.py index eecbc6958516..4a52a414cb1a 100644 --- a/rasa/core/training/dsl.py +++ b/rasa/core/training/dsl.py @@ -1,29 +1,12 @@ -# -*- coding: utf-8 -*- -import asyncio -import json import logging -import os import re -import warnings -from typing import Optional, List, Text, Any, Dict, TYPE_CHECKING, Iterable +from typing import Optional, Text, TYPE_CHECKING -import rasa.utils.io as io_utils from rasa.constants import DOCS_BASE_URL -from rasa.core import utils from rasa.core.constants import INTENT_MESSAGE_PREFIX -from rasa.core.events import ActionExecuted, 
UserUttered, Event, SlotSet -from rasa.core.exceptions import StoryParseError -from rasa.core.interpreter import RegexInterpreter, NaturalLanguageInterpreter -from rasa.core.training.structures import ( - Checkpoint, - STORY_START, - StoryStep, - GENERATED_CHECKPOINT_PREFIX, - GENERATED_HASH_LENGTH, - FORM_PREFIX, -) +from rasa.core.interpreter import RegexInterpreter +from rasa.core.training.structures import FORM_PREFIX from rasa.nlu.training_data.formats import MarkdownReader -from rasa.core.domain import Domain if TYPE_CHECKING: from rasa.nlu.training_data import Message @@ -32,430 +15,44 @@ class EndToEndReader(MarkdownReader): + def __init__(self) -> None: + super().__init__() + self._regex_interpreter = RegexInterpreter() + def _parse_item(self, line: Text) -> Optional["Message"]: - """Parses an md list item line based on the current section type. + f"""Parses an md list item line based on the current section type. Matches expressions of the form `<intent>:<example>. For the syntax of <example> see the Rasa docs on NLU training data: - {}/nlu/training-data-format/#markdown-format""".format( - DOCS_BASE_URL - ) - - item_regex = re.compile(r"\s*(.+?):\s*(.*)") + {DOCS_BASE_URL}/nlu/training-data-format/#markdown-format""" + + # Match three groups: + # 1) Potential "form" annotation + # 2) The correct intent + # 3) Optional entities + # 4) The message text + form_group = fr"({FORM_PREFIX}\s*)*" + item_regex = re.compile(r"\s*" + form_group + r"([^{}]+?)({.*})*:\s*(.*)") match = re.match(item_regex, line) - if match: - intent = match.group(1) - self.current_title = intent - message = match.group(2) - example = self._parse_training_example(message) - example.data["true_intent"] = intent - return example - - raise ValueError( - "Encountered invalid end-to-end format for message " - "`{}`. Please visit the documentation page on " - "end-to-end evaluation at {}/user-guide/evaluating-models/" - "end-to-end-evaluation/".format(line, DOCS_BASE_URL) - ) - - -class StoryStepBuilder(object): - def __init__(self, name): - self.name = name - self.story_steps = [] - self.current_steps = [] - self.start_checkpoints = [] - - def add_checkpoint(self, name: Text, conditions: Optional[Dict[Text, Any]]) -> None: - - # Depending on the state of the story part this - # is either a start or an end check point - if not self.current_steps: - self.start_checkpoints.append(Checkpoint(name, conditions)) - else: - if conditions: - logger.warning( - "End or intermediate checkpoints " - "do not support conditions! 
" - "(checkpoint: {})".format(name) - ) - additional_steps = [] - for t in self.current_steps: - if t.end_checkpoints: - tcp = t.create_copy(use_new_id=True) - tcp.end_checkpoints = [Checkpoint(name)] - additional_steps.append(tcp) - else: - t.end_checkpoints = [Checkpoint(name)] - self.current_steps.extend(additional_steps) - - def _prev_end_checkpoints(self): - if not self.current_steps: - return self.start_checkpoints - else: - # makes sure we got each end name only once - end_names = {e.name for s in self.current_steps for e in s.end_checkpoints} - return [Checkpoint(name) for name in end_names] - - def add_user_messages(self, messages: List[UserUttered]): - self.ensure_current_steps() - - if len(messages) == 1: - # If there is only one possible intent, we'll keep things simple - for t in self.current_steps: - t.add_user_message(messages[0]) - else: - # If there are multiple different intents the - # user can use the express the same thing - # we need to copy the blocks and create one - # copy for each possible message - prefix = GENERATED_CHECKPOINT_PREFIX + "OR_" - generated_checkpoint = utils.generate_id(prefix, GENERATED_HASH_LENGTH) - updated_steps = [] - for t in self.current_steps: - for m in messages: - copied = t.create_copy(use_new_id=True) - copied.add_user_message(m) - copied.end_checkpoints = [Checkpoint(generated_checkpoint)] - updated_steps.append(copied) - self.current_steps = updated_steps - - def add_event(self, event): - self.ensure_current_steps() - for t in self.current_steps: - t.add_event(event) - - def ensure_current_steps(self): - completed = [step for step in self.current_steps if step.end_checkpoints] - unfinished = [step for step in self.current_steps if not step.end_checkpoints] - self.story_steps.extend(completed) - if unfinished: - self.current_steps = unfinished - else: - self.current_steps = self._next_story_steps() - - def flush(self): - if self.current_steps: - self.story_steps.extend(self.current_steps) - self.current_steps = [] - - def _next_story_steps(self): - start_checkpoints = self._prev_end_checkpoints() - if not start_checkpoints: - start_checkpoints = [Checkpoint(STORY_START)] - current_turns = [ - StoryStep(block_name=self.name, start_checkpoints=start_checkpoints) - ] - return current_turns - - -class StoryFileReader(object): - """Helper class to read a story file.""" - - def __init__( - self, - domain: Domain, - interpreter: NaturalLanguageInterpreter, - template_vars: Optional[Dict] = None, - use_e2e: bool = False, - ): - self.story_steps = [] - self.current_step_builder = None # type: Optional[StoryStepBuilder] - self.domain = domain - self.interpreter = interpreter - self.template_variables = template_vars if template_vars else {} - self.use_e2e = use_e2e - - @staticmethod - async def read_from_folder( - resource_name: Text, - domain: Domain, - interpreter: NaturalLanguageInterpreter = RegexInterpreter(), - template_variables: Optional[Dict] = None, - use_e2e: bool = False, - exclusion_percentage: Optional[int] = None, - ) -> List[StoryStep]: - """Given a path reads all contained story files.""" - if not os.path.exists(resource_name): - raise ValueError( - "Story file or folder could not be found. 
Make " - "sure '{}' exists and points to a story folder " - "or file.".format(os.path.abspath(resource_name)) - ) - - files = io_utils.list_files(resource_name) - - return await StoryFileReader.read_from_files( - files, - domain, - interpreter, - template_variables, - use_e2e, - exclusion_percentage, - ) - - @staticmethod - async def read_from_files( - files: Iterable[Text], - domain: Domain, - interpreter: NaturalLanguageInterpreter = RegexInterpreter(), - template_variables: Optional[Dict] = None, - use_e2e: bool = False, - exclusion_percentage: Optional[int] = None, - ) -> List[StoryStep]: - story_steps = [] - - for f in files: - steps = await StoryFileReader.read_from_file( - f, domain, interpreter, template_variables, use_e2e - ) - story_steps.extend(steps) - - # if exclusion percentage is not 100 - if exclusion_percentage and exclusion_percentage != 100: - import random - - idx = int(round(exclusion_percentage / 100.0 * len(story_steps))) - random.shuffle(story_steps) - story_steps = story_steps[:-idx] - - return story_steps - - @staticmethod - async def read_from_file( - filename: Text, - domain: Domain, - interpreter: NaturalLanguageInterpreter = RegexInterpreter(), - template_variables: Optional[Dict] = None, - use_e2e: bool = False, - ) -> List[StoryStep]: - """Given a md file reads the contained stories.""" - try: - with open(filename, "r", encoding="utf-8") as f: - lines = f.readlines() - reader = StoryFileReader(domain, interpreter, template_variables, use_e2e) - return await reader.process_lines(lines) - except ValueError as err: - file_info = "Invalid story file format. Failed to parse '{}'".format( - os.path.abspath(filename) - ) - logger.exception(file_info) - if not err.args: - err.args = ("",) - err.args = err.args + (file_info,) - raise - - @staticmethod - def _parameters_from_json_string(s: Text, line: Text) -> Dict[Text, Any]: - """Parse the passed string as json and create a parameter dict.""" - - if s is None or not s.strip(): - # if there is no strings there are not going to be any parameters - return {} - - try: - parsed_slots = json.loads(s) - if isinstance(parsed_slots, dict): - return parsed_slots - else: - raise Exception( - "Parsed value isn't a json object " - "(instead parser found '{}')" - ".".format(type(parsed_slots)) - ) - except Exception as e: + if not match: raise ValueError( - "Invalid to parse arguments in line " - "'{}'. Failed to decode parameters" - "as a json object. Make sure the event" - "name is followed by a proper json " - "object. Error: {}".format(line, e) + "Encountered invalid end-to-end format for message " + "`{}`. Please visit the documentation page on " + "end-to-end testing at {}/user-guide/testing-your-assistant/" + "#end-to-end-testing/".format(line, DOCS_BASE_URL) ) - @staticmethod - def _parse_event_line(line): - """Tries to parse a single line as an event with arguments.""" - - # the regex matches "slot{"a": 1}" - m = re.search("^([^{]+)([{].+)?", line) - if m is not None: - event_name = m.group(1).strip() - slots_str = m.group(2) - parameters = StoryFileReader._parameters_from_json_string(slots_str, line) - return event_name, parameters - else: - warnings.warn( - "Failed to parse action line '{}'. 
Ignoring this line.".format(line) - ) - return "", {} - - async def process_lines(self, lines: List[Text]) -> List[StoryStep]: - multiline_comment = False - - for idx, line in enumerate(lines): - line_num = idx + 1 - try: - line = self._replace_template_variables(self._clean_up_line(line)) - if line.strip() == "": - continue - elif line.startswith("<!--"): - multiline_comment = True - continue - elif multiline_comment and line.endswith("-->"): - multiline_comment = False - continue - elif multiline_comment: - continue - elif line.startswith("#"): - # reached a new story block - name = line[1:].strip("# ") - self.new_story_part(name) - elif line.startswith(">"): - # reached a checkpoint - name, conditions = self._parse_event_line(line[1:].strip()) - self.add_checkpoint(name, conditions) - elif re.match(r"^[*\-]\s+{}".format(FORM_PREFIX), line): - logger.debug( - "Skipping line {}, " - "because it was generated by " - "form action".format(line) - ) - elif line.startswith("-"): - # reached a slot, event, or executed action - event_name, parameters = self._parse_event_line(line[1:]) - self.add_event(event_name, parameters) - elif line.startswith("*"): - # reached a user message - user_messages = [el.strip() for el in line[1:].split(" OR ")] - if self.use_e2e: - await self.add_e2e_messages(user_messages, line_num) - else: - await self.add_user_messages(user_messages, line_num) - else: - # reached an unknown type of line - logger.warning( - "Skipping line {}. " - "No valid command found. " - "Line Content: '{}'" - "".format(line_num, line) - ) - except Exception as e: - msg = "Error in line {}: {}".format(line_num, e) - logger.error(msg, exc_info=1) # pytype: disable=wrong-arg-types - raise ValueError(msg) - self._add_current_stories_to_result() - return self.story_steps - - def _replace_template_variables(self, line: Text) -> Text: - def process_match(matchobject): - varname = matchobject.group(1) - if varname in self.template_variables: - return self.template_variables[varname] - else: - raise ValueError( - "Unknown variable `{var}` " - "in template line '{line}'" - "".format(var=varname, line=line) - ) - - template_rx = re.compile(r"`([^`]+)`") - return template_rx.sub(process_match, line) - - @staticmethod - def _clean_up_line(line: Text) -> Text: - """Removes comments and trailing spaces""" - - return re.sub(r"<!--.*?-->", "", line).strip() - - def _add_current_stories_to_result(self): - if self.current_step_builder: - self.current_step_builder.flush() - self.story_steps.extend(self.current_step_builder.story_steps) - - def new_story_part(self, name): - self._add_current_stories_to_result() - self.current_step_builder = StoryStepBuilder(name) - - def add_checkpoint(self, name: Text, conditions: Optional[Dict[Text, Any]]) -> None: + intent = match.group(2) + self.current_title = intent + message = match.group(4) + example = self.parse_training_example(message) - # Ensure story part already has a name - if not self.current_step_builder: - raise StoryParseError( - "Checkpoint '{}' is at an invalid location. " - "Expected a story start.".format(name) - ) - - self.current_step_builder.add_checkpoint(name, conditions) - - async def _parse_message(self, message: Text, line_num: int): + # If the message starts with the `INTENT_MESSAGE_PREFIX` potential entities + # are annotated in the json format (e.g. 
`/greet{"name": "Rasa"}) if message.startswith(INTENT_MESSAGE_PREFIX): - parse_data = await RegexInterpreter().parse(message) - else: - parse_data = await self.interpreter.parse(message) - utterance = UserUttered( - message, parse_data.get("intent"), parse_data.get("entities"), parse_data - ) - intent_name = utterance.intent.get("name") - if intent_name not in self.domain.intents: - logger.warning( - "Found unknown intent '{}' on line {}. " - "Please, make sure that all intents are " - "listed in your domain yaml." - "".format(intent_name, line_num) - ) - return utterance - - async def add_user_messages(self, messages, line_num): - if not self.current_step_builder: - raise StoryParseError( - "User message '{}' at invalid location. " - "Expected story start.".format(messages) - ) - parsed_messages = await asyncio.gather( - *[self._parse_message(m, line_num) for m in messages] - ) - self.current_step_builder.add_user_messages(parsed_messages) - - async def add_e2e_messages(self, e2e_messages, line_num): - if not self.current_step_builder: - raise StoryParseError( - "End-to-end message '{}' at invalid " - "location. Expected story start." - "".format(e2e_messages) - ) - e2e_reader = EndToEndReader() - parsed_messages = [] - for m in e2e_messages: - message = e2e_reader._parse_item(m) - parsed = await self._parse_message(message.text, line_num) - - parsed.parse_data["true_intent"] = message.data["true_intent"] - parsed.parse_data["true_entities"] = message.data.get("entities") or [] - parsed_messages.append(parsed) - self.current_step_builder.add_user_messages(parsed_messages) - - def add_event(self, event_name, parameters): - - # add 'name' only if event is not a SlotSet, - # because there might be a slot with slot_key='name' - if "name" not in parameters and event_name != SlotSet.type_name: - parameters["name"] = event_name - - parsed_events = Event.from_story_string( - event_name, parameters, default=ActionExecuted - ) - if parsed_events is None: - raise StoryParseError( - "Unknown event '{}'. It is Neither an event " - "nor an action).".format(event_name) - ) - if self.current_step_builder is None: - raise StoryParseError( - "Failed to handle event '{}'. There is no " - "started story block available. 
" - "".format(event_name) - ) + parsed = self._regex_interpreter.synchronous_parse(message) + example.data["entities"] = parsed["entities"] - for p in parsed_events: - self.current_step_builder.add_event(p) + example.data["true_intent"] = intent + return example diff --git a/rasa/core/training/generator.py b/rasa/core/training/generator.py index fe9096e6fa5d..938953ebfd44 100644 --- a/rasa/core/training/generator.py +++ b/rasa/core/training/generator.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- from collections import defaultdict, namedtuple, deque import copy @@ -7,6 +6,7 @@ from tqdm import tqdm from typing import Optional, List, Text, Set, Dict, Tuple +from rasa.constants import DOCS_URL_STORIES from rasa.core import utils from rasa.core.domain import Domain from rasa.core.events import ( @@ -16,7 +16,10 @@ UserUtteranceReverted, Restarted, Event, + SlotSet, + ActiveLoop, ) +from rasa.core.slots import Slot from rasa.core.trackers import DialogueStateTracker from rasa.core.training.structures import ( StoryGraph, @@ -24,7 +27,7 @@ StoryStep, GENERATED_CHECKPOINT_PREFIX, ) -from rasa.utils.common import is_logging_disabled +from rasa.utils.common import is_logging_disabled, raise_warning logger = logging.getLogger(__name__) @@ -44,16 +47,40 @@ class TrackerWithCachedStates(DialogueStateTracker): """A tracker wrapper that caches the state creation of the tracker.""" def __init__( - self, sender_id, slots, max_event_history=None, domain=None, is_augmented=False - ): - super(TrackerWithCachedStates, self).__init__( - sender_id, slots, max_event_history + self, + sender_id: Text, + slots: Optional[List[Slot]], + max_event_history: Optional[int] = None, + domain: Optional[Domain] = None, + is_augmented: bool = False, + is_rule_tracker: bool = False, + ) -> None: + super().__init__( + sender_id, slots, max_event_history, is_rule_tracker=is_rule_tracker ) self._states = None self.domain = domain # T/F property to filter augmented stories self.is_augmented = is_augmented + @classmethod + def from_events( + cls, + sender_id: Text, + evts: List[Event], + slots: Optional[List[Slot]] = None, + max_event_history: Optional[int] = None, + sender_source: Optional[Text] = None, + domain: Optional[Domain] = None, + is_rule_tracker: bool = False, + ) -> "TrackerWithCachedStates": + tracker = cls( + sender_id, slots, max_event_history, domain, is_rule_tracker=is_rule_tracker + ) + for e in evts: + tracker.update(e) + return tracker + def past_states(self, domain: Domain) -> deque: """Return the states of the tracker based on the logged events.""" @@ -65,7 +92,7 @@ def past_states(self, domain: Domain) -> deque: # if don't have it cached, we use the domain to calculate the states # from the events if self._states is None: - self._states = super(TrackerWithCachedStates, self).past_states(domain) + self._states = super().past_states(domain) return self._states @@ -81,9 +108,12 @@ def init_copy(self) -> "TrackerWithCachedStates": self._max_event_history, self.domain, self.is_augmented, + self.is_rule_tracker, ) - def copy(self, sender_id: Text = "") -> "TrackerWithCachedStates": + def copy( + self, sender_id: Text = "", sender_source: Text = "" + ) -> "TrackerWithCachedStates": """Creates a duplicate of this tracker. 
A new tracker will be created and all events @@ -94,6 +124,7 @@ def copy(self, sender_id: Text = "") -> "TrackerWithCachedStates": tracker = self.init_copy() tracker.sender_id = sender_id + tracker.sender_source = sender_source for event in self.events: tracker.update(event, skip_states=True) @@ -120,7 +151,7 @@ def update(self, event: Event, skip_states: bool = False) -> None: # cached. let's make sure it is there. self._states = self.past_states(self.domain) - super(TrackerWithCachedStates, self).update(event) + super().update(event) if not skip_states: if isinstance(event, ActionExecuted): @@ -144,7 +175,7 @@ def update(self, event: Event, skip_states: bool = False) -> None: TrackersTuple = Tuple[List[TrackerWithCachedStates], List[TrackerWithCachedStates]] -class TrainingDataGenerator(object): +class TrainingDataGenerator: def __init__( self, story_graph: StoryGraph, @@ -187,18 +218,36 @@ def __init__( @staticmethod def _phase_name(everything_reachable_is_reached, phase): if everything_reachable_is_reached: - return "augmentation round {}".format(phase) + return f"augmentation round {phase}" else: - return "data generation round {}".format(phase) + return f"data generation round {phase}" + + def _generate_ml_trackers(self) -> List[TrackerWithCachedStates]: + steps = [step for step in self.story_graph.ordered_steps() if not step.is_rule] + + return self._generate(steps, is_rule_data=False) + + def _generate_rule_trackers(self) -> List[TrackerWithCachedStates]: + steps = [step for step in self.story_graph.ordered_steps() if step.is_rule] + + return self._generate(steps, is_rule_data=True) def generate(self) -> List[TrackerWithCachedStates]: + return self._generate_ml_trackers() + self._generate_rule_trackers() + + def _generate( + self, story_steps: List[StoryStep], is_rule_data: bool = False + ) -> List[TrackerWithCachedStates]: + if not story_steps: + logger.debug(f"No {'rules' if is_rule_data else 'story blocks'} found.") + return [] + if self.config.remove_duplicates and self.config.unique_last_num_states: logger.debug( "Generated trackers will be deduplicated " "based on their unique last {} states." "".format(self.config.unique_last_num_states) ) - self._mark_first_action_in_story_steps_as_unpredictable() active_trackers = defaultdict(list) @@ -208,6 +257,7 @@ def generate(self) -> List[TrackerWithCachedStates]: self.domain.slots, max_event_history=self.config.tracker_limit, domain=self.domain, + is_rule_tracker=is_rule_data, ) active_trackers[STORY_START].append(init_tracker) @@ -217,8 +267,13 @@ def generate(self) -> List[TrackerWithCachedStates]: story_end_trackers = [] phase = 0 # one phase is one traversal of all story steps. - min_num_aug_phases = 3 if self.config.augmentation_factor > 0 else 0 - logger.debug("Number of augmentation rounds is {}".format(min_num_aug_phases)) + + # do not augment rule data + if not is_rule_data: + min_num_aug_phases = 3 if self.config.augmentation_factor > 0 else 0 + logger.debug(f"Number of augmentation rounds is {min_num_aug_phases}") + else: + min_num_aug_phases = 0 # placeholder to track gluing process of checkpoints used_checkpoints = set() @@ -229,6 +284,7 @@ def generate(self) -> List[TrackerWithCachedStates]: # checkpoints that seem to be reachable. This is a heuristic, # if we did not reach any new checkpoints in an iteration, we # assume we have reached all and stop. 
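A key structural change in the generator above is that ordered story steps are now processed in two passes, plain story blocks and rules, with augmentation switched off for the rule pass. A compact sketch of that split, using a hypothetical `Step` stand-in instead of Rasa's `StoryStep`:

    from dataclasses import dataclass
    from typing import List, Tuple

    @dataclass
    class Step:
        """Hypothetical stand-in for StoryStep; only the is_rule flag matters here."""
        name: str
        is_rule: bool

    def split_steps(ordered_steps: List[Step]) -> Tuple[List[Step], List[Step]]:
        # Mirrors _generate_ml_trackers / _generate_rule_trackers: one pass per group.
        ml_steps = [step for step in ordered_steps if not step.is_rule]
        rule_steps = [step for step in ordered_steps if step.is_rule]
        return ml_steps, rule_steps

    def augmentation_rounds(augmentation_factor: int, is_rule_data: bool) -> int:
        # Rule data is never augmented; story data gets the usual three rounds.
        return 3 if augmentation_factor > 0 and not is_rule_data else 0

    steps = [Step("greet path", False), Step("activate form rule", True)]
    ml, rules = split_steps(steps)
    assert [s.name for s in ml] == ["greet path"]
    assert [s.name for s in rules] == ["activate form rule"]
    assert augmentation_rounds(augmentation_factor=20, is_rule_data=True) == 0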
+ while not everything_reachable_is_reached or phase < min_num_aug_phases: phase_name = self._phase_name(everything_reachable_is_reached, phase) @@ -240,19 +296,16 @@ def generate(self) -> List[TrackerWithCachedStates]: "".format(phase_name, num_active_trackers) ) else: - logger.debug("There are no trackers for {}".format(phase_name)) + logger.debug(f"There are no trackers for {phase_name}") break # track unused checkpoints for this phase - unused_checkpoints = set() # type: Set[Text] + unused_checkpoints: Set[Text] = set() - pbar = tqdm( - self.story_graph.ordered_steps(), - desc="Processed Story Blocks", - disable=is_logging_disabled(), - ) + desc = f"Processed {'rules' if is_rule_data else 'story blocks'}" + pbar = tqdm(story_steps, desc=desc, disable=is_logging_disabled()) for step in pbar: - incoming_trackers = [] # type: List[TrackerWithCachedStates] + incoming_trackers: List[TrackerWithCachedStates] = [] for start in step.start_checkpoints: if active_trackers[start.name]: ts = start.filter_trackers(active_trackers[start.name]) @@ -263,7 +316,6 @@ def generate(self) -> List[TrackerWithCachedStates]: # had this start checkpoint as an end checkpoint # it will be processed in next phases unused_checkpoints.add(start.name) - if not incoming_trackers: # if there are no trackers, # we can skip the rest of the loop @@ -276,6 +328,7 @@ def generate(self) -> List[TrackerWithCachedStates]: incoming_trackers, end_trackers = self._remove_duplicate_trackers( incoming_trackers ) + # append end trackers to finished trackers finished_trackers.extend(end_trackers) @@ -289,6 +342,7 @@ def generate(self) -> List[TrackerWithCachedStates]: pbar.set_postfix({"# trackers": "{:d}".format(len(incoming_trackers))}) trackers, end_trackers = self._process_step(step, incoming_trackers) + # add end trackers to finished trackers finished_trackers.extend(end_trackers) @@ -298,7 +352,6 @@ def generate(self) -> List[TrackerWithCachedStates]: # that start with the checkpoint this step ended with for end in step.end_checkpoints: - start_name = self._find_start_checkpoint_name(end.name) active_trackers[start_name].extend(trackers) @@ -314,9 +367,7 @@ def generate(self) -> List[TrackerWithCachedStates]: story_end_trackers.extend(unique_ends) num_finished = len(finished_trackers) + len(story_end_trackers) - logger.debug( - "Finished phase ({} training samples found).".format(num_finished) - ) + logger.debug(f"Finished phase ({num_finished} training samples found).") # prepare next round phase += 1 @@ -370,7 +421,7 @@ def generate(self) -> List[TrackerWithCachedStates]: # augmentation round, so we process only # story end checkpoints # reset used checkpoints - used_checkpoints = set() # type: Set[Text] + used_checkpoints: Set[Text] = set() # generate active trackers for augmentation active_trackers = self._create_start_trackers_for_augmentation( @@ -439,11 +490,11 @@ def _add_unused_end_checkpoints( """ return unused_checkpoints.union( - set( + { start_name for start_name in start_checkpoints if start_name not in used_checkpoints - ) + } ) @staticmethod @@ -530,7 +581,7 @@ def _process_step( new_sender = tracker.sender_id else: new_sender = step.block_name - trackers.append(tracker.copy(new_sender)) + trackers.append(tracker.copy(new_sender, step.source_name)) end_trackers = [] for event in events: @@ -539,6 +590,14 @@ def _process_step( event, (ActionReverted, UserUtteranceReverted, Restarted) ): end_trackers.append(tracker.copy(tracker.sender_id)) + if step.is_rule: + # TODO: this is a hack to make a rule know + # that 
slot or form should not be set + if isinstance(event, ActiveLoop) and event.name is None: + event.name = "None" + if isinstance(event, SlotSet) and event.value is None: + event.value = "None" + tracker.update(event) # end trackers should be returned separately @@ -604,7 +663,7 @@ def _remove_duplicate_story_end_trackers( for tracker in trackers: states = tuple(tracker.past_states(self.domain)) - hashed = hash(states) + hashed = hash(states + (tracker.is_rule_tracker,)) # only continue with trackers that created a # hashed_featurization we haven't observed @@ -657,12 +716,13 @@ def _issue_unused_checkpoint_notification( that no one provided.""" if STORY_START in unused_checkpoints: - logger.warning( + raise_warning( "There is no starting story block " "in the training data. " "All your story blocks start with some checkpoint. " "There should be at least one story block " - "that starts without any checkpoint." + "that starts without any checkpoint.", + docs=DOCS_URL_STORIES + "#stories", ) # running through the steps first will result in only one warning @@ -684,20 +744,22 @@ def _issue_unused_checkpoint_notification( for cp, block_name in collected_start: if not cp.startswith(GENERATED_CHECKPOINT_PREFIX): - logger.warning( - "Unsatisfied start checkpoint '{}' " - "in block '{}'. " - "Remove this checkpoint or add " - "story blocks that end " - "with this checkpoint.".format(cp, block_name) + raise_warning( + f"Unsatisfied start checkpoint '{cp}' " + f"in block '{block_name}'. " + f"Remove this checkpoint or add " + f"story blocks that end " + f"with this checkpoint.", + docs=DOCS_URL_STORIES + "#checkpoints", ) for cp, block_name in collected_end: if not cp.startswith(GENERATED_CHECKPOINT_PREFIX): - logger.warning( - "Unsatisfied end checkpoint '{}' " - "in block '{}'. " - "Remove this checkpoint or add " - "story blocks that start " - "with this checkpoint.".format(cp, block_name) + raise_warning( + f"Unsatisfied end checkpoint '{cp}' " + f"in block '{block_name}'. 
" + f"Remove this checkpoint or add " + f"story blocks that start " + f"with this checkpoint.", + docs=DOCS_URL_STORIES + "#checkpoints", ) diff --git a/rasa/core/training/interactive.py b/rasa/core/training/interactive.py index 2cfb5d998467..133bdbf82f39 100644 --- a/rasa/core/training/interactive.py +++ b/rasa/core/training/interactive.py @@ -6,11 +6,15 @@ import uuid from functools import partial from multiprocessing import Process -from typing import Any, Callable, Dict, List, Optional, Text, Tuple, Union +from pathlib import Path +from typing import Any, Callable, Dict, List, Optional, Text, Tuple, Union, Set import numpy as np from aiohttp import ClientError from colorclass import Color + +from rasa.nlu.training_data.loading import MARKDOWN, RASA, RASA_YAML +from rasa.nlu.constants import INTENT_NAME_KEY from sanic import Sanic, response from sanic.exceptions import NotFound from terminaltables import AsciiTable, SingleTable @@ -19,7 +23,7 @@ import rasa.cli.utils from questionary import Choice, Form, Question -from rasa.cli import utils as cliutils +from rasa.cli import utils as cli_utils from rasa.core import constants, run, train, utils from rasa.core.actions.action import ACTION_LISTEN_NAME, default_action_names from rasa.core.channels.channel import UserMessage @@ -42,14 +46,14 @@ UserUtteranceReverted, ) from rasa.core.interpreter import INTENT_MESSAGE_PREFIX, NaturalLanguageInterpreter -from rasa.core.trackers import EventVerbosity, DialogueStateTracker +from rasa.core.trackers import EventVerbosity, DialogueStateTracker, ACTIVE_LOOP_KEY from rasa.core.training import visualization -from rasa.core.training.structures import Story from rasa.core.training.visualization import ( VISUALIZATION_TEMPLATE_PATH, visualize_neighborhood, ) from rasa.core.utils import AvailableEndpoints +from rasa.importers.rasa import TrainingDataImporter from rasa.utils.common import update_sanic_log_level from rasa.utils.endpoints import EndpointConfig @@ -84,6 +88,10 @@ NEW_TEMPLATES = {} +MAX_NUMBER_OF_TRAINING_STORIES_FOR_VISUALIZATION = 200 + +DEFAULT_STORY_GRAPH_FILE = "story_graph.dot" + class RestartConversation(Exception): """Exception used to break out the flow and restart the conversation.""" @@ -117,7 +125,7 @@ class Abort(Exception): async def send_message( endpoint: EndpointConfig, - sender_id: Text, + conversation_id: Text, message: Text, parse_data: Optional[Dict[Text, Any]] = None, ) -> Dict[Text, Any]: @@ -132,17 +140,17 @@ async def send_message( return await endpoint.request( json=payload, method="post", - subpath="/conversations/{}/messages".format(sender_id), + subpath=f"/conversations/{conversation_id}/messages", ) async def request_prediction( - endpoint: EndpointConfig, sender_id: Text + endpoint: EndpointConfig, conversation_id: Text ) -> Dict[Text, Any]: """Request the next action prediction from core.""" return await endpoint.request( - method="post", subpath="/conversations/{}/predict".format(sender_id) + method="post", subpath=f"/conversations/{conversation_id}/predict" ) @@ -162,14 +170,12 @@ async def retrieve_status(endpoint: EndpointConfig) -> Dict[Text, Any]: async def retrieve_tracker( endpoint: EndpointConfig, - sender_id: Text, + conversation_id: Text, verbosity: EventVerbosity = EventVerbosity.ALL, ) -> Dict[Text, Any]: """Retrieve a tracker from core.""" - path = "/conversations/{}/tracker?include_events={}".format( - sender_id, verbosity.name - ) + path = f"/conversations/{conversation_id}/tracker?include_events={verbosity.name}" return await endpoint.request( 
method="get", subpath=path, headers={"Accept": "application/json"} ) @@ -177,7 +183,7 @@ async def retrieve_tracker( async def send_action( endpoint: EndpointConfig, - sender_id: Text, + conversation_id: Text, action_name: Text, policy: Optional[Text] = None, confidence: Optional[float] = None, @@ -187,7 +193,7 @@ async def send_action( payload = ActionExecuted(action_name, policy, confidence).as_dict() - subpath = "/conversations/{}/execute".format(sender_id) + subpath = f"/conversations/{conversation_id}/execute" try: return await endpoint.request(json=payload, method="post", subpath=subpath) @@ -195,30 +201,28 @@ async def send_action( if is_new_action: if action_name in NEW_TEMPLATES: warning_questions = questionary.confirm( - "WARNING: You have created a new action: '{0}', " - "with matching template: '{1}'. " - "This action will not return its message in this session, " - "but the new utterance will be saved to your domain file " - "when you exit and save this session. " - "You do not need to do anything further. " - "".format(action_name, [*NEW_TEMPLATES[action_name]][0]) + f"WARNING: You have created a new action: '{action_name}', " + f"with matching response: '{[*NEW_TEMPLATES[action_name]][0]}'. " + f"This action will not return its message in this session, " + f"but the new response will be saved to your domain file " + f"when you exit and save this session. " + f"You do not need to do anything further." ) - await _ask_questions(warning_questions, sender_id, endpoint) + await _ask_questions(warning_questions, conversation_id, endpoint) else: warning_questions = questionary.confirm( - "WARNING: You have created a new action: '{}', " - "which was not successfully executed. " - "If this action does not return any events, " - "you do not need to do anything. " - "If this is a custom action which returns events, " - "you are recommended to implement this action " - "in your action server and try again." - "".format(action_name) + f"WARNING: You have created a new action: '{action_name}', " + f"which was not successfully executed. " + f"If this action does not return any events, " + f"you do not need to do anything. " + f"If this is a custom action which returns events, " + f"you are recommended to implement this action " + f"in your action server and try again." 
) - await _ask_questions(warning_questions, sender_id, endpoint) + await _ask_questions(warning_questions, conversation_id, endpoint) payload = ActionExecuted(action_name).as_dict() - return await send_event(endpoint, sender_id, payload) + return await send_event(endpoint, conversation_id, payload) else: logger.error("failed to execute action!") raise @@ -226,12 +230,12 @@ async def send_action( async def send_event( endpoint: EndpointConfig, - sender_id: Text, + conversation_id: Text, evt: Union[List[Dict[Text, Any]], Dict[Text, Any]], ) -> Dict[Text, Any]: """Log an event to a conversation.""" - subpath = "/conversations/{}/tracker/events".format(sender_id) + subpath = f"/conversations/{conversation_id}/tracker/events" return await endpoint.request(json=evt, method="post", subpath=subpath) @@ -255,7 +259,7 @@ def format_bot_output(message: BotUttered) -> Text: if data.get("buttons"): output += "\nButtons:" - choices = cliutils.button_choices_from_message_data( + choices = cli_utils.button_choices_from_message_data( data, allow_free_text_input=True ) for choice in choices: @@ -264,13 +268,13 @@ def format_bot_output(message: BotUttered) -> Text: if data.get("elements"): output += "\nElements:" for idx, element in enumerate(data.get("elements")): - element_str = cliutils.element_to_string(element, idx) + element_str = cli_utils.element_to_string(element, idx) output += "\n" + element_str if data.get("quick_replies"): output += "\nQuick replies:" for idx, element in enumerate(data.get("quick_replies")): - element_str = cliutils.element_to_string(element, idx) + element_str = cli_utils.element_to_string(element, idx) output += "\n" + element_str return output @@ -297,7 +301,7 @@ def all_events_before_latest_user_msg( async def _ask_questions( questions: Union[Form, Question], - sender_id: Text, + conversation_id: Text, endpoint: EndpointConfig, is_abort: Callable[[Dict[Text, Any]], bool] = lambda x: False, ) -> Any: @@ -309,7 +313,7 @@ async def _ask_questions( while should_retry: answers = questions.ask() if answers is None or is_abort(answers): - should_retry = await _ask_if_quit(sender_id, endpoint) + should_retry = await _ask_if_quit(conversation_id, endpoint) else: should_retry = False return answers @@ -320,74 +324,82 @@ def _selection_choices_from_intent_prediction( ) -> List[Dict[Text, Any]]: """"Given a list of ML predictions create a UI choice list.""" - sorted_intents = sorted(predictions, key=lambda k: (-k["confidence"], k["name"])) + sorted_intents = sorted( + predictions, key=lambda k: (-k["confidence"], k[INTENT_NAME_KEY]) + ) choices = [] for p in sorted_intents: - name_with_confidence = "{:03.2f} {:40}".format( - p.get("confidence"), p.get("name") + name_with_confidence = ( + f'{p.get("confidence"):03.2f} {p.get(INTENT_NAME_KEY):40}' ) - choice = {"name": name_with_confidence, "value": p.get("name")} + choice = { + INTENT_NAME_KEY: name_with_confidence, + "value": p.get(INTENT_NAME_KEY), + } choices.append(choice) return choices -async def _request_free_text_intent(sender_id: Text, endpoint: EndpointConfig) -> Text: +async def _request_free_text_intent( + conversation_id: Text, endpoint: EndpointConfig +) -> Text: question = questionary.text( message="Please type the intent name:", validate=io_utils.not_empty_validator("Please enter an intent name"), ) - return await _ask_questions(question, sender_id, endpoint) + return await _ask_questions(question, conversation_id, endpoint) -async def _request_free_text_action(sender_id: Text, endpoint: EndpointConfig) -> Text: +async 
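The interactive-learning helpers above drive the running Core server purely over its HTTP API, one conversation at a time. For a given conversation id, the subpaths used by the functions above are listed below (the path strings are taken from the diff; the id and verbosity name are just sample inputs):

    conversation_id = "interactive_session"  # sample conversation id
    verbosity = "AFTER_RESTART"              # name of an EventVerbosity member

    subpaths = {
        "send_message": f"/conversations/{conversation_id}/messages",
        "request_prediction": f"/conversations/{conversation_id}/predict",
        "retrieve_tracker": f"/conversations/{conversation_id}/tracker?include_events={verbosity}",
        "send_action": f"/conversations/{conversation_id}/execute",
        "send_event": f"/conversations/{conversation_id}/tracker/events",
    }

    for name, subpath in subpaths.items():
        print(f"{name:20} -> {subpath}")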
def _request_free_text_action( + conversation_id: Text, endpoint: EndpointConfig +) -> Text: question = questionary.text( message="Please type the action name:", validate=io_utils.not_empty_validator("Please enter an action name"), ) - return await _ask_questions(question, sender_id, endpoint) + return await _ask_questions(question, conversation_id, endpoint) async def _request_free_text_utterance( - sender_id: Text, endpoint: EndpointConfig, action: Text + conversation_id: Text, endpoint: EndpointConfig, action: Text ) -> Text: question = questionary.text( - message=( - "Please type the message for your new utterance " - "template '{}':".format(action) - ), - validate=io_utils.not_empty_validator("Please enter a template message"), + message=(f"Please type the message for your new bot response '{action}':"), + validate=io_utils.not_empty_validator("Please enter a response"), ) - return await _ask_questions(question, sender_id, endpoint) + return await _ask_questions(question, conversation_id, endpoint) async def _request_selection_from_intents( - intents: List[Dict[Text, Text]], sender_id: Text, endpoint: EndpointConfig + intents: List[Dict[Text, Text]], conversation_id: Text, endpoint: EndpointConfig ) -> Text: question = questionary.select("What intent is it?", choices=intents) - return await _ask_questions(question, sender_id, endpoint) + return await _ask_questions(question, conversation_id, endpoint) async def _request_fork_point_from_list( - forks: List[Dict[Text, Text]], sender_id: Text, endpoint: EndpointConfig + forks: List[Dict[Text, Text]], conversation_id: Text, endpoint: EndpointConfig ) -> Text: question = questionary.select( "Before which user message do you want to fork?", choices=forks ) - return await _ask_questions(question, sender_id, endpoint) + return await _ask_questions(question, conversation_id, endpoint) async def _request_fork_from_user( - sender_id, endpoint + conversation_id, endpoint ) -> Optional[List[Dict[Text, Any]]]: """Take in a conversation and ask at which point to fork the conversation. Returns the list of events that should be kept. Forking means, the conversation will be reset and continued from this previous point.""" - tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART) + tracker = await retrieve_tracker( + endpoint, conversation_id, EventVerbosity.AFTER_RESTART + ) choices = [] for i, e in enumerate(tracker.get("events", [])): @@ -395,7 +407,7 @@ async def _request_fork_from_user( choices.append({"name": e.get("text"), "value": i}) fork_idx = await _request_fork_point_from_list( - list(reversed(choices)), sender_id, endpoint + list(reversed(choices)), conversation_id, endpoint ) if fork_idx is not None: @@ -405,7 +417,7 @@ async def _request_fork_from_user( async def _request_intent_from_user( - latest_message, intents, sender_id, endpoint + latest_message, intents, conversation_id, endpoint ) -> Dict[Text, Any]: """Take in latest message and ask which intent it should have been. 
@@ -413,51 +425,54 @@ async def _request_intent_from_user( predictions = latest_message.get("parse_data", {}).get("intent_ranking", []) - predicted_intents = {p["name"] for p in predictions} + predicted_intents = {p[INTENT_NAME_KEY] for p in predictions} for i in intents: if i not in predicted_intents: - predictions.append({"name": i, "confidence": 0.0}) + predictions.append({INTENT_NAME_KEY: i, "confidence": 0.0}) # convert intents to ui list and add <other> as a free text alternative choices = [ - {"name": "<create_new_intent>", "value": OTHER_INTENT} + {INTENT_NAME_KEY: "<create_new_intent>", "value": OTHER_INTENT} ] + _selection_choices_from_intent_prediction(predictions) - intent_name = await _request_selection_from_intents(choices, sender_id, endpoint) + intent_name = await _request_selection_from_intents( + choices, conversation_id, endpoint + ) if intent_name == OTHER_INTENT: - intent_name = await _request_free_text_intent(sender_id, endpoint) - selected_intent = {"name": intent_name, "confidence": 1.0} + intent_name = await _request_free_text_intent(conversation_id, endpoint) + selected_intent = {INTENT_NAME_KEY: intent_name, "confidence": 1.0} else: # returns the selected intent with the original probability value selected_intent = next( - (x for x in predictions if x["name"] == intent_name), {"name": None} + (x for x in predictions if x[INTENT_NAME_KEY] == intent_name), + {INTENT_NAME_KEY: None}, ) return selected_intent -async def _print_history(sender_id: Text, endpoint: EndpointConfig) -> None: +async def _print_history(conversation_id: Text, endpoint: EndpointConfig) -> None: """Print information about the conversation for the user.""" tracker_dump = await retrieve_tracker( - endpoint, sender_id, EventVerbosity.AFTER_RESTART + endpoint, conversation_id, EventVerbosity.AFTER_RESTART ) events = tracker_dump.get("events", []) table = _chat_history_table(events) - slot_strs = _slot_history(tracker_dump) + slot_strings = _slot_history(tracker_dump) - print ("------") - print ("Chat History\n") - print (table) + print("------") + print("Chat History\n") + print(table) - if slot_strs: - print ("\n") - print ("Current slots: \n\t{}\n".format(", ".join(slot_strs))) + if slot_strings: + print("\n") + print(f"Current slots: \n\t{', '.join(slot_strings)}\n") - print ("------") + print("------") def _chat_history_table(events: List[Dict[Text, Any]]) -> Text: @@ -474,13 +489,13 @@ def colored(txt: Text, color: Text) -> Text: def format_user_msg(user_event: UserUttered, max_width: int) -> Text: intent = user_event.intent or {} - intent_name = intent.get("name", "") + intent_name = intent.get(INTENT_NAME_KEY, "") _confidence = intent.get("confidence", 1.0) _md = _as_md_message(user_event.parse_data) _lines = [ colored(wrap(_md, max_width), "hired"), - "intent: {} {:03.2f}".format(intent_name, _confidence), + f"intent: {intent_name} {_confidence:03.2f}", ] return "\n".join(_lines) @@ -518,9 +533,7 @@ def add_user_cell(data, cell): if isinstance(event, ActionExecuted): bot_column.append(colored(event.action_name, "autocyan")) if event.confidence is not None: - bot_column[-1] += colored( - " {:03.2f}".format(event.confidence), "autowhite" - ) + bot_column[-1] += colored(f" {event.confidence:03.2f}", "autowhite") elif isinstance(event, UserUttered): if bot_column: @@ -555,34 +568,34 @@ def add_user_cell(data, cell): def _slot_history(tracker_dump: Dict[Text, Any]) -> List[Text]: """Create an array of slot representations to be displayed.""" - slot_strs = [] + slot_strings = [] for k, s in 
tracker_dump.get("slots", {}).items(): - colored_value = cliutils.wrap_with_color( + colored_value = cli_utils.wrap_with_color( str(s), color=rasa.cli.utils.bcolors.WARNING ) - slot_strs.append("{}: {}".format(k, colored_value)) - return slot_strs + slot_strings.append(f"{k}: {colored_value}") + return slot_strings -async def _write_data_to_file(sender_id: Text, endpoint: EndpointConfig): +async def _write_data_to_file(conversation_id: Text, endpoint: EndpointConfig): """Write stories and nlu data to file.""" story_path, nlu_path, domain_path = _request_export_info() - tracker = await retrieve_tracker(endpoint, sender_id) + tracker = await retrieve_tracker(endpoint, conversation_id) events = tracker.get("events", []) serialised_domain = await retrieve_domain(endpoint) domain = Domain.from_dict(serialised_domain) - await _write_stories_to_file(story_path, events, domain) - await _write_nlu_to_file(nlu_path, events) - await _write_domain_to_file(domain_path, events, domain) + _write_stories_to_file(story_path, events, domain) + _write_nlu_to_file(nlu_path, events) + _write_domain_to_file(domain_path, events, domain) logger.info("Successfully wrote stories and NLU data") -async def _ask_if_quit(sender_id: Text, endpoint: EndpointConfig) -> bool: +async def _ask_if_quit(conversation_id: Text, endpoint: EndpointConfig) -> bool: """Display the exit menu. Return `True` if the previous question should be retried.""" @@ -600,7 +613,7 @@ async def _ask_if_quit(sender_id: Text, endpoint: EndpointConfig) -> bool: if not answer or answer == "quit": # this is also the default answer if the user presses Ctrl-C - await _write_data_to_file(sender_id, endpoint) + await _write_data_to_file(conversation_id, endpoint) raise Abort() elif answer == "continue": # in this case we will just return, and the original @@ -615,21 +628,21 @@ async def _ask_if_quit(sender_id: Text, endpoint: EndpointConfig) -> bool: async def _request_action_from_user( - predictions: List[Dict[Text, Any]], sender_id: Text, endpoint: EndpointConfig + predictions: List[Dict[Text, Any]], conversation_id: Text, endpoint: EndpointConfig ) -> Tuple[Text, bool]: """Ask the user to correct an action prediction.""" - await _print_history(sender_id, endpoint) + await _print_history(conversation_id, endpoint) choices = [ { - "name": "{:03.2f} {:40}".format(a.get("score"), a.get("action")), + "name": f'{a.get("score"):03.2f} {a.get("action"):40}', "value": a.get("action"), } for a in predictions ] - tracker = await retrieve_tracker(endpoint, sender_id) + tracker = await retrieve_tracker(endpoint, conversation_id) events = tracker.get("events", []) session_actions_all = [a["name"] for a in _collect_actions(events)] @@ -645,15 +658,15 @@ async def _request_action_from_user( ) question = questionary.select("What is the next action of the bot?", choices) - action_name = await _ask_questions(question, sender_id, endpoint) + action_name = await _ask_questions(question, conversation_id, endpoint) is_new_action = action_name == NEW_ACTION if is_new_action: # create new action - action_name = await _request_free_text_action(sender_id, endpoint) + action_name = await _request_free_text_action(conversation_id, endpoint) if action_name.startswith(UTTER_PREFIX): utter_message = await _request_free_text_utterance( - sender_id, endpoint, action_name + conversation_id, endpoint, action_name ) NEW_TEMPLATES[action_name] = {utter_message: ""} @@ -662,7 +675,7 @@ async def _request_action_from_user( is_new_action = True action_name = action_name[32:] - print ("Thanks! 
The bot will now run {}.\n".format(action_name)) + print(f"Thanks! The bot will now run {action_name}.\n") return action_name, is_new_action @@ -685,7 +698,7 @@ def _request_export_info() -> Tuple[Text, Text, Text]: "merge learned data with previous training examples)", default=PATHS["nlu"], validate=io_utils.file_type_validator( - [".md"], + [".md", ".json"], "Please provide a valid export path for the NLU data, e.g. 'nlu.md'.", ), ), @@ -704,7 +717,7 @@ def _request_export_info() -> Tuple[Text, Text, Text]: if not answers: raise Abort() - return (answers["export_stories"], answers["export_nlu"], answers["export_domain"]) + return answers["export_stories"], answers["export_nlu"], answers["export_domain"] def _split_conversation_at_restarts( @@ -733,39 +746,23 @@ def _split_conversation_at_restarts( def _collect_messages(events: List[Dict[Text, Any]]) -> List[Message]: """Collect the message text and parsed data from the UserMessage events into a list""" - from rasa.nlu.extractors.duckling_http_extractor import DucklingHTTPExtractor - from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor - from rasa.nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor - msgs = [] + import rasa.nlu.training_data.util as rasa_nlu_training_data_utils + + messages = [] for event in events: if event.get("event") == UserUttered.type_name: data = event.get("parse_data", {}) + rasa_nlu_training_data_utils.remove_untrainable_entities_from(data) + msg = Message.build( + data["text"], data["intent"][INTENT_NAME_KEY], data["entities"] + ) + messages.append(msg) + elif event.get("event") == UserUtteranceReverted.type_name and messages: + messages.pop() # user corrected the nlu, remove incorrect example - for entity in data.get("entities", []): - - excluded_extractors = [ - DucklingHTTPExtractor.__name__, - SpacyEntityExtractor.__name__, - MitieEntityExtractor.__name__, - ] - logger.debug( - "Exclude entity marking of following extractors" - " {} when writing nlu data " - "to file.".format(excluded_extractors) - ) - - if entity.get("extractor") in excluded_extractors: - data["entities"].remove(entity) - - msg = Message.build(data["text"], data["intent"]["name"], data["entities"]) - msgs.append(msg) - - elif event.get("event") == UserUtteranceReverted.type_name and msgs: - msgs.pop() # user corrected the nlu, remove incorrect example - - return msgs + return messages def _collect_actions(events: List[Dict[Text, Any]]) -> List[Dict[Text, Any]]: @@ -774,10 +771,10 @@ def _collect_actions(events: List[Dict[Text, Any]]) -> List[Dict[Text, Any]]: return [evt for evt in events if evt.get("event") == ActionExecuted.type_name] -async def _write_stories_to_file( +def _write_stories_to_file( export_story_path: Text, events: List[Dict[Text, Any]], domain: Domain ) -> None: - """Write the conversation of the sender_id to the file paths.""" + """Write the conversation of the conversation_id to the file paths.""" sub_conversations = _split_conversation_at_restarts(events) @@ -788,12 +785,12 @@ async def _write_stories_to_file( else: append_write = "w" # make a new file if not - with open(export_story_path, append_write, encoding="utf-8") as f: + with open(export_story_path, append_write, encoding=io_utils.DEFAULT_ENCODING) as f: i = 1 for conversation in sub_conversations: parsed_events = rasa.core.events.deserialise_events(conversation) tracker = DialogueStateTracker.from_events( - "interactive_story_{}".format(i), evts=parsed_events, slots=domain.slots + f"interactive_story_{i}", 
evts=parsed_events, slots=domain.slots ) if any( @@ -803,20 +800,29 @@ async def _write_stories_to_file( f.write("\n" + tracker.export_stories(SAVE_IN_E2E)) -async def _write_nlu_to_file( - export_nlu_path: Text, events: List[Dict[Text, Any]] -) -> None: - """Write the nlu data of the sender_id to the file paths.""" +def _filter_messages(msgs: List[Message]) -> List[Message]: + """Filter messages removing those that start with INTENT_MESSAGE_PREFIX""" + + filtered_messages = [] + for msg in msgs: + if not msg.text.startswith(INTENT_MESSAGE_PREFIX): + filtered_messages.append(msg) + return filtered_messages + + +def _write_nlu_to_file(export_nlu_path: Text, events: List[Dict[Text, Any]]) -> None: + """Write the nlu data of the conversation_id to the file paths.""" from rasa.nlu.training_data import TrainingData msgs = _collect_messages(events) + msgs = _filter_messages(msgs) # noinspection PyBroadException try: previous_examples = loading.load_data(export_nlu_path) except Exception as e: logger.debug( - "An exception occurred while trying to load the NLU data. {}".format(str(e)) + f"An exception occurred while trying to load the NLU data. {str(e)}" ) # No previous file exists, use empty training data as replacement. previous_examples = TrainingData() @@ -825,24 +831,43 @@ async def _write_nlu_to_file( # need to guess the format of the file before opening it to avoid a read # in a write - if loading.guess_format(export_nlu_path) in {"md", "unk"}: - fformat = "md" + nlu_format = _get_nlu_target_format(export_nlu_path) + if nlu_format == RASA_YAML: + stringified_training_data = nlu_data.nlu_as_yaml() + elif nlu_format == MARKDOWN: + stringified_training_data = nlu_data.nlu_as_markdown() else: - fformat = "json" + stringified_training_data = nlu_data.nlu_as_json() - with open(export_nlu_path, "w", encoding="utf-8") as f: - if fformat == "md": - f.write(nlu_data.nlu_as_markdown()) - else: - f.write(nlu_data.nlu_as_json()) + io_utils.write_text_file(stringified_training_data, export_nlu_path) + + +def _get_nlu_target_format(export_path: Text) -> Text: + from rasa.data import ( + YAML_FILE_EXTENSIONS, + MARKDOWN_FILE_EXTENSIONS, + JSON_FILE_EXTENSIONS, + ) + + guessed_format = loading.guess_format(export_path) + + if guessed_format not in {MARKDOWN, RASA, RASA_YAML}: + if Path(export_path).suffix in JSON_FILE_EXTENSIONS: + guessed_format = RASA + elif Path(export_path).suffix in MARKDOWN_FILE_EXTENSIONS: + guessed_format = MARKDOWN + elif Path(export_path).suffix in YAML_FILE_EXTENSIONS: + guessed_format = RASA_YAML + return guessed_format -def _entities_from_messages(messages): - """Return all entities that occur in atleast one of the messages.""" + +def _entities_from_messages(messages: List[Message]) -> List[Text]: + """Return all entities that occur in at least one of the messages.""" return list({e["entity"] for m in messages for e in m.data.get("entities", [])}) -def _intents_from_messages(messages): +def _intents_from_messages(messages: List[Message]) -> Set[Text]: """Return all intents that occur in at least one of the messages.""" # set of distinct intents @@ -851,7 +876,7 @@ def _intents_from_messages(messages): return distinct_intents -async def _write_domain_to_file( +def _write_domain_to_file( domain_path: Text, events: List[Dict[Text, Any]], old_domain: Domain ) -> None: """Write an updated domain file to the file path.""" @@ -860,11 +885,16 @@ async def _write_domain_to_file( messages = _collect_messages(events) actions = _collect_actions(events) - templates = NEW_TEMPLATES + 
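The new `_get_nlu_target_format` above prefers the content-based `loading.guess_format` result and only falls back to the file extension when the guess is not one of the writable formats. A standalone sketch of that fallback; the format names and extension sets are hard-coded stand-ins for the constants the diff imports from `rasa.nlu.training_data.loading` and `rasa.data`:

    from pathlib import Path

    # Stand-ins for MARKDOWN / RASA / RASA_YAML and the extension sets in rasa.data.
    MARKDOWN, RASA, RASA_YAML = "md", "rasa_nlu", "rasa_yml"
    JSON_FILE_EXTENSIONS = {".json"}
    MARKDOWN_FILE_EXTENSIONS = {".md"}
    YAML_FILE_EXTENSIONS = {".yml", ".yaml"}

    def nlu_target_format(export_path: str, guessed_format: str) -> str:
        # Keep a writable content-based guess; otherwise decide from the file suffix.
        if guessed_format not in {MARKDOWN, RASA, RASA_YAML}:
            suffix = Path(export_path).suffix
            if suffix in JSON_FILE_EXTENSIONS:
                guessed_format = RASA
            elif suffix in MARKDOWN_FILE_EXTENSIONS:
                guessed_format = MARKDOWN
            elif suffix in YAML_FILE_EXTENSIONS:
                guessed_format = RASA_YAML
        return guessed_format

    assert nlu_target_format("nlu.yml", "unk") == RASA_YAML
    assert nlu_target_format("nlu.md", MARKDOWN) == MARKDOWN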
templates = NEW_TEMPLATES # type: Dict[Text, List[Dict[Text, Any]]] # TODO for now there is no way to distinguish between action and form collected_actions = list( - {e["name"] for e in actions if e["name"] not in default_action_names()} + { + e["name"] + for e in actions + if e["name"] not in default_action_names() + and e["name"] not in old_domain.form_names + } ) new_domain = Domain( @@ -873,7 +903,7 @@ async def _write_domain_to_file( slots=[], templates=templates, action_names=collected_actions, - form_names=[], + forms=[], ) old_domain.merge(new_domain).persist_clean(domain_path) @@ -881,15 +911,15 @@ async def _write_domain_to_file( async def _predict_till_next_listen( endpoint: EndpointConfig, - sender_id: Text, - sender_ids: List[Text], + conversation_id: Text, + conversation_ids: List[Text], plot_file: Optional[Text], ) -> None: """Predict and validate actions until we need to wait for a user message.""" listen = False while not listen: - result = await request_prediction(endpoint, sender_id) + result = await request_prediction(endpoint, conversation_id) predictions = result.get("scores") probabilities = [prediction["score"] for prediction in predictions] pred_out = int(np.argmax(probabilities)) @@ -897,19 +927,22 @@ async def _predict_till_next_listen( policy = result.get("policy") confidence = result.get("confidence") - await _print_history(sender_id, endpoint) + await _print_history(conversation_id, endpoint) await _plot_trackers( - sender_ids, plot_file, endpoint, unconfirmed=[ActionExecuted(action_name)] + conversation_ids, + plot_file, + endpoint, + unconfirmed=[ActionExecuted(action_name)], ) listen = await _validate_action( - action_name, policy, confidence, predictions, endpoint, sender_id + action_name, policy, confidence, predictions, endpoint, conversation_id ) - await _plot_trackers(sender_ids, plot_file, endpoint) + await _plot_trackers(conversation_ids, plot_file, endpoint) tracker_dump = await retrieve_tracker( - endpoint, sender_id, EventVerbosity.AFTER_RESTART + endpoint, conversation_id, EventVerbosity.AFTER_RESTART ) events = tracker_dump.get("events", []) @@ -922,19 +955,19 @@ async def _predict_till_next_listen( "buttons", None ): response = _get_button_choice(last_event) - if response != cliutils.FREE_TEXT_INPUT_PROMPT: - await send_message(endpoint, sender_id, response) + if response != cli_utils.FREE_TEXT_INPUT_PROMPT: + await send_message(endpoint, conversation_id, response) def _get_button_choice(last_event: Dict[Text, Any]) -> Text: data = last_event["data"] message = last_event.get("text", "") - choices = cliutils.button_choices_from_message_data( + choices = cli_utils.button_choices_from_message_data( data, allow_free_text_input=True ) question = questionary.select(message, choices) - response = cliutils.payload_from_button_question(question) + response = cli_utils.payload_from_button_question(question) return response @@ -942,7 +975,7 @@ async def _correct_wrong_nlu( corrected_nlu: Dict[Text, Any], events: List[Dict[Text, Any]], endpoint: EndpointConfig, - sender_id: Text, + conversation_id: Text, ) -> None: """A wrong NLU prediction got corrected, update core's tracker.""" @@ -958,7 +991,7 @@ async def _correct_wrong_nlu( corrected_message["parse_data"] = corrected_nlu await send_event( endpoint, - sender_id, + conversation_id, [revert_latest_user_utterance, listen_for_next_message, corrected_message], ) @@ -966,35 +999,37 @@ async def _correct_wrong_nlu( async def _correct_wrong_action( corrected_action: Text, endpoint: EndpointConfig, - 
sender_id: Text, + conversation_id: Text, is_new_action: bool = False, ) -> None: """A wrong action prediction got corrected, update core's tracker.""" await send_action( - endpoint, sender_id, corrected_action, is_new_action=is_new_action + endpoint, conversation_id, corrected_action, is_new_action=is_new_action ) -def _form_is_rejected(action_name, tracker): +def _form_is_rejected(action_name: Text, tracker: Dict[Text, Any]) -> bool: """Check if the form got rejected with the most recent action name.""" return ( - tracker.get("active_form", {}).get("name") - and action_name != tracker["active_form"]["name"] + tracker.get(ACTIVE_LOOP_KEY, {}).get("name") + and action_name != tracker[ACTIVE_LOOP_KEY]["name"] and action_name != ACTION_LISTEN_NAME ) -def _form_is_restored(action_name, tracker): +def _form_is_restored(action_name: Text, tracker: Dict[Text, Any]) -> bool: """Check whether the form is called again after it was rejected.""" return ( - tracker.get("active_form", {}).get("rejected") + tracker.get(ACTIVE_LOOP_KEY, {}).get("rejected") and tracker.get("latest_action_name") == ACTION_LISTEN_NAME - and action_name == tracker.get("active_form", {}).get("name") + and action_name == tracker.get(ACTIVE_LOOP_KEY, {}).get("name") ) -async def _confirm_form_validation(action_name, tracker, endpoint, sender_id): +async def _confirm_form_validation( + action_name, tracker, endpoint, conversation_id +) -> None: """Ask a user whether an input for a form should be validated. Previous to this call, the active form was chosen after it was rejected.""" @@ -1002,18 +1037,20 @@ async def _confirm_form_validation(action_name, tracker, endpoint, sender_id): requested_slot = tracker.get("slots", {}).get(REQUESTED_SLOT) validation_questions = questionary.confirm( - "Should '{}' validate user input to fill " - "the slot '{}'?".format(action_name, requested_slot) + f"Should '{action_name}' validate user input to fill " + f"the slot '{requested_slot}'?" + ) + validate_input = await _ask_questions( + validation_questions, conversation_id, endpoint ) - validate_input = await _ask_questions(validation_questions, sender_id, endpoint) if not validate_input: # notify form action to skip validation await send_event( - endpoint, sender_id, {"event": "form_validation", "validate": False} + endpoint, conversation_id, {"event": "form_validation", "validate": False} ) - elif not tracker.get("active_form", {}).get("validate"): + elif not tracker.get(ACTIVE_LOOP_KEY, {}).get("validate"): # handle contradiction with learned behaviour warning_question = questionary.confirm( "ERROR: FormPolicy predicted no form validation " @@ -1024,10 +1061,10 @@ async def _confirm_form_validation(action_name, tracker, endpoint, sender_id): "will not work as expected." ) - await _ask_questions(warning_question, sender_id, endpoint) + await _ask_questions(warning_question, conversation_id, endpoint) # notify form action to validate an input await send_event( - endpoint, sender_id, {"event": "form_validation", "validate": True} + endpoint, conversation_id, {"event": "form_validation", "validate": True} ) @@ -1037,62 +1074,62 @@ async def _validate_action( confidence: float, predictions: List[Dict[Text, Any]], endpoint: EndpointConfig, - sender_id: Text, + conversation_id: Text, ) -> bool: """Query the user to validate if an action prediction is correct. 
Returns `True` if the prediction is correct, `False` otherwise.""" - question = questionary.confirm( - "The bot wants to run '{}', correct?".format(action_name) - ) + question = questionary.confirm(f"The bot wants to run '{action_name}', correct?") - is_correct = await _ask_questions(question, sender_id, endpoint) + is_correct = await _ask_questions(question, conversation_id, endpoint) if not is_correct: action_name, is_new_action = await _request_action_from_user( - predictions, sender_id, endpoint + predictions, conversation_id, endpoint ) else: is_new_action = False - tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART) + tracker = await retrieve_tracker( + endpoint, conversation_id, EventVerbosity.AFTER_RESTART + ) if _form_is_rejected(action_name, tracker): # notify the tracker that form was rejected await send_event( endpoint, - sender_id, + conversation_id, { "event": "action_execution_rejected", - "name": tracker["active_form"]["name"], + "name": tracker[ACTIVE_LOOP_KEY]["name"], }, ) elif _form_is_restored(action_name, tracker): - await _confirm_form_validation(action_name, tracker, endpoint, sender_id) + await _confirm_form_validation(action_name, tracker, endpoint, conversation_id) if not is_correct: await _correct_wrong_action( - action_name, endpoint, sender_id, is_new_action=is_new_action + action_name, endpoint, conversation_id, is_new_action=is_new_action ) else: - await send_action(endpoint, sender_id, action_name, policy, confidence) + await send_action(endpoint, conversation_id, action_name, policy, confidence) return action_name == ACTION_LISTEN_NAME def _as_md_message(parse_data: Dict[Text, Any]) -> Text: """Display the parse data of a message in markdown format.""" - from rasa.nlu.training_data.formats import MarkdownWriter + from rasa.nlu.training_data.formats.readerwriter import TrainingDataWriter if parse_data.get("text", "").startswith(INTENT_MESSAGE_PREFIX): return parse_data["text"] if not parse_data.get("entities"): parse_data["entities"] = [] - # noinspection PyProtectedMember - return MarkdownWriter()._generate_message_md(parse_data) + + return TrainingDataWriter.generate_message(parse_data) def _validate_user_regex(latest_message: Dict[Text, Any], intents: List[Text]) -> bool: @@ -1102,7 +1139,7 @@ def _validate_user_regex(latest_message: Dict[Text, Any], intents: List[Text]) - `/greet`. Return `True` if the intent is a known one.""" parse_data = latest_message.get("parse_data", {}) - intent = parse_data.get("intent", {}).get("name") + intent = parse_data.get("intent", {}).get(INTENT_NAME_KEY) if intent in intents: return True @@ -1111,7 +1148,7 @@ def _validate_user_regex(latest_message: Dict[Text, Any], intents: List[Text]) - async def _validate_user_text( - latest_message: Dict[Text, Any], endpoint: EndpointConfig, sender_id: Text + latest_message: Dict[Text, Any], endpoint: EndpointConfig, conversation_id: Text ) -> bool: """Validate a user message input as free text. @@ -1119,37 +1156,39 @@ async def _validate_user_text( parse_data = latest_message.get("parse_data", {}) text = _as_md_message(parse_data) - intent = parse_data.get("intent", {}).get("name") + intent = parse_data.get("intent", {}).get(INTENT_NAME_KEY) entities = parse_data.get("entities", []) if entities: message = ( - "Is the intent '{}' correct for '{}' and are " - "all entities labeled correctly?".format(intent, text) + f"Is the intent '{intent}' correct for '{text}' and are " + f"all entities labeled correctly?" 
) else: message = ( - "Your NLU model classified '{}' with intent '{}'" - " and there are no entities, is this correct?".format(text, intent) + f"Your NLU model classified '{text}' with intent '{intent}'" + f" and there are no entities, is this correct?" ) if intent is None: - print ("The NLU classification for '{}' returned '{}'".format(text, intent)) + print(f"The NLU classification for '{text}' returned '{intent}'") return False else: question = questionary.confirm(message) - return await _ask_questions(question, sender_id, endpoint) + return await _ask_questions(question, conversation_id, endpoint) async def _validate_nlu( - intents: List[Text], endpoint: EndpointConfig, sender_id: Text + intents: List[Text], endpoint: EndpointConfig, conversation_id: Text ) -> None: """Validate if a user message, either text or intent is correct. If the prediction of the latest user message is incorrect, the tracker will be corrected with the correct intent / entities.""" - tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART) + tracker = await retrieve_tracker( + endpoint, conversation_id, EventVerbosity.AFTER_RESTART + ) latest_message = latest_user_message(tracker.get("events", [])) or {} @@ -1158,29 +1197,29 @@ async def _validate_nlu( ): valid = _validate_user_regex(latest_message, intents) else: - valid = await _validate_user_text(latest_message, endpoint, sender_id) + valid = await _validate_user_text(latest_message, endpoint, conversation_id) if not valid: corrected_intent = await _request_intent_from_user( - latest_message, intents, sender_id, endpoint + latest_message, intents, conversation_id, endpoint ) # corrected intents have confidence 1.0 corrected_intent["confidence"] = 1.0 events = tracker.get("events", []) - entities = await _correct_entities(latest_message, endpoint, sender_id) + entities = await _correct_entities(latest_message, endpoint, conversation_id) corrected_nlu = { "intent": corrected_intent, "entities": entities, "text": latest_message.get("text"), } - await _correct_wrong_nlu(corrected_nlu, events, endpoint, sender_id) + await _correct_wrong_nlu(corrected_nlu, events, endpoint, conversation_id) async def _correct_entities( - latest_message: Dict[Text, Any], endpoint: EndpointConfig, sender_id: Text + latest_message: Dict[Text, Any], endpoint: EndpointConfig, conversation_id: Text ) -> List[Dict[Text, Any]]: """Validate the entities of a user message. 
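# --- Illustrative sketch (not part of the patch): the entity-correction step below
# asks the user to re-annotate the message with Rasa's Markdown entity notation,
# i.e. "[value](type)". Assuming the public `parse_training_example` method that this
# patch introduces, a round trip of that notation looks roughly like this
# (the text and offsets are made up for illustration):
from rasa.nlu.training_data.formats import MarkdownReader

annotated = "I live in [Berlin](city)"
message = MarkdownReader().parse_training_example(annotated)
# the parsed Message carries the plain text plus the extracted entities,
# e.g. [{"start": 10, "end": 16, "value": "Berlin", "entity": "city"}]
print(message.text, message.data.get("entities"))
# ---------------------------------------------------------------------------------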
@@ -1193,9 +1232,9 @@ async def _correct_entities( "Please mark the entities using [value](type) notation", default=entity_str ) - annotation = await _ask_questions(question, sender_id, endpoint) + annotation = await _ask_questions(question, conversation_id, endpoint) # noinspection PyProtectedMember - parse_annotated = MarkdownReader()._parse_training_example(annotation) + parse_annotated = MarkdownReader().parse_training_example(annotation) corrected_entities = _merge_annotated_and_original_entities( parse_annotated, parse_original @@ -1204,7 +1243,9 @@ async def _correct_entities( return corrected_entities -def _merge_annotated_and_original_entities(parse_annotated, parse_original): +def _merge_annotated_and_original_entities( + parse_annotated: Message, parse_original: Dict[Text, Any] +) -> List[Dict[Text, Any]]: # overwrite entities which have already been # annotated in the original annotation to preserve # additional entity parser information @@ -1217,27 +1258,34 @@ def _merge_annotated_and_original_entities(parse_annotated, parse_original): return entities -def _is_same_entity_annotation(entity, other): - return entity["value"] == other["value"] and entity["entity"] == other["entity"] +def _is_same_entity_annotation(entity: Dict[Text, Any], other: Dict[Text, Any]) -> bool: + return ( + entity["value"] == other["value"] + and entity["entity"] == other["entity"] + and entity.get("group") == other.get("group") + and entity.get("role") == other.get("role") + ) -async def _enter_user_message(sender_id: Text, endpoint: EndpointConfig) -> None: +async def _enter_user_message(conversation_id: Text, endpoint: EndpointConfig) -> None: """Request a new message from the user.""" question = questionary.text("Your input ->") - message = await _ask_questions(question, sender_id, endpoint, lambda a: not a) + message = await _ask_questions(question, conversation_id, endpoint, lambda a: not a) if message == (INTENT_MESSAGE_PREFIX + constants.USER_INTENT_RESTART): raise RestartConversation() - await send_message(endpoint, sender_id, message) + await send_message(endpoint, conversation_id, message) -async def is_listening_for_message(sender_id: Text, endpoint: EndpointConfig) -> bool: +async def is_listening_for_message( + conversation_id: Text, endpoint: EndpointConfig +) -> bool: """Check if the conversation is in need for a user message.""" - tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.APPLIED) + tracker = await retrieve_tracker(endpoint, conversation_id, EventVerbosity.APPLIED) for i, e in enumerate(reversed(tracker.get("events", []))): if e.get("event") == UserUttered.type_name: @@ -1247,10 +1295,10 @@ async def is_listening_for_message(sender_id: Text, endpoint: EndpointConfig) -> return False -async def _undo_latest(sender_id: Text, endpoint: EndpointConfig) -> None: +async def _undo_latest(conversation_id: Text, endpoint: EndpointConfig) -> None: """Undo either the latest bot action or user message, whatever is last.""" - tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.ALL) + tracker = await retrieve_tracker(endpoint, conversation_id, EventVerbosity.ALL) # Get latest `UserUtterance` or `ActionExecuted` event.
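# --- Illustrative sketch (not part of the patch): with the extended
# `_is_same_entity_annotation` above, two annotations only count as the same if
# value, entity type, group and role all match. Plain-dict example, no Rasa API
# involved:
original = {"value": "Berlin", "entity": "city", "role": "destination"}
corrected = {"value": "Berlin", "entity": "city", "role": "origin"}
same = (
    original["value"] == corrected["value"]
    and original["entity"] == corrected["entity"]
    and original.get("group") == corrected.get("group")
    and original.get("role") == corrected.get("role")
)
assert same is False  # the differing roles make these two distinct annotations
# ---------------------------------------------------------------------------------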
last_event_type = None @@ -1263,54 +1311,54 @@ async def _undo_latest(sender_id: Text, endpoint: EndpointConfig) -> None: if last_event_type == ActionExecuted.type_name: undo_action = ActionReverted().as_dict() - await send_event(endpoint, sender_id, undo_action) + await send_event(endpoint, conversation_id, undo_action) elif last_event_type == UserUttered.type_name: undo_user_message = UserUtteranceReverted().as_dict() listen_for_next_message = ActionExecuted(ACTION_LISTEN_NAME).as_dict() await send_event( - endpoint, sender_id, [undo_user_message, listen_for_next_message] + endpoint, conversation_id, [undo_user_message, listen_for_next_message] ) async def _fetch_events( - sender_ids: List[Union[Text, List[Event]]], endpoint: EndpointConfig + conversation_ids: List[Union[Text, List[Event]]], endpoint: EndpointConfig ) -> List[List[Event]]: - """Retrieve all event trackers from the endpoint for all sender ids.""" + """Retrieve all event trackers from the endpoint for all conversation ids.""" event_sequences = [] - for sender_id in sender_ids: - if isinstance(sender_id, str): - tracker = await retrieve_tracker(endpoint, sender_id) + for conversation_id in conversation_ids: + if isinstance(conversation_id, str): + tracker = await retrieve_tracker(endpoint, conversation_id) events = tracker.get("events", []) for conversation in _split_conversation_at_restarts(events): parsed_events = rasa.core.events.deserialise_events(conversation) event_sequences.append(parsed_events) else: - event_sequences.append(sender_id) + event_sequences.append(conversation_id) return event_sequences async def _plot_trackers( - sender_ids: List[Union[Text, List[Event]]], + conversation_ids: List[Union[Text, List[Event]]], output_file: Optional[Text], endpoint: EndpointConfig, unconfirmed: Optional[List[Event]] = None, -): - """Create a plot of the trackers of the passed sender ids. +) -> None: + """Create a plot of the trackers of the passed conversation ids. - This assumes that the last sender id is the conversation we are currently + This assumes that the last conversation id is the conversation we are currently working on. If there are events that are not part of this active tracker yet, they can be passed as part of `unconfirmed`. They will be appended to the currently active conversation.""" - if not output_file or not sender_ids: + if not output_file or not conversation_ids: # if there is no output file provided, we are going to skip plotting - # same happens if there are no sender ids - return None + # same happens if there are no conversation ids + return - event_sequences = await _fetch_events(sender_ids, endpoint) + event_sequences = await _fetch_events(conversation_ids, endpoint) if unconfirmed: event_sequences[-1].extend(unconfirmed) @@ -1331,97 +1379,100 @@ def _print_help(skip_visualization: bool) -> None: visualization_url = DEFAULT_SERVER_FORMAT.format( "http", DEFAULT_SERVER_PORT + 1 ) - visualization_help = "Visualisation at {}/visualization.html.".format( - visualization_url + visualization_help = ( + f"Visualisation at {visualization_url}/visualization.html ." ) else: visualization_help = "" rasa.cli.utils.print_success( - "Bot loaded. {}\n" - "Type a message and press enter " - "(press 'Ctr-c' to exit). " - "".format(visualization_help) + f"Bot loaded. {visualization_help}\n" + f"Type a message and press enter " + f"(press 'Ctr-c' to exit)." 
) async def record_messages( endpoint: EndpointConfig, - sender_id: Text = UserMessage.DEFAULT_SENDER_ID, + file_importer: TrainingDataImporter, + conversation_id: Text = UserMessage.DEFAULT_SENDER_ID, max_message_limit: Optional[int] = None, - stories: Optional[Text] = None, skip_visualization: bool = False, -): +) -> None: """Read messages from the command line and print bot responses.""" - from rasa.core import training - try: - _print_help(skip_visualization) - try: domain = await retrieve_domain(endpoint) except ClientError: logger.exception( - "Failed to connect to Rasa Core server at '{}'. " - "Is the server running?".format(endpoint.url) + f"Failed to connect to Rasa Core server at '{endpoint.url}'. " + f"Is the server running?" ) return - trackers = await training.load_data( - stories, - Domain.from_dict(domain), - augmentation_factor=0, - use_story_concatenation=False, - ) - intents = [next(iter(i)) for i in (domain.get("intents") or [])] num_messages = 0 - sender_ids = [t.events for t in trackers] + [sender_id] if not skip_visualization: - plot_file = "story_graph.dot" - await _plot_trackers(sender_ids, plot_file, endpoint) + events_including_current_user_id = await _get_tracker_events_to_plot( + domain, file_importer, conversation_id + ) + + plot_file = DEFAULT_STORY_GRAPH_FILE + await _plot_trackers(events_including_current_user_id, plot_file, endpoint) else: + # `None` means that future `_plot_trackers` calls will also skip the + # visualization. plot_file = None + events_including_current_user_id = [] + + _print_help(skip_visualization) while not utils.is_limit_reached(num_messages, max_message_limit): try: - if await is_listening_for_message(sender_id, endpoint): - await _enter_user_message(sender_id, endpoint) - await _validate_nlu(intents, endpoint, sender_id) + if await is_listening_for_message(conversation_id, endpoint): + await _enter_user_message(conversation_id, endpoint) + await _validate_nlu(intents, endpoint, conversation_id) await _predict_till_next_listen( - endpoint, sender_id, sender_ids, plot_file + endpoint, + conversation_id, + events_including_current_user_id, + plot_file, ) num_messages += 1 except RestartConversation: - await send_event(endpoint, sender_id, Restarted().as_dict()) + await send_event(endpoint, conversation_id, Restarted().as_dict()) await send_event( - endpoint, sender_id, ActionExecuted(ACTION_LISTEN_NAME).as_dict() + endpoint, + conversation_id, + ActionExecuted(ACTION_LISTEN_NAME).as_dict(), ) logger.info("Restarted conversation, starting a new one.") except UndoLastStep: - await _undo_latest(sender_id, endpoint) - await _print_history(sender_id, endpoint) + await _undo_latest(conversation_id, endpoint) + await _print_history(conversation_id, endpoint) except ForkTracker: - await _print_history(sender_id, endpoint) + await _print_history(conversation_id, endpoint) - events_fork = await _request_fork_from_user(sender_id, endpoint) + events_fork = await _request_fork_from_user(conversation_id, endpoint) - await send_event(endpoint, sender_id, Restarted().as_dict()) + await send_event(endpoint, conversation_id, Restarted().as_dict()) if events_fork: for evt in events_fork: - await send_event(endpoint, sender_id, evt) + await send_event(endpoint, conversation_id, evt) logger.info("Restarted conversation at fork.") - await _print_history(sender_id, endpoint) - await _plot_trackers(sender_ids, plot_file, endpoint) + await _print_history(conversation_id, endpoint) + await _plot_trackers( + events_including_current_user_id, plot_file, endpoint 
+ ) except Abort: return @@ -1430,19 +1481,59 @@ async def record_messages( raise -def _serve_application(app, stories, skip_visualization): +async def _get_tracker_events_to_plot( + domain: Dict[Text, Any], file_importer: TrainingDataImporter, conversation_id: Text +) -> List[Union[Text, List[Event]]]: + training_trackers = await _get_training_trackers(file_importer, domain) + number_of_trackers = len(training_trackers) + if number_of_trackers > MAX_NUMBER_OF_TRAINING_STORIES_FOR_VISUALIZATION: + rasa.cli.utils.print_warning( + f"You have {number_of_trackers} different story paths in " + f"your training data. Visualizing them is very resource " + f"consuming. Hence, the visualization will only show the stories " + f"which you created during interactive learning, but not your " + f"training stories." + ) + training_trackers = [] + + training_data_events = [t.events for t in training_trackers] + events_including_current_user_id = training_data_events + [conversation_id] + + return events_including_current_user_id + + +async def _get_training_trackers( + file_importer: TrainingDataImporter, domain: Dict[str, Any] +) -> List[DialogueStateTracker]: + from rasa.core import training + + return await training.load_data( + file_importer, + Domain.from_dict(domain), + augmentation_factor=0, + use_story_concatenation=False, + ) + + +def _serve_application( + app: Sanic, + file_importer: TrainingDataImporter, + skip_visualization: bool, + conversation_id: Text, + port: int, +) -> Sanic: """Start a core server and attach the interactive learning IO.""" - endpoint = EndpointConfig(url=DEFAULT_SERVER_URL) + endpoint = EndpointConfig(url=DEFAULT_SERVER_FORMAT.format("http", port)) - async def run_interactive_io(running_app: Sanic): + async def run_interactive_io(running_app: Sanic) -> None: """Small wrapper to shut down the server once cmd io is done.""" await record_messages( endpoint=endpoint, - stories=stories, + file_importer=file_importer, skip_visualization=skip_visualization, - sender_id=uuid.uuid4().hex, + conversation_id=conversation_id, ) logger.info("Killing Sanic server now.") @@ -1453,12 +1544,12 @@ async def run_interactive_io(running_app: Sanic): update_sanic_log_level() - app.run(host="0.0.0.0", port=DEFAULT_SERVER_PORT) + app.run(host="0.0.0.0", port=port) return app -def start_visualization(image_path: Text = None) -> None: +def start_visualization(image_path: Text, port: int) -> None: """Add routes to serve the conversation visualization files.""" app = Sanic(__name__) @@ -1484,12 +1575,14 @@ def visualisation_png(request): update_sanic_log_level() - app.run(host="0.0.0.0", port=DEFAULT_SERVER_PORT + 1, access_log=False) + app.run(host="0.0.0.0", port=port, access_log=False) # noinspection PyUnusedLocal -async def train_agent_on_start(args, endpoints, additional_arguments, app, loop): - _interpreter = NaturalLanguageInterpreter.create(args.get("nlu"), endpoints.nlu) +async def train_agent_on_start( + args, endpoints, additional_arguments, app, loop +) -> None: + _interpreter = NaturalLanguageInterpreter.create(endpoints.nlu or args.get("nlu")) model_directory = args.get("out", tempfile.mkdtemp(suffix="_core_model")) @@ -1499,7 +1592,6 @@ async def train_agent_on_start(args, endpoints, additional_arguments, app, loop) model_directory, _interpreter, endpoints, - args.get("dump_stories"), args.get("config")[0], None, additional_arguments, @@ -1507,13 +1599,15 @@ async def train_agent_on_start(args, endpoints, additional_arguments, app, loop) app.agent = _agent -async def 
wait_til_server_is_running(endpoint, max_retries=30, sleep_between_retries=1): +async def wait_til_server_is_running( + endpoint, max_retries=30, sleep_between_retries=1 +) -> bool: """Try to reach the server, retry a couple of times and sleep in between.""" while max_retries: try: r = await retrieve_status(endpoint) - logger.info("Reached core: {}".format(r)) + logger.info(f"Reached core: {r}") if not r.get("is_ready"): # server did not finish loading the agent yet # in this case, we need to wait till the model trained @@ -1532,11 +1626,11 @@ async def wait_til_server_is_running(endpoint, max_retries=30, sleep_between_ret def run_interactive_learning( - stories: Text = None, + file_importer: TrainingDataImporter, skip_visualization: bool = False, + conversation_id: Text = uuid.uuid4().hex, server_args: Dict[Text, Any] = None, - additional_arguments: Dict[Text, Any] = None, -): +) -> None: """Start the interactive learning with the model of the agent.""" global SAVE_IN_E2E server_args = server_args or {} @@ -1550,32 +1644,32 @@ def run_interactive_learning( if server_args.get("domain"): PATHS["domain"] = server_args["domain"] + port = server_args.get("port", DEFAULT_SERVER_PORT) + SAVE_IN_E2E = server_args["e2e"] if not skip_visualization: - p = Process(target=start_visualization, args=("story_graph.dot",)) + visualisation_port = port + 1 + p = Process( + target=start_visualization, + args=(DEFAULT_STORY_GRAPH_FILE, visualisation_port), + ) p.daemon = True p.start() else: p = None - app = run.configure_app(enable_api=True) + app = run.configure_app(port=port, conversation_id="default", enable_api=True) endpoints = AvailableEndpoints.read_endpoints(server_args.get("endpoints")) # before_server_start handlers make sure the agent is loaded before the # interactive learning IO starts - if server_args.get("model"): - app.register_listener( - partial(run.load_agent_on_start, server_args.get("model"), endpoints, None), - "before_server_start", - ) - else: - app.register_listener( - partial(train_agent_on_start, server_args, endpoints, additional_arguments), - "before_server_start", - ) + app.register_listener( + partial(run.load_agent_on_start, server_args.get("model"), endpoints, None), + "before_server_start", + ) - _serve_application(app, stories, skip_visualization) + _serve_application(app, file_importer, skip_visualization, conversation_id, port) if not skip_visualization and p is not None: p.terminate() # pytype: disable=attribute-error diff --git a/rasa/core/training/loading.py b/rasa/core/training/loading.py new file mode 100644 index 000000000000..fd80553bbbc2 --- /dev/null +++ b/rasa/core/training/loading.py @@ -0,0 +1,137 @@ +import logging +import os +from pathlib import Path +from typing import Text, Optional, Dict, List, Union + +import rasa.utils.io as io_utils +from rasa.core.domain import Domain +from rasa.core.interpreter import NaturalLanguageInterpreter, RegexInterpreter +from rasa.core.training.story_reader.markdown_story_reader import MarkdownStoryReader +from rasa.core.training.story_reader.story_reader import StoryReader +from rasa.core.training.story_reader.yaml_story_reader import YAMLStoryReader +from rasa.core.training.structures import StoryStep +from rasa.data import YAML_FILE_EXTENSIONS, MARKDOWN_FILE_EXTENSIONS + +logger = logging.getLogger(__name__) + + +def _get_reader( + filename: Text, + domain: Domain, + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + template_variables: Optional[Dict] = None, + use_e2e: bool = False, +) -> StoryReader: + + if 
Path(filename).suffix in MARKDOWN_FILE_EXTENSIONS: + return MarkdownStoryReader( + interpreter, domain, template_variables, use_e2e, filename + ) + elif Path(filename).suffix in YAML_FILE_EXTENSIONS: + return YAMLStoryReader( + interpreter, domain, template_variables, use_e2e, filename + ) + else: + # This is a use case for uploading the story over REST API. + # The source file has a random name. + return _guess_reader(filename, domain, interpreter, template_variables, use_e2e) + + +def _guess_reader( + filename: Text, + domain: Domain, + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + template_variables: Optional[Dict] = None, + use_e2e: bool = False, +) -> StoryReader: + if YAMLStoryReader.is_yaml_story_file(filename): + return YAMLStoryReader( + interpreter, domain, template_variables, use_e2e, filename + ) + elif MarkdownStoryReader.is_markdown_story_file(filename): + return MarkdownStoryReader( + interpreter, domain, template_variables, use_e2e, filename + ) + raise ValueError( + f"Failed to find a reader class for the story file `{filename}`. " + f"Supported formats are " + f"{', '.join(MARKDOWN_FILE_EXTENSIONS.union(YAML_FILE_EXTENSIONS))}." + ) + + +async def load_data_from_resource( + resource: Union[Text, Path], + domain: Domain, + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + template_variables: Optional[Dict] = None, + use_e2e: bool = False, + exclusion_percentage: Optional[int] = None, +) -> List["StoryStep"]: + """Loads core training data from the specified folder. + + Args: + resource: Folder/File with core training data files. + domain: Domain object. + interpreter: Interpreter to be used for parsing user's utterances. + template_variables: Variables that have to be replaced in the training data. + use_e2e: Identifies if the e2e reader should be used. + exclusion_percentage: Identifies the percentage of training data that + should be excluded from the training. + + Returns: + Story steps from the training data. + """ + if not os.path.exists(resource): + raise ValueError(f"Resource '{resource}' does not exist.") + + return await load_data_from_files( + io_utils.list_files(resource), + domain, + interpreter, + template_variables, + use_e2e, + exclusion_percentage, + ) + + +async def load_data_from_files( + story_files: List[Text], + domain: Domain, + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + template_variables: Optional[Dict] = None, + use_e2e: bool = False, + exclusion_percentage: Optional[int] = None, +) -> List["StoryStep"]: + """Loads core training data from the specified files. + + Args: + story_files: List of files with training data in it. + domain: Domain object. + interpreter: Interpreter to be used for parsing user's utterances. + template_variables: Variables that have to be replaced in the training data. + use_e2e: Identifies whether the e2e reader should be used. + exclusion_percentage: Identifies the percentage of training data that + should be excluded from the training. + + Returns: + Story steps from the training data. 
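# --- Illustrative sketch (not part of the patch): rough usage of the new loading
# module. The file names here are hypothetical; `load_data_from_resource` accepts
# either a single story file or a folder and returns the parsed story steps:
import asyncio

from rasa.core.domain import Domain
from rasa.core.training import loading


async def _load_story_steps():
    domain = Domain.load("domain.yml")
    return await loading.load_data_from_resource("data/stories.md", domain)


story_steps = asyncio.run(_load_story_steps())
print(f"Loaded {len(story_steps)} story steps.")
# ---------------------------------------------------------------------------------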
+ """ + story_steps = [] + + for story_file in story_files: + + reader = _get_reader( + story_file, domain, interpreter, template_variables, use_e2e + ) + + steps = await reader.read_from_file(story_file) + story_steps.extend(steps) + + if exclusion_percentage and exclusion_percentage != 100: + import random + + idx = int(round(exclusion_percentage / 100.0 * len(story_steps))) + random.shuffle(story_steps) + story_steps = story_steps[:-idx] + + return story_steps diff --git a/rasa/core/training/story_conflict.py b/rasa/core/training/story_conflict.py new file mode 100644 index 000000000000..7501f817ca2b --- /dev/null +++ b/rasa/core/training/story_conflict.py @@ -0,0 +1,321 @@ +import logging +from collections import defaultdict +from typing import List, Optional, Dict, Text, Tuple, Generator, NamedTuple + +from rasa.core.actions.action import ACTION_LISTEN_NAME +from rasa.core.domain import PREV_PREFIX, Domain +from rasa.core.events import ActionExecuted, Event +from rasa.core.featurizers import MaxHistoryTrackerFeaturizer +from rasa.nlu.constants import INTENT +from rasa.core.training.generator import TrackerWithCachedStates + +logger = logging.getLogger(__name__) + + +class StoryConflict: + """ + Represents a conflict between two or more stories. + + Here, a conflict means that different actions are supposed to follow from + the same dialogue state, which most policies cannot learn. + + Attributes: + conflicting_actions: A list of actions that all follow from the same state. + conflict_has_prior_events: If `False`, then the conflict occurs without any + prior events (i.e. at the beginning of a dialogue). + """ + + def __init__(self, sliced_states: List[Optional[Dict[Text, float]]]) -> None: + """ + Creates a `StoryConflict` from a given state. + + Args: + sliced_states: The (sliced) dialogue state at which the conflict occurs. + """ + self._sliced_states = sliced_states + self._conflicting_actions = defaultdict( + list + ) # {"action": ["story_1", ...], ...} + + def __hash__(self) -> int: + return hash(str(list(self._sliced_states))) + + def add_conflicting_action(self, action: Text, story_name: Text) -> None: + """Adds another action that follows from the same state. + + Args: + action: Name of the action. + story_name: Name of the story where this action is chosen. + """ + self._conflicting_actions[action] += [story_name] + + @property + def conflicting_actions(self) -> List[Text]: + """List of conflicting actions. + + Returns: + List of conflicting actions. + + """ + return list(self._conflicting_actions.keys()) + + @property + def conflict_has_prior_events(self) -> bool: + """Checks if prior events exist. + + Returns: + `True` if anything has happened before this conflict, otherwise `False`. 
+ """ + return _get_previous_event(self._sliced_states[-1])[0] is not None + + def __str__(self) -> Text: + # Describe where the conflict occurs in the stories + last_event_type, last_event_name = _get_previous_event(self._sliced_states[-1]) + if last_event_type: + conflict_message = f"Story structure conflict after {last_event_type} '{last_event_name}':\n" + else: + conflict_message = "Story structure conflict at the beginning of stories:\n" + + # List which stories are in conflict with one another + for action, stories in self._conflicting_actions.items(): + conflict_message += ( + f" {self._summarize_conflicting_actions(action, stories)}" + ) + + return conflict_message + + @staticmethod + def _summarize_conflicting_actions(action: Text, stories: List[Text]) -> Text: + """Gives a summarized textual description of where one action occurs. + + Args: + action: The name of the action. + stories: The stories in which the action occurs. + + Returns: + A textual summary. + """ + if len(stories) > 3: + # Four or more stories are present + conflict_description = ( + f"'{stories[0]}', '{stories[1]}', and {len(stories) - 2} other trackers" + ) + elif len(stories) == 3: + conflict_description = f"'{stories[0]}', '{stories[1]}', and '{stories[2]}'" + elif len(stories) == 2: + conflict_description = f"'{stories[0]}' and '{stories[1]}'" + elif len(stories) == 1: + conflict_description = f"'{stories[0]}'" + else: + raise ValueError( + "An internal error occurred while trying to summarise a conflict without stories. " + "Please file a bug report at https://github.com/RasaHQ/rasa." + ) + + return f"{action} predicted in {conflict_description}\n" + + +class TrackerEventStateTuple(NamedTuple): + """Holds a tracker, an event, and sliced states associated with those.""" + + tracker: TrackerWithCachedStates + event: Event + sliced_states: List[Dict[Text, float]] + + @property + def sliced_states_hash(self) -> int: + return hash(str(list(self.sliced_states))) + + +def _get_length_of_longest_story( + trackers: List[TrackerWithCachedStates], domain: Domain +) -> int: + """Returns the length of the longest story in the given trackers. + + Args: + trackers: Trackers to get stories from. + domain: The domain. + + Returns: + The maximal length of any story. + """ + return max([len(tracker.past_states(domain)) for tracker in trackers]) + + +def find_story_conflicts( + trackers: List[TrackerWithCachedStates], + domain: Domain, + max_history: Optional[int] = None, +) -> List[StoryConflict]: + """Generates `StoryConflict` objects, describing conflicts in the given trackers. + + Args: + trackers: Trackers in which to search for conflicts. + domain: The domain. + max_history: The maximum history length to be taken into account. + + Returns: + StoryConflict objects.
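# --- Illustrative sketch (not part of the patch): once trackers have been generated
# from the training stories, the conflict check above can be used roughly like this
# (tracker generation itself is omitted here):
from rasa.core.training.story_conflict import find_story_conflicts


def report_story_conflicts(trackers, domain, max_history=5):
    conflicts = find_story_conflicts(trackers, domain, max_history)
    for conflict in conflicts:
        # each StoryConflict renders a readable description via __str__
        print(conflict)
    return conflicts
# ---------------------------------------------------------------------------------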
+ """ + if not max_history: + max_history = _get_length_of_longest_story(trackers, domain) + + logger.info(f"Considering the preceding {max_history} turns for conflict analysis.") + + # We do this in two steps, to reduce memory consumption: + + # Create a 'state -> list of actions' dict, where the state is + # represented by its hash + conflicting_state_action_mapping = _find_conflicting_states( + trackers, domain, max_history + ) + + # Iterate once more over all states and note the (unhashed) state, + # for which a conflict occurs + conflicts = _build_conflicts_from_states( + trackers, domain, max_history, conflicting_state_action_mapping + ) + + return conflicts + + +def _find_conflicting_states( + trackers: List[TrackerWithCachedStates], domain: Domain, max_history: int +) -> Dict[int, Optional[List[Text]]]: + """Identifies all states from which different actions follow. + + Args: + trackers: Trackers that contain the states. + domain: The domain object. + max_history: Number of turns to take into account for the state descriptions. + + Returns: + A dictionary mapping state-hashes to a list of actions that follow from each state. + """ + # Create a 'state -> list of actions' dict, where the state is + # represented by its hash + state_action_mapping = defaultdict(list) + for element in _sliced_states_iterator(trackers, domain, max_history): + hashed_state = element.sliced_states_hash + if element.event.as_story_string() not in state_action_mapping[hashed_state]: + state_action_mapping[hashed_state] += [element.event.as_story_string()] + + # Keep only conflicting `state_action_mapping`s + return { + state_hash: actions + for (state_hash, actions) in state_action_mapping.items() + if len(actions) > 1 + } + + +def _build_conflicts_from_states( + trackers: List[TrackerWithCachedStates], + domain: Domain, + max_history: int, + conflicting_state_action_mapping: Dict[int, Optional[List[Text]]], +) -> List["StoryConflict"]: + """Builds a list of `StoryConflict` objects for each given conflict. + + Args: + trackers: Trackers that contain the states. + domain: The domain object. + max_history: Number of turns to take into account for the state descriptions. + conflicting_state_action_mapping: A dictionary mapping state-hashes to a list of actions + that follow from each state. + + Returns: + A list of `StoryConflict` objects that describe inconsistencies in the story + structure. These objects also contain the history that leads up to the conflict. + """ + # Iterate once more over all states and note the (unhashed) state, + # for which a conflict occurs + conflicts = {} + for element in _sliced_states_iterator(trackers, domain, max_history): + hashed_state = element.sliced_states_hash + + if hashed_state in conflicting_state_action_mapping: + if hashed_state not in conflicts: + conflicts[hashed_state] = StoryConflict(element.sliced_states) + + conflicts[hashed_state].add_conflicting_action( + action=element.event.as_story_string(), + story_name=element.tracker.sender_id, + ) + + # Return list of conflicts that arise from unpredictable actions + # (actions that start the conversation) + return [ + conflict + for (hashed_state, conflict) in conflicts.items() + if conflict.conflict_has_prior_events + ] + + +def _sliced_states_iterator( + trackers: List[TrackerWithCachedStates], domain: Domain, max_history: int +) -> Generator[TrackerEventStateTuple, None, None]: + """Creates an iterator over sliced states. 
+ + Iterate over all given trackers and all sliced states within each tracker, + where the slicing is based on `max_history`. + + Args: + trackers: List of trackers. + domain: Domain (used for tracker.past_states). + max_history: Assumed `max_history` value for slicing. + + Yields: + A (tracker, event, sliced_states) triplet. + """ + for tracker in trackers: + states = tracker.past_states(domain) + states = [dict(state) for state in states] + + idx = 0 + for event in tracker.events: + if isinstance(event, ActionExecuted): + sliced_states = MaxHistoryTrackerFeaturizer.slice_state_history( + states[: idx + 1], max_history + ) + yield TrackerEventStateTuple(tracker, event, sliced_states) + idx += 1 + + +def _get_previous_event( + state: Optional[Dict[Text, float]] +) -> Tuple[Optional[Text], Optional[Text]]: + """Returns previous event type and name. + + Returns the type and name of the event (action or intent) previous to the + given state. + + Args: + state: Element of sliced states. + + Returns: + Tuple of (type, name) strings of the prior event. + """ + + previous_event_type = None + previous_event_name = None + + if not state: + return previous_event_type, previous_event_name + + # A typical state is, for example, + # `{'prev_action_listen': 1.0, 'intent_greet': 1.0, 'slot_cuisine_0': 1.0}`. + # We need to look out for `prev_` and `intent_` prefixes in the labels. + for turn_label in state: + if ( + turn_label.startswith(PREV_PREFIX) + and turn_label.replace(PREV_PREFIX, "") != ACTION_LISTEN_NAME + ): + # The `prev_...` was an action that was NOT `action_listen` + return "action", turn_label.replace(PREV_PREFIX, "") + elif turn_label.startswith(INTENT + "_"): + # We found an intent, but it is only the previous event if + # the `prev_...` was `prev_action_listen`, so we don't return. 
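# --- Illustrative sketch (not part of the patch): given the state layout described
# in the comments above, `_get_previous_event` behaves roughly like this (the private
# helper is imported here only for illustration):
from rasa.core.training.story_conflict import _get_previous_event

# the previous `action_listen` is skipped, so the intent is reported
assert _get_previous_event({"prev_action_listen": 1.0, "intent_greet": 1.0}) == ("intent", "greet")
# any other previous action is returned immediately
assert _get_previous_event({"prev_utter_ask_cuisine": 1.0, "intent_inform": 1.0}) == ("action", "utter_ask_cuisine")
# no state (or an empty one) means there is no prior event
assert _get_previous_event(None) == (None, None)
# ---------------------------------------------------------------------------------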
+ previous_event_type = "intent" + previous_event_name = turn_label.replace(INTENT + "_", "") + + return previous_event_type, previous_event_name diff --git a/tests/nlu/base/__init__.py b/rasa/core/training/story_reader/__init__.py similarity index 100% rename from tests/nlu/base/__init__.py rename to rasa/core/training/story_reader/__init__.py diff --git a/rasa/core/training/story_reader/markdown_story_reader.py b/rasa/core/training/story_reader/markdown_story_reader.py new file mode 100644 index 000000000000..c51191ede65b --- /dev/null +++ b/rasa/core/training/story_reader/markdown_story_reader.py @@ -0,0 +1,265 @@ +import asyncio +import json +import logging +import os +import re +from pathlib import PurePath, Path +from typing import Dict, Text, List, Any, Union + +import rasa.utils.io as io_utils +from rasa.constants import DOCS_URL_DOMAINS, DOCS_URL_STORIES +from rasa.core.constants import INTENT_MESSAGE_PREFIX +from rasa.core.events import UserUttered +from rasa.core.exceptions import StoryParseError +from rasa.core.interpreter import RegexInterpreter +from rasa.core.training.dsl import EndToEndReader +from rasa.core.training.story_reader.story_reader import StoryReader +from rasa.core.training.structures import StoryStep, FORM_PREFIX +from rasa.data import MARKDOWN_FILE_EXTENSIONS +from rasa.nlu.constants import INTENT_NAME_KEY +from rasa.utils.common import raise_warning + +logger = logging.getLogger(__name__) + + +class MarkdownStoryReader(StoryReader): + """Class that reads the core training data in a Markdown format + + """ + + async def read_from_file(self, filename: Union[Text, Path]) -> List[StoryStep]: + """Given a md file reads the contained stories.""" + + try: + with open(filename, "r", encoding=io_utils.DEFAULT_ENCODING) as f: + lines = f.readlines() + + return await self._process_lines(lines) + except ValueError as err: + file_info = "Invalid story file format. 
Failed to parse '{}'".format( + os.path.abspath(filename) + ) + logger.exception(file_info) + if not err.args: + err.args = ("",) + err.args = err.args + (file_info,) + raise + + async def _process_lines(self, lines: List[Text]) -> List[StoryStep]: + multiline_comment = False + + for idx, line in enumerate(lines): + line_num = idx + 1 + try: + line = self._replace_template_variables(self._clean_up_line(line)) + if line.strip() == "": + continue + elif line.startswith("<!--"): + multiline_comment = True + continue + elif multiline_comment and line.endswith("-->"): + multiline_comment = False + continue + elif multiline_comment: + continue + elif line.startswith(">>"): + # reached a new rule block + rule_name = line.lstrip(">> ") + self._new_rule_part(rule_name, self.source_name) + elif line.startswith("#"): + # reached a new story block + name = line[1:].strip("# ") + self._new_story_part(name, self.source_name) + elif line.startswith(">"): + # reached a checkpoint + name, conditions = self._parse_event_line(line[1:].strip()) + self._add_checkpoint(name, conditions) + elif re.match(fr"^[*\-]\s+{FORM_PREFIX}", line): + logger.debug( + "Skipping line {}, " + "because it was generated by " + "form action".format(line) + ) + elif line.startswith("-"): + # reached a slot, event, or executed action + event_name, parameters = self._parse_event_line(line[1:]) + self._add_event(event_name, parameters) + elif line.startswith("*"): + # reached a user message + user_messages = [el.strip() for el in line[1:].split(" OR ")] + if self.use_e2e: + await self._add_e2e_messages(user_messages, line_num) + else: + await self._add_user_messages(user_messages, line_num) + else: + # reached an unknown type of line + logger.warning( + f"Skipping line {line_num}. " + "No valid command found. " + f"Line Content: '{line}'" + ) + except Exception as e: + msg = f"Error in line {line_num}: {e}" + logger.error(msg, exc_info=1) # pytype: disable=wrong-arg-types + raise ValueError(msg) + self._add_current_stories_to_result() + return self.story_steps + + @staticmethod + def _parameters_from_json_string(s: Text, line: Text) -> Dict[Text, Any]: + """Parse the passed string as json and create a parameter dict.""" + + if s is None or not s.strip(): + # if there is no string there are not going to be any parameters + return {} + + try: + parsed_slots = json.loads(s) + if isinstance(parsed_slots, dict): + return parsed_slots + else: + raise Exception( + "Parsed value isn't a json object " + "(instead parser found '{}')" + ".".format(type(parsed_slots)) + ) + except Exception as e: + raise ValueError( + "Failed to parse arguments in line " + "'{}'. Failed to decode parameters " + "as a json object. Make sure the event " + "name is followed by a proper json " + "object. 
Error: {}".format(line, e) + ) + + def _replace_template_variables(self, line: Text) -> Text: + def process_match(matchobject): + varname = matchobject.group(1) + if varname in self.template_variables: + return self.template_variables[varname] + else: + raise ValueError( + "Unknown variable `{var}` " + "in template line '{line}'" + "".format(var=varname, line=line) + ) + + template_rx = re.compile(r"`([^`]+)`") + return template_rx.sub(process_match, line) + + @staticmethod + def _clean_up_line(line: Text) -> Text: + """Removes comments and trailing spaces""" + + return re.sub(r"<!--.*?-->", "", line).strip() + + @staticmethod + def _parse_event_line(line): + """Tries to parse a single line as an event with arguments.""" + + # the regex matches "slot{"a": 1}" + m = re.search("^([^{]+)([{].+)?", line) + if m is not None: + event_name = m.group(1).strip() + slots_str = m.group(2) + parameters = MarkdownStoryReader._parameters_from_json_string( + slots_str, line + ) + return event_name, parameters + else: + raise_warning( + f"Failed to parse action line '{line}'. Ignoring this line.", + docs=DOCS_URL_STORIES, + ) + return "", {} + + async def _add_user_messages(self, messages, line_num): + if not self.current_step_builder: + raise StoryParseError( + "User message '{}' at invalid location. " + "Expected story start.".format(messages) + ) + parsed_messages = await asyncio.gather( + *[self._parse_message(m, line_num) for m in messages] + ) + self.current_step_builder.add_user_messages( + parsed_messages, self.unfold_or_utterances + ) + + async def _add_e2e_messages(self, e2e_messages: List[Text], line_num: int) -> None: + if not self.current_step_builder: + raise StoryParseError( + "End-to-end message '{}' at invalid " + "location. Expected story start." + "".format(e2e_messages) + ) + e2e_reader = EndToEndReader() + parsed_messages = [] + for m in e2e_messages: + message = e2e_reader._parse_item(m) + parsed = await self._parse_message(message.text, line_num) + + parsed.parse_data["true_intent"] = message.data["true_intent"] + parsed.parse_data["true_entities"] = message.data.get("entities") or [] + parsed_messages.append(parsed) + self.current_step_builder.add_user_messages(parsed_messages) + + async def _parse_message(self, message: Text, line_num: int) -> UserUttered: + if message.startswith(INTENT_MESSAGE_PREFIX): + parse_data = await RegexInterpreter().parse(message) + else: + parse_data = await self.interpreter.parse(message) + utterance = UserUttered( + message, parse_data.get("intent"), parse_data.get("entities"), parse_data + ) + intent_name = utterance.intent.get(INTENT_NAME_KEY) + if self.domain and intent_name not in self.domain.intents: + raise_warning( + f"Found unknown intent '{intent_name}' on line {line_num}. " + "Please, make sure that all intents are " + "listed in your domain yaml.", + UserWarning, + docs=DOCS_URL_DOMAINS, + ) + return utterance + + @staticmethod + def is_markdown_story_file(file_path: Union[Text, Path]) -> bool: + """Check if file contains Core training data or rule data in Markdown format. + + Args: + file_path: Path of the file to check. + + Returns: + `True` in case the file is a Core Markdown training data or rule data file, + `False` otherwise. 
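# --- Illustrative sketch (not part of the patch): `is_markdown_story_file` is a
# cheap pre-check; it first looks at the file extension and then at whether any line
# matches the `##` story or `>>` rule pattern. Hypothetical usage with made-up paths:
from rasa.core.training.story_reader.markdown_story_reader import MarkdownStoryReader

for candidate in ["data/stories.md", "data/rules.md", "domain.yml"]:
    if MarkdownStoryReader.is_markdown_story_file(candidate):
        print(f"'{candidate}' looks like a Markdown story or rule file")
# ---------------------------------------------------------------------------------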
+ """ + suffix = PurePath(file_path).suffix + + if suffix not in MARKDOWN_FILE_EXTENSIONS: + return False + + try: + with open( + file_path, encoding=io_utils.DEFAULT_ENCODING, errors="surrogateescape" + ) as lines: + return any( + MarkdownStoryReader._contains_story_or_rule_pattern(line) + for line in lines + ) + except Exception as e: + # catch-all because we might be loading files we are not expecting to load + logger.error( + f"Tried to check if '{file_path}' is a story file, but failed to " + f"read it. If this file contains story or rule data, you should " + f"investigate this error, otherwise it is probably best to " + f"move the file to a different location. Error: {e}" + ) + return False + + @staticmethod + def _contains_story_or_rule_pattern(text: Text) -> bool: + story_pattern = r".*##.+" + rule_pattern = r".*>>.+" + + return any(re.match(pattern, text) for pattern in [story_pattern, rule_pattern]) diff --git a/rasa/core/training/story_reader/story_reader.py b/rasa/core/training/story_reader/story_reader.py new file mode 100644 index 000000000000..ffa028fc6b53 --- /dev/null +++ b/rasa/core/training/story_reader/story_reader.py @@ -0,0 +1,102 @@ +import logging +from typing import Optional, Dict, Text, List, Any + +from rasa.core.domain import Domain +from rasa.core.events import SlotSet, ActionExecuted, Event +from rasa.core.exceptions import StoryParseError +from rasa.core.interpreter import NaturalLanguageInterpreter +from rasa.core.training.story_reader.story_step_builder import StoryStepBuilder +from rasa.core.training.structures import StoryStep + +logger = logging.getLogger(__name__) + + +class StoryReader: + """Helper class to read a story file.""" + + def __init__( + self, + interpreter: NaturalLanguageInterpreter, + domain: Optional[Domain] = None, + template_vars: Optional[Dict] = None, + use_e2e: bool = False, + source_name: Text = None, + unfold_or_utterances: bool = True, + ) -> None: + """Constructor for the StoryReader. + + Args: + interpreter: Interpreter to be used to parse intents. + domain: Domain object. + template_vars: Template variables to be replaced. + use_e2e: Specifies whether to use the e2e parser or not. + source_name: Name of the training data source. + unfold_or_utterances: Identifies if the user utterance is part of + an OR statement. This parameter is used only to simplify the conversation + from MD story files. Don't use it in other ways, because it ends up + in an invalid story that cannot be used for real training. + Default value is `True`, which preserves the expected behavior + of the reader. 
+ """ + self.story_steps = [] + self.current_step_builder: Optional[StoryStepBuilder] = None + self.domain = domain + self.interpreter = interpreter + self.template_variables = template_vars if template_vars else {} + self.use_e2e = use_e2e + self.source_name = source_name + self.unfold_or_utterances = unfold_or_utterances + + async def read_from_file(self, filename: Text) -> List[StoryStep]: + raise NotImplementedError + + def _add_current_stories_to_result(self): + if self.current_step_builder: + self.current_step_builder.flush() + self.story_steps.extend(self.current_step_builder.story_steps) + + def _new_story_part(self, name: Text, source_name: Text): + self._add_current_stories_to_result() + self.current_step_builder = StoryStepBuilder(name, source_name) + + def _new_rule_part(self, name: Text, source_name: Text): + self._add_current_stories_to_result() + self.current_step_builder = StoryStepBuilder(name, source_name, is_rule=True) + + def _add_event(self, event_name, parameters): + + # add 'name' only if event is not a SlotSet, + # because there might be a slot with slot_key='name' + if "name" not in parameters and event_name != SlotSet.type_name: + parameters["name"] = event_name + + parsed_events = Event.from_story_string( + event_name, parameters, default=ActionExecuted + ) + if parsed_events is None: + raise StoryParseError( + "Unknown event '{}'. It is neither an event " + "nor an action.".format(event_name) + ) + if self.current_step_builder is None: + raise StoryParseError( + "Failed to handle event '{}'. There is no " + "started story block available. " + "".format(event_name) + ) + + for p in parsed_events: + self.current_step_builder.add_event(p) + + def _add_checkpoint( + self, name: Text, conditions: Optional[Dict[Text, Any]] + ) -> None: + + # Ensure story part already has a name + if not self.current_step_builder: + raise StoryParseError( + "Checkpoint '{}' is at an invalid location. " + "Expected a story start.".format(name) + ) + + self.current_step_builder.add_checkpoint(name, conditions) diff --git a/rasa/core/training/story_reader/story_step_builder.py b/rasa/core/training/story_reader/story_step_builder.py new file mode 100644 index 000000000000..d7dc59dd0743 --- /dev/null +++ b/rasa/core/training/story_reader/story_step_builder.py @@ -0,0 +1,133 @@ +import logging +from typing import Text, Optional, Dict, Any, List + +from rasa.constants import DOCS_URL_STORIES +from rasa.core import utils +from rasa.core.events import UserUttered +from rasa.core.training.structures import ( + Checkpoint, + GENERATED_CHECKPOINT_PREFIX, + GENERATED_HASH_LENGTH, + STORY_START, + StoryStep, +) +import rasa.utils.common as common_utils + +logger = logging.getLogger(__name__) + + +class StoryStepBuilder: + def __init__(self, name: Text, source_name: Text, is_rule: bool = False) -> None: + self.name = name + self.source_name = source_name + self.story_steps = [] + self.current_steps = [] + self.start_checkpoints = [] + self.is_rule = is_rule + + def add_checkpoint(self, name: Text, conditions: Optional[Dict[Text, Any]]) -> None: + + # Depending on the state of the story part this + # is either a start or an end checkpoint + if not self.current_steps: + self.start_checkpoints.append(Checkpoint(name, conditions)) + else: + if conditions: + common_utils.raise_warning( + f"End or intermediate checkpoints " + f"do not support conditions! 
" + f"(checkpoint: {name})", + docs=DOCS_URL_STORIES + "#checkpoints", + ) + additional_steps = [] + for t in self.current_steps: + if t.end_checkpoints: + tcp = t.create_copy(use_new_id=True) + tcp.end_checkpoints = [Checkpoint(name)] + additional_steps.append(tcp) + else: + t.end_checkpoints = [Checkpoint(name)] + self.current_steps.extend(additional_steps) + + def _prev_end_checkpoints(self) -> List[Checkpoint]: + if not self.current_steps: + return self.start_checkpoints + else: + # makes sure we got each end name only once + end_names = {e.name for s in self.current_steps for e in s.end_checkpoints} + return [Checkpoint(name) for name in end_names] + + def add_user_messages( + self, messages: List[UserUttered], unfold_or_utterances: bool = True + ) -> None: + """Adds next story steps with the user's utterances. + + Args: + messages: User utterances. + unfold_or_utterances: Identifies if the user utterance is part of + an OR statement. This parameter is used only to simplify the conversation + from MD story files. Don't use it in other ways, because it ends up + in an invalid story that cannot be used for real training. + Default value is `True`, which preserves the expected behavior + of the reader. + """ + self.ensure_current_steps() + + if len(messages) == 1: + # If there is only one possible intent, we'll keep things simple + for t in self.current_steps: + t.add_user_message(messages[0]) + else: + # this simplifies conversion between formats, but breaks the logic + if not unfold_or_utterances: + for t in self.current_steps: + t.add_events(messages) + return + + # If there are multiple different intents the + # user can use to express the same thing + # we need to copy the blocks and create one + # copy for each possible message + prefix = GENERATED_CHECKPOINT_PREFIX + "OR_" + generated_checkpoint = utils.generate_id(prefix, GENERATED_HASH_LENGTH) + updated_steps = [] + for t in self.current_steps: + for m in messages: + copied = t.create_copy(use_new_id=True) + copied.add_user_message(m) + copied.end_checkpoints = [Checkpoint(generated_checkpoint)] + updated_steps.append(copied) + self.current_steps = updated_steps + + def add_event(self, event) -> None: + self.ensure_current_steps() + for t in self.current_steps: + t.add_event(event) + + def ensure_current_steps(self) -> None: + completed = [step for step in self.current_steps if step.end_checkpoints] + unfinished = [step for step in self.current_steps if not step.end_checkpoints] + self.story_steps.extend(completed) + if unfinished: + self.current_steps = unfinished + else: + self.current_steps = self._next_story_steps() + + def flush(self) -> None: + if self.current_steps: + self.story_steps.extend(self.current_steps) + self.current_steps = [] + + def _next_story_steps(self) -> List[StoryStep]: + start_checkpoints = self._prev_end_checkpoints() + if not start_checkpoints: + start_checkpoints = [Checkpoint(STORY_START)] + current_turns = [ + StoryStep( + block_name=self.name, + start_checkpoints=start_checkpoints, + source_name=self.source_name, + is_rule=self.is_rule, + ) + ] + return current_turns diff --git a/rasa/core/training/story_reader/yaml_story_reader.py b/rasa/core/training/story_reader/yaml_story_reader.py new file mode 100644 index 000000000000..218c32f8af5d --- /dev/null +++ b/rasa/core/training/story_reader/yaml_story_reader.py @@ -0,0 +1,443 @@ +import logging +from pathlib import Path +from typing import Dict, Text, List, Any, Optional, Union + +from rasa.utils.validation import validate_yaml_schema, 
InvalidYamlFileError +from ruamel.yaml.parser import ParserError + +import rasa.utils.common as common_utils +import rasa.utils.io as io_utils +from rasa.constants import DOCS_URL_STORIES, DOCS_URL_RULES +from rasa.core.constants import INTENT_MESSAGE_PREFIX +from rasa.core.actions.action import RULE_SNIPPET_ACTION_NAME +from rasa.core.events import UserUttered, SlotSet, ActiveLoop +from rasa.core.training.story_reader.story_reader import StoryReader +from rasa.core.training.structures import StoryStep +from rasa.data import YAML_FILE_EXTENSIONS +from rasa.nlu.constants import INTENT_NAME_KEY + +logger = logging.getLogger(__name__) + +KEY_STORIES = "stories" +KEY_STORY_NAME = "story" +KEY_RULES = "rules" +KEY_RULE_NAME = "rule" +KEY_STEPS = "steps" +KEY_ENTITIES = "entities" +KEY_USER_INTENT = "intent" +KEY_SLOT_NAME = "slot_was_set" +KEY_SLOT_VALUE = "value" +KEY_ACTIVE_LOOP = "active_loop" +KEY_ACTION = "action" +KEY_CHECKPOINT = "checkpoint" +KEY_CHECKPOINT_SLOTS = "slot_was_set" +KEY_METADATA = "metadata" +KEY_OR = "or" +KEY_RULE_CONDITION = "condition" +KEY_WAIT_FOR_USER_INPUT_AFTER_RULE = "wait_for_user_input" +KEY_RULE_FOR_CONVERSATION_START = "conversation_start" + + +CORE_SCHEMA_FILE = "core/schemas/stories.yml" + + +class YAMLStoryReader(StoryReader): + """Class that reads Core training data and rule data in YAML format.""" + + @classmethod + def from_reader(cls, reader: "YAMLStoryReader") -> "YAMLStoryReader": + """Create a reader from another reader. + + Args: + reader: Another reader. + + Returns: + A new reader instance. + """ + return cls( + reader.interpreter, + reader.domain, + reader.template_variables, + reader.use_e2e, + reader.source_name, + reader.unfold_or_utterances, + ) + + async def read_from_file(self, filename: Union[Text, Path]) -> List[StoryStep]: + """Read stories or rules from file. + + Args: + filename: Path to the story/rule file. + + Returns: + `StoryStep`s read from `filename`. + """ + self.source_name = filename + + try: + file_content = io_utils.read_file(filename, io_utils.DEFAULT_ENCODING) + validate_yaml_schema(file_content, CORE_SCHEMA_FILE) + yaml_content = io_utils.read_yaml(file_content) + except (ValueError, ParserError) as e: + common_utils.raise_warning( + f"Failed to read YAML from '{filename}', it will be skipped. Error: {e}" + ) + return [] + except InvalidYamlFileError as e: + raise ValueError from e + + return self.read_from_parsed_yaml(yaml_content) + + def read_from_parsed_yaml( + self, parsed_content: Dict[Text, Union[Dict, List]] + ) -> List[StoryStep]: + """Read stories from parsed YAML. + + Args: + parsed_content: The parsed YAML as a dictionary. + + Returns: + The parsed stories or rules. + """ + from rasa.validator import Validator + + if not Validator.validate_training_data_format_version( + parsed_content, self.source_name + ): + return [] + + for key, parser_class in { + KEY_STORIES: StoryParser, + KEY_RULES: RuleParser, + }.items(): + data = parsed_content.get(key, []) + parser = parser_class.from_reader(self) + parser.parse_data(data) + self.story_steps.extend(parser.get_steps()) + + return self.story_steps + + @staticmethod + def is_yaml_story_file(file_path: Text) -> bool: + """Check if file contains Core training data or rule data in YAML format. + + Args: + file_path: Path of the file to check. + + Returns: + `True` in case the file is a Core YAML training data or rule data file, + `False` otherwise. 
+ """ + suffix = Path(file_path).suffix + + if suffix and suffix not in YAML_FILE_EXTENSIONS: + return False + + try: + content = io_utils.read_yaml_file(file_path) + return any(key in content for key in [KEY_STORIES, KEY_RULES]) + except Exception as e: + # Using broad `Exception` because yaml library is not exposing all Errors + common_utils.raise_warning( + f"Tried to check if '{file_path}' is a story or rule file, but failed " + f"to read it. If this file contains story or rule data, you should " + f"investigate this error, otherwise it is probably best to " + f"move the file to a different location. Error: {e}" + ) + return False + + def get_steps(self) -> List[StoryStep]: + self._add_current_stories_to_result() + return self.story_steps + + def parse_data(self, data: List[Dict[Text, Any]]) -> None: + item_title = self._get_item_title() + + for item in data: + if not isinstance(item, dict): + common_utils.raise_warning( + f"Unexpected block found in '{self.source_name}':\n" + f"{item}\nItems under the " + f"'{self._get_plural_item_title()}' key must be YAML " + f"dictionaries. It will be skipped.", + docs=self._get_docs_link(), + ) + continue + + if item_title in item.keys(): + self._parse_plain_item(item) + + def _parse_plain_item(self, item: Dict[Text, Any]) -> None: + item_name = item.get(self._get_item_title(), "") + + if not item_name: + common_utils.raise_warning( + f"Issue found in '{self.source_name}': \n" + f"{item}\n" + f"The {self._get_item_title()} has an empty name. " + f"{self._get_plural_item_title().capitalize()} should " + f"have a name defined under '{self._get_item_title()}' " + f"key. It will be skipped.", + docs=self._get_docs_link(), + ) + + steps: List[Union[Text, Dict[Text, Any]]] = item.get(KEY_STEPS, []) + + if not steps: + common_utils.raise_warning( + f"Issue found in '{self.source_name}': " + f"The {self._get_item_title()} has no steps. " + f"It will be skipped.", + docs=self._get_docs_link(), + ) + return + + self._new_part(item_name, item) + + for step in steps: + self._parse_step(step) + + self._close_part(item) + + def _new_part(self, item_name: Text, item: Dict[Text, Any]) -> None: + raise NotImplementedError() + + def _close_part(self, item: Dict[Text, Any]) -> None: + pass + + def _parse_step(self, step: Union[Text, Dict[Text, Any]]) -> None: + if isinstance(step, str): + common_utils.raise_warning( + f"Issue found in '{self.source_name}':\n" + f"Found an unexpected step in the {self._get_item_title()} " + f"description:\n{step}\nThe step is of type `str` " + f"which is only allowed for the rule snippet action " + f"'{RULE_SNIPPET_ACTION_NAME}'. It will be skipped.", + docs=self._get_docs_link(), + ) + elif KEY_USER_INTENT in step.keys(): + self._parse_user_utterance(step) + elif KEY_OR in step.keys(): + self._parse_or_statement(step) + elif KEY_ACTION in step.keys(): + self._parse_action(step) + elif KEY_CHECKPOINT in step.keys(): + self._parse_checkpoint(step) + # This has to be after the checkpoint test as there can be a slot key within + # a checkpoint. 
+ elif KEY_SLOT_NAME in step.keys(): + self._parse_slot(step) + elif KEY_ACTIVE_LOOP in step.keys(): + self._parse_active_loop(step[KEY_ACTIVE_LOOP]) + elif KEY_METADATA in step.keys(): + pass + else: + common_utils.raise_warning( + f"Issue found in '{self.source_name}':\n" + f"Found an unexpected step in the {self._get_item_title()} " + f"description:\n{step}\nIt will be skipped.", + docs=self._get_docs_link(), + ) + + def _get_item_title(self) -> Text: + raise NotImplementedError() + + def _get_plural_item_title(self) -> Text: + raise NotImplementedError() + + def _get_docs_link(self) -> Text: + raise NotImplementedError() + + def _parse_user_utterance(self, step: Dict[Text, Any]) -> None: + utterance = self._parse_raw_user_utterance(step) + if utterance: + self._validate_that_utterance_is_in_domain(utterance) + self.current_step_builder.add_user_messages([utterance]) + + def _validate_that_utterance_is_in_domain(self, utterance: UserUttered) -> None: + intent_name = utterance.intent.get(INTENT_NAME_KEY) + + if not self.domain: + logger.debug( + "Skipped validating if intent is in domain as domain " "is `None`." + ) + return + + if intent_name not in self.domain.intents: + common_utils.raise_warning( + f"Issue found in '{self.source_name}': \n" + f"Found intent '{intent_name}' in stories which is not part of the " + f"domain.", + docs=DOCS_URL_STORIES, + ) + + def _parse_or_statement(self, step: Dict[Text, Any]) -> None: + utterances = [] + + for utterance in step.get(KEY_OR): + if KEY_USER_INTENT in utterance.keys(): + utterance = self._parse_raw_user_utterance(utterance) + if utterance: + utterances.append(utterance) + else: + common_utils.raise_warning( + f"Issue found in '{self.source_name}': \n" + f"`OR` statement can only have '{KEY_USER_INTENT}' " + f"as a sub-element. This step will be skipped:\n" + f"'{utterance}'\n", + docs=self._get_docs_link(), + ) + return + + self.current_step_builder.add_user_messages(utterances) + + def _parse_raw_user_utterance(self, step: Dict[Text, Any]) -> Optional[UserUttered]: + user_utterance = step.get(KEY_USER_INTENT, "").strip() + + if not user_utterance: + common_utils.raise_warning( + f"Issue found in '{self.source_name}':\n" + f"User utterance cannot be empty. " + f"This {self._get_item_title()} step will be skipped:\n" + f"{step}", + docs=self._get_docs_link(), + ) + + raw_entities = step.get(KEY_ENTITIES, []) + final_entities = self._parse_raw_entities(raw_entities) + + if user_utterance.startswith(INTENT_MESSAGE_PREFIX): + common_utils.raise_warning( + f"Issue found in '{self.source_name}':\n" + f"User intent '{user_utterance}' starts with " + f"'{INTENT_MESSAGE_PREFIX}'. 
This is not required.", + docs=self._get_docs_link(), + ) + # Remove leading slash + user_utterance = user_utterance[1:] + + intent = {"name": user_utterance, "confidence": 1.0} + + return UserUttered(user_utterance, intent, final_entities) + + @staticmethod + def _parse_raw_entities( + raw_entities: Union[List[Dict[Text, Text]], List[Text]] + ) -> List[Dict[Text, Text]]: + final_entities = [] + for entity in raw_entities: + if isinstance(entity, dict): + for key, value in entity.items(): + final_entities.append({"entity": key, "value": value}) + else: + final_entities.append({"entity": entity, "value": ""}) + + return final_entities + + def _parse_slot(self, step: Dict[Text, Any]) -> None: + + for slot in step.get(KEY_CHECKPOINT_SLOTS, []): + if isinstance(slot, dict): + for key, value in slot.items(): + self._add_event(SlotSet.type_name, {key: value}) + elif isinstance(slot, str): + self._add_event(SlotSet.type_name, {slot: None}) + else: + common_utils.raise_warning( + f"Issue found in '{self.source_name}':\n" + f"Invalid slot: \n{slot}\n" + f"Items under the '{KEY_CHECKPOINT_SLOTS}' key must be " + f"YAML dictionaries or Strings. The checkpoint will be skipped.", + docs=self._get_docs_link(), + ) + return + + def _parse_action(self, step: Dict[Text, Any]) -> None: + + action_name = step.get(KEY_ACTION, "") + if not action_name: + common_utils.raise_warning( + f"Issue found in '{self.source_name}': \n" + f"Action name cannot be empty. " + f"This {self._get_item_title()} step will be skipped:\n" + f"{step}", + docs=self._get_docs_link(), + ) + return + + self._add_event(action_name, {}) + + def _parse_active_loop(self, active_loop_name: Optional[Text]) -> None: + self._add_event(ActiveLoop.type_name, {"name": active_loop_name}) + + def _parse_checkpoint(self, step: Dict[Text, Any]) -> None: + + checkpoint_name = step.get(KEY_CHECKPOINT, "") + slots = step.get(KEY_CHECKPOINT_SLOTS, []) + + slots_dict = {} + + for slot in slots: + if not isinstance(slot, dict): + common_utils.raise_warning( + f"Issue found in '{self.source_name}':\n" + f"Checkpoint '{checkpoint_name}' has an invalid slot: " + f"{slots}\nItems under the '{KEY_CHECKPOINT_SLOTS}' key must be " + f"YAML dictionaries. 
The checkpoint will be skipped.", + docs=self._get_docs_link(), + ) + return + + for key, value in slot.items(): + slots_dict[key] = value + + self._add_checkpoint(checkpoint_name, slots_dict) + + +class StoryParser(YAMLStoryReader): + """Encapsulate story-specific parser behavior.""" + + def _new_part(self, item_name: Text, item: Dict[Text, Any]) -> None: + self._new_story_part(item_name, self.source_name) + + def _get_item_title(self) -> Text: + return KEY_STORY_NAME + + def _get_plural_item_title(self) -> Text: + return KEY_STORIES + + def _get_docs_link(self) -> Text: + return DOCS_URL_STORIES + + +class RuleParser(YAMLStoryReader): + """Encapsulate rule-specific parser behavior.""" + + def _new_part(self, item_name: Text, item: Dict[Text, Any]) -> None: + self._new_rule_part(item_name, self.source_name) + conditions = item.get(KEY_RULE_CONDITION, []) + self._parse_rule_conditions(conditions) + if not item.get(KEY_RULE_FOR_CONVERSATION_START): + self._parse_rule_snippet_action() + + def _parse_rule_conditions( + self, conditions: List[Union[Text, Dict[Text, Any]]] + ) -> None: + for condition in conditions: + self._parse_step(condition) + + def _close_part(self, item: Dict[Text, Any]) -> None: + if item.get(KEY_WAIT_FOR_USER_INPUT_AFTER_RULE) is False: + self._parse_rule_snippet_action() + + def _get_item_title(self) -> Text: + return KEY_RULE_NAME + + def _get_plural_item_title(self) -> Text: + return KEY_RULES + + def _get_docs_link(self) -> Text: + return DOCS_URL_RULES + + def _parse_rule_snippet_action(self) -> None: + self._add_event(RULE_SNIPPET_ACTION_NAME, {}) diff --git a/tests/nlu/training/__init__.py b/rasa/core/training/story_writer/__init__.py similarity index 100% rename from tests/nlu/training/__init__.py rename to rasa/core/training/story_writer/__init__.py diff --git a/rasa/core/training/story_writer/yaml_story_writer.py b/rasa/core/training/story_writer/yaml_story_writer.py new file mode 100644 index 000000000000..4d7740b399b5 --- /dev/null +++ b/rasa/core/training/story_writer/yaml_story_writer.py @@ -0,0 +1,221 @@ +from collections import OrderedDict +from pathlib import Path + +import ruamel.yaml as ruamel_yaml +from typing import List, Text, Union, Optional + +from rasa.utils.common import raise_warning +from ruamel.yaml.scalarstring import DoubleQuotedScalarString + +from rasa.constants import LATEST_TRAINING_DATA_FORMAT_VERSION, DOCS_URL_STORIES +from rasa.core.events import UserUttered, ActionExecuted, SlotSet, ActiveLoop +from rasa.core.training.story_reader.yaml_story_reader import ( + KEY_STORIES, + KEY_STORY_NAME, + KEY_USER_INTENT, + KEY_ENTITIES, + KEY_ACTION, + KEY_STEPS, + KEY_CHECKPOINT, + KEY_SLOT_NAME, + KEY_CHECKPOINT_SLOTS, + KEY_OR, +) +from rasa.core.training.structures import StoryStep, Checkpoint + +import rasa.utils.io as io_utils + + +class YAMLStoryWriter: + """Writes Core training data into a file in a YAML format. """ + + def dumps(self, story_steps: List[StoryStep]) -> Text: + """Turns Story steps into a string. + + Args: + story_steps: Original story steps to be converted to the YAML. + + Returns: + String with story steps in the YAML format. + """ + stream = ruamel_yaml.StringIO() + self.dump(stream, story_steps) + return stream.getvalue() + + def dump( + self, + target: Union[Text, Path, ruamel_yaml.StringIO], + story_steps: List[StoryStep], + ) -> None: + """Writes Story steps into a target file/stream. + + Args: + target: name of the target file/stream to write the YAML to. 
+ story_steps: Original story steps to be converted to the YAML. + """ + from rasa.validator import KEY_TRAINING_DATA_FORMAT_VERSION + + self.target = target + + stories = [] + for story_step in story_steps: + processed_story_step = self.process_story_step(story_step) + if processed_story_step: + stories.append(processed_story_step) + + result = OrderedDict() + result[KEY_TRAINING_DATA_FORMAT_VERSION] = DoubleQuotedScalarString( + LATEST_TRAINING_DATA_FORMAT_VERSION + ) + result[KEY_STORIES] = stories + + io_utils.write_yaml(result, self.target, True) + + def process_story_step(self, story_step: StoryStep) -> Optional[OrderedDict]: + """Converts a single story step into an ordered dict. + + Args: + story_step: A single story step to be converted to the dict. + + Returns: + Dict with a story step. + """ + if self.story_contains_forms(story_step): + raise_warning( + f'File "{self.target}" contains a story "{story_step.block_name}" ' + f"that has form(s) in it. This story cannot be converted automatically " + f"because of the new Rules system in Rasa Open Source " + f"version {LATEST_TRAINING_DATA_FORMAT_VERSION}. " + f"Please convert this story manually, it will be skipped now.", + docs=DOCS_URL_STORIES, + ) + return None + + result = OrderedDict() + result[KEY_STORY_NAME] = story_step.block_name + steps = self.process_checkpoints(story_step.start_checkpoints) + + for event in story_step.events: + if isinstance(event, list): + utterances = self.process_or_utterances(event) + steps.append(utterances) + elif isinstance(event, UserUttered): + utterances = self.process_user_utterance(event) + steps.append(utterances) + elif isinstance(event, ActionExecuted): + steps.append(self.process_action(event)) + elif isinstance(event, SlotSet): + steps.append(self.process_slot(event)) + + steps.extend(self.process_checkpoints(story_step.end_checkpoints)) + + result[KEY_STEPS] = steps + + return result + + @staticmethod + def story_contains_forms(story_step) -> bool: + """Checks if the story step contains form actions. + + Args: + story_step: A single story step. + + Returns: + `True` if the `story_step` contains at least one form action, + `False` otherwise. + """ + return any( + [event for event in story_step.events if isinstance(event, ActiveLoop)] + ) + + @staticmethod + def process_user_utterance(user_utterance: UserUttered) -> OrderedDict: + """Converts a single user utterance into an ordered dict. + + Args: + user_utterance: Original user utterance object. + + Returns: + Dict with a user utterance. + """ + result = OrderedDict() + result[KEY_USER_INTENT] = user_utterance.intent["name"] + + if len(user_utterance.entities): + entities = [] + for entity in user_utterance.entities: + if entity["value"]: + entities.append(OrderedDict([(entity["entity"], entity["value"])])) + else: + entities.append(entity["entity"]) + result[KEY_ENTITIES] = entities + + return result + + @staticmethod + def process_action(action: ActionExecuted) -> OrderedDict: + """Converts a single action into an ordered dict. + + Args: + action: Original action object. + + Returns: + Dict with an action. + """ + result = OrderedDict() + result[KEY_ACTION] = action.action_name + + return result + + @staticmethod + def process_slot(event: SlotSet): + """Converts a single `SlotSet` event into an ordered dict. + + Args: + event: Original `SlotSet` event. + + Returns: + Dict with an `SlotSet` event. 
+ """ + return OrderedDict([(KEY_SLOT_NAME, [{event.key: event.value}])]) + + @staticmethod + def process_checkpoints(checkpoints: List[Checkpoint]) -> List[OrderedDict]: + """Converts checkpoints event into an ordered dict. + + Args: + checkpoints: List of original checkpoint. + + Returns: + List of converted checkpoints. + """ + result = [] + for checkpoint in checkpoints: + next_checkpoint = OrderedDict([(KEY_CHECKPOINT, checkpoint.name)]) + if checkpoint.conditions: + next_checkpoint[KEY_CHECKPOINT_SLOTS] = [ + {key: value} for key, value in checkpoint.conditions.items() + ] + result.append(next_checkpoint) + return result + + def process_or_utterances(self, utterances: List[UserUttered]) -> OrderedDict: + """Converts user utterance containing the `OR` statement. + + Args: + utterances: User utterances belonging to the same `OR` statement. + + Returns: + Dict with converted user utterances. + """ + return OrderedDict( + [ + ( + KEY_OR, + [ + self.process_user_utterance(utterance) + for utterance in utterances + ], + ) + ] + ) diff --git a/rasa/core/training/structures.py b/rasa/core/training/structures.py index f16924f95cfd..e0af764aa4d3 100644 --- a/rasa/core/training/structures.py +++ b/rasa/core/training/structures.py @@ -1,23 +1,20 @@ import json import logging -import sys -import uuid from collections import deque, defaultdict -from typing import List, Text, Dict, Optional, Tuple, Any, Set, ValuesView + +import uuid +import typing +from typing import List, Text, Dict, Optional, Tuple, Any, Set, ValuesView, Union from rasa.core import utils -from rasa.core.actions.action import ACTION_LISTEN_NAME +from rasa.core.actions.action import ACTION_LISTEN_NAME, ACTION_SESSION_START_NAME from rasa.core.conversation import Dialogue from rasa.core.domain import Domain -from rasa.core.events import ( - UserUttered, - ActionExecuted, - Form, - FormValidation, - SlotSet, - Event, - ActionExecutionRejected, -) +from rasa.core.events import UserUttered, ActionExecuted, Event, SessionStarted +from rasa.core.trackers import DialogueStateTracker + +if typing.TYPE_CHECKING: + import networkx as nx logger = logging.getLogger(__name__) @@ -39,31 +36,7 @@ STEP_COUNT = 1 -class StoryStringHelper(object): - """A helper class to mark story steps that are inside a form with `form: ` - """ - - def __init__( - self, - active_form=None, - form_validation=True, - form_rejected=False, - form_prefix_string="", - no_form_prefix_string="", - ): - # track active form - self.active_form = active_form - # track whether a from should be validated - self.form_validation = form_validation - # track whether a from was rejected - self.form_rejected = form_rejected - # save story strings with form prefix for later - self.form_prefix_string = form_prefix_string - # save story strings without form prefix for later - self.no_form_prefix_string = no_form_prefix_string - - -class Checkpoint(object): +class Checkpoint: def __init__( self, name: Optional[Text], conditions: Optional[Dict[Text, Any]] = None ) -> None: @@ -71,11 +44,13 @@ def __init__( self.name = name self.conditions = conditions if conditions else {} - def as_story_string(self): + def as_story_string(self) -> Text: dumped_conds = json.dumps(self.conditions) if self.conditions else "" - return "{}{}".format(self.name, dumped_conds) + return f"{self.name}{dumped_conds}" - def filter_trackers(self, trackers): + def filter_trackers( + self, trackers: List[DialogueStateTracker] + ) -> List[DialogueStateTracker]: """Filters out all trackers that do not satisfy the 
conditions.""" if not self.conditions: @@ -85,13 +60,13 @@ def filter_trackers(self, trackers): trackers = [t for t in trackers if t.get_slot(slot_name) == slot_value] return trackers - def __repr__(self): + def __repr__(self) -> Text: return "Checkpoint(name={!r}, conditions={})".format( self.name, json.dumps(self.conditions) ) -class StoryStep(object): +class StoryStep: """A StoryStep is a section of a story block between two checkpoints. NOTE: Checkpoints are not only limited to those manually written @@ -104,221 +79,123 @@ def __init__( block_name: Optional[Text] = None, start_checkpoints: Optional[List[Checkpoint]] = None, end_checkpoints: Optional[List[Checkpoint]] = None, - events: Optional[List[Event]] = None, + events: Optional[List[Union[Event, List[Event]]]] = None, + source_name: Optional[Text] = None, + is_rule: bool = None, ) -> None: self.end_checkpoints = end_checkpoints if end_checkpoints else [] self.start_checkpoints = start_checkpoints if start_checkpoints else [] self.events = events if events else [] self.block_name = block_name + self.source_name = source_name + self.is_rule = is_rule # put a counter prefix to uuid to get reproducible sorting results global STEP_COUNT self.id = "{}_{}".format(STEP_COUNT, uuid.uuid4().hex) STEP_COUNT += 1 - self.story_string_helper = StoryStringHelper() - - def create_copy(self, use_new_id): + def create_copy(self, use_new_id: bool) -> "StoryStep": copied = StoryStep( self.block_name, self.start_checkpoints, self.end_checkpoints, self.events[:], + self.source_name, + self.is_rule, ) if not use_new_id: copied.id = self.id return copied - def add_user_message(self, user_message): + def add_user_message(self, user_message: UserUttered) -> None: self.add_event(user_message) - def add_event(self, event): + def add_event(self, event: Event) -> None: self.events.append(event) - @staticmethod - def _checkpoint_string(story_step_element): - return "> {}\n".format(story_step_element.as_story_string()) + def add_events(self, events: List[Event]) -> None: + self.events.append(events) @staticmethod - def _user_string(story_step_element, e2e, prefix=""): - return "* {}{}\n".format(prefix, story_step_element.as_story_string(e2e)) - - def _store_user_strings(self, story_step_element, e2e, prefix=""): - self.story_string_helper.no_form_prefix_string += self._user_string( - story_step_element, e2e - ) - self.story_string_helper.form_prefix_string += self._user_string( - story_step_element, e2e, prefix - ) + def _checkpoint_string(story_step_element: Checkpoint) -> Text: + return f"> {story_step_element.as_story_string()}\n" @staticmethod - def _bot_string(story_step_element, prefix=""): - return " - {}{}\n".format(prefix, story_step_element.as_story_string()) - - def _store_bot_strings(self, story_step_element, prefix=""): - self.story_string_helper.no_form_prefix_string += self._bot_string( - story_step_element - ) - self.story_string_helper.form_prefix_string += self._bot_string( - story_step_element, prefix - ) + def _user_string(story_step_element: UserUttered, e2e: bool) -> Text: + return f"* {story_step_element.as_story_string(e2e)}\n" - def _reset_stored_strings(self): - self.story_string_helper.form_prefix_string = "" - self.story_string_helper.no_form_prefix_string = "" + @staticmethod + def _bot_string(story_step_element: Event) -> Text: + return f" - {story_step_element.as_story_string()}\n" - def as_story_string(self, flat=False, e2e=False): + def as_story_string(self, flat: bool = False, e2e: bool = False) -> Text: # if the result should 
be flattened, we # will exclude the caption and any checkpoints. - - for s in self.start_checkpoints: - if s.name == STORY_START: - # first story step in the story, so reset helper - self.story_string_helper = StoryStringHelper() - if flat: result = "" else: - result = "\n## {}\n".format(self.block_name) + result = f"\n## {self.block_name}\n" for s in self.start_checkpoints: if s.name != STORY_START: result += self._checkpoint_string(s) for s in self.events: - if isinstance(s, UserUttered): - if self.story_string_helper.active_form is None: - result += self._user_string(s, e2e) - else: - # form is active - # it is not known whether the form will be - # successfully executed, so store this - # story string for later - self._store_user_strings(s, e2e, FORM_PREFIX) - - elif isinstance(s, Form): - # form got either activated or deactivated - self.story_string_helper.active_form = s.name - - if self.story_string_helper.active_form is None: - # form deactivated, so form succeeded, - # so add story string with form prefix - result += self.story_string_helper.form_prefix_string - # remove all stored story strings - self._reset_stored_strings() - - result += self._bot_string(s) - - elif isinstance(s, FormValidation): - self.story_string_helper.form_validation = s.validate - - elif isinstance(s, ActionExecutionRejected): - if s.action_name == self.story_string_helper.active_form: - # form rejected - self.story_string_helper.form_rejected = True - - elif isinstance(s, ActionExecuted): - if self._is_action_listen(s): - pass - elif self.story_string_helper.active_form is None: - result += self._bot_string(s) - else: - # form is active - if self.story_string_helper.form_rejected: - if ( - self.story_string_helper.form_prefix_string - and self.story_string_helper.form_validation - and s.action_name == self.story_string_helper.active_form - ): - # if there is something in `form_prefix_string`, - # add action_listen before it, - # because this form user input will be ignored by core - # and therefore action_listen will not be automatically - # added during reading the stories - result += self._bot_string( - ActionExecuted(ACTION_LISTEN_NAME) - ) - result += self.story_string_helper.form_prefix_string - elif self.story_string_helper.no_form_prefix_string: - result += self.story_string_helper.no_form_prefix_string - # form rejected, add story string without form prefix - result += self._bot_string(s) - else: - # form succeeded, so add story string with form prefix - result += self.story_string_helper.form_prefix_string - result += self._bot_string(s, FORM_PREFIX) - - # remove all stored story strings - self._reset_stored_strings() - - if s.action_name == self.story_string_helper.active_form: - # form was successfully executed - self.story_string_helper.form_rejected = False - - self.story_string_helper.form_validation = True - - elif isinstance(s, SlotSet): - if self.story_string_helper.active_form is None: - result += self._bot_string(s) - else: - # form is active - # it is not known whether the form will be - # successfully executed, so store this - # story string for later - # slots should be always printed without prefix - self._store_bot_strings(s) + if ( + self._is_action_listen(s) + or self._is_action_session_start(s) + or isinstance(s, SessionStarted) + ): + continue + if isinstance(s, UserUttered): + result += self._user_string(s, e2e) elif isinstance(s, Event): converted = s.as_story_string() if converted: - if self.story_string_helper.active_form is None: - result += self._bot_string(s) - else: - 
# form is active - # it is not known whether the form will be - # successfully executed, so store this - # story string for later - self._store_bot_strings(s, FORM_PREFIX) - + result += self._bot_string(s) else: - raise Exception("Unexpected element in story step: {}".format(s)) - - if ( - not self.end_checkpoints - and self.story_string_helper.active_form is not None - ): - # there are no end checkpoints - # form is active - # add story string with form prefix - result += self.story_string_helper.form_prefix_string - # remove all stored story strings - self._reset_stored_strings() + raise Exception(f"Unexpected element in story step: {s}") if not flat: - for e in self.end_checkpoints: - result += "> {}\n".format(e.as_story_string()) + for s in self.end_checkpoints: + result += self._checkpoint_string(s) return result @staticmethod - def _is_action_listen(event): + def _is_action_listen(event: Event) -> bool: # this is not an `isinstance` because # we don't want to allow subclasses here + # pytype: disable=attribute-error return type(event) == ActionExecuted and event.action_name == ACTION_LISTEN_NAME + # pytype: enable=attribute-error - def _add_action_listen(self, events): + @staticmethod + def _is_action_session_start(event: Event) -> bool: + # this is not an `isinstance` because + # we don't want to allow subclasses here + # pytype: disable=attribute-error + return ( + type(event) == ActionExecuted + and event.action_name == ACTION_SESSION_START_NAME + ) + # pytype: enable=attribute-error + + def _add_action_listen(self, events: List[Event]) -> None: if not events or not self._is_action_listen(events[-1]): # do not add second action_listen events.append(ActionExecuted(ACTION_LISTEN_NAME)) def explicit_events( self, domain: Domain, should_append_final_listen: bool = True - ) -> List[Event]: - """Returns events contained in the story step - including implicit events. + ) -> List[Union[Event, List[Event]]]: + """Returns events contained in the story step including implicit events. Not all events are always listed in the story dsl. This includes listen actions as well as implicitly set slots. This functions makes these events explicit and - returns them with the rest of the steps events.""" + returns them with the rest of the steps events. 
+ """ events = [] @@ -335,22 +212,24 @@ def explicit_events( return events - def __repr__(self): + def __repr__(self) -> Text: return ( "StoryStep(" "block_name={!r}, " "start_checkpoints={!r}, " "end_checkpoints={!r}, " + "is_rule={!r}, " "events={!r})".format( self.block_name, self.start_checkpoints, self.end_checkpoints, + self.is_rule, self.events, ) ) -class Story(object): +class Story: def __init__( self, story_steps: List[StoryStep] = None, story_name: Optional[Text] = None ) -> None: @@ -358,7 +237,7 @@ def __init__( self.story_name = story_name @staticmethod - def from_events(events, story_name=None): + def from_events(events: List[Event], story_name: Optional[Text] = None) -> "Story": """Create a story from a list of events.""" story_step = StoryStep() @@ -366,7 +245,7 @@ def from_events(events, story_name=None): story_step.add_event(event) return Story([story_step], story_name) - def as_dialogue(self, sender_id, domain): + def as_dialogue(self, sender_id: Text, domain: Domain) -> Dialogue: events = [] for step in self.story_steps: events.extend( @@ -376,35 +255,29 @@ def as_dialogue(self, sender_id, domain): events.append(ActionExecuted(ACTION_LISTEN_NAME)) return Dialogue(sender_id, events) - def as_story_string(self, flat=False, e2e=False): + def as_story_string(self, flat: bool = False, e2e: bool = False) -> Text: story_content = "" - - # initialize helper for first story step - story_string_helper = StoryStringHelper() - for step in self.story_steps: - # use helper from previous story step - step.story_string_helper = story_string_helper - # create string for current story step story_content += step.as_story_string(flat, e2e) - # override helper for next story step - story_string_helper = step.story_string_helper if flat: if self.story_name: name = self.story_name else: name = "Generated Story {}".format(hash(story_content)) - return "## {}\n{}".format(name, story_content) + return f"## {name}\n{story_content}" else: return story_content - def dump_to_file(self, filename, flat=False, e2e=False): - with open(filename, "a", encoding="utf-8") as f: - f.write(self.as_story_string(flat, e2e)) + def dump_to_file( + self, filename: Text, flat: bool = False, e2e: bool = False + ) -> None: + from rasa.utils import io + io.write_text_file(self.as_story_string(flat, e2e), filename, append=True) -class StoryGraph(object): + +class StoryGraph: """Graph of the story-steps pooled from all stories in the training data.""" def __init__( @@ -467,15 +340,7 @@ def with_cycles_removed(self) -> "StoryGraph": # we need to remove the start steps and replace them with steps ending # in a special end checkpoint - # as in python 3.5, dict is not ordered, in order to generate - # reproducible result with random seed in python 3.5, we have - # to use OrderedDict - if sys.version_info >= (3, 6): - story_steps = {s.id: s for s in self.story_steps} - else: - from collections import OrderedDict - - story_steps = OrderedDict([(s.id, s) for s in self.story_steps]) + story_steps = {s.id: s for s in self.story_steps} # collect all overlapping checkpoints # we will remove unused start ones @@ -658,7 +523,7 @@ def as_story_string(self) -> Text: @staticmethod def order_steps( - story_steps: List[StoryStep] + story_steps: List[StoryStep], ) -> Tuple[deque, List[Tuple[Text, Text]]]: """Topological sort of the steps returning the ids of the steps.""" @@ -673,7 +538,7 @@ def order_steps( @staticmethod def _group_by_start_checkpoint( - story_steps: List[StoryStep] + story_steps: List[StoryStep], ) -> Dict[Text, 
List[StoryStep]]: """Returns all the start checkpoint of the steps""" @@ -733,7 +598,7 @@ def dfs(node): return ordered, sorted(removed_edges) - def visualize(self, output_file=None): + def visualize(self, output_file: Optional[Text] = None) -> "nx.MultiDiGraph": import networkx as nx from rasa.core.training import visualization # pytype: disable=pyi-error from colorhash import ColorHash @@ -742,7 +607,7 @@ def visualize(self, output_file=None): next_node_idx = [0] nodes = {"STORY_START": 0, "STORY_END": -1} - def ensure_checkpoint_is_drawn(cp): + def ensure_checkpoint_is_drawn(cp: Checkpoint) -> None: if cp.name not in nodes: next_node_idx[0] += 1 nodes[cp.name] = next_node_idx[0] diff --git a/rasa/core/training/visualization.py b/rasa/core/training/visualization.py index 764469ac752a..a8368ba1719f 100644 --- a/rasa/core/training/visualization.py +++ b/rasa/core/training/visualization.py @@ -1,8 +1,7 @@ from collections import defaultdict, deque import random -import re -from typing import Any, Text, List, Dict, Optional, TYPE_CHECKING +from typing import Any, Text, List, Dict, Optional, TYPE_CHECKING, Set from rasa.core.actions.action import ACTION_LISTEN_NAME from rasa.core.domain import Domain @@ -24,14 +23,14 @@ VISUALIZATION_TEMPLATE_PATH = "/visualization.html" -class UserMessageGenerator(object): - def __init__(self, nlu_training_data): +class UserMessageGenerator: + def __init__(self, nlu_training_data) -> None: self.nlu_training_data = nlu_training_data self.mapping = self._create_reverse_mapping(self.nlu_training_data) @staticmethod def _create_reverse_mapping( - data: "TrainingData" + data: "TrainingData", ) -> Dict[Dict[Text, Any], List["Message"]]: """Create a mapping from intent to messages @@ -44,12 +43,12 @@ def _create_reverse_mapping( return d @staticmethod - def _contains_same_entity(entities, e): + def _contains_same_entity(entities, e) -> bool: return entities.get(e.get("entity")) is None or entities.get( e.get("entity") ) != e.get("value") - def message_for_data(self, structured_info): + def message_for_data(self, structured_info) -> Any: """Find a data sample with the same intent and entities. Given the parsed data from a message (intent and entities) finds a @@ -71,7 +70,7 @@ def message_for_data(self, structured_info): return structured_info.get("text") -def _fingerprint_node(graph, node, max_history): +def _fingerprint_node(graph, node, max_history) -> Set[Text]: """Fingerprint a node in a graph. Can be used to identify nodes that are similar and can be merged within the @@ -108,20 +107,20 @@ def _fingerprint_node(graph, node, max_history): if empty: continuations.append(candidate) return { - " - ".join([graph.node[node]["label"] for node in continuation]) + " - ".join([graph.nodes[node]["label"] for node in continuation]) for continuation in continuations } -def _incoming_edges(graph, node): +def _incoming_edges(graph, node) -> set: return {(prev_node, k) for prev_node, _, k in graph.in_edges(node, keys=True)} -def _outgoing_edges(graph, node): +def _outgoing_edges(graph, node) -> set: return {(succ_node, k) for _, succ_node, k in graph.out_edges(node, keys=True)} -def _outgoing_edges_are_similar(graph, node_a, node_b): +def _outgoing_edges_are_similar(graph, node_a, node_b) -> bool: """If the outgoing edges from the two nodes are similar enough, it doesn't matter if you are in a or b. 
@@ -142,9 +141,9 @@ def _outgoing_edges_are_similar(graph, node_a, node_b): return a_edges == b_edges or not a_edges or not b_edges -def _nodes_are_equivalent(graph, node_a, node_b, max_history): +def _nodes_are_equivalent(graph, node_a, node_b, max_history) -> bool: """Decides if two nodes are equivalent based on their fingerprints.""" - return graph.node[node_a]["label"] == graph.node[node_b]["label"] and ( + return graph.nodes[node_a]["label"] == graph.nodes[node_b]["label"] and ( _outgoing_edges_are_similar(graph, node_a, node_b) or _incoming_edges(graph, node_a) == _incoming_edges(graph, node_b) or _fingerprint_node(graph, node_a, max_history) @@ -152,7 +151,7 @@ def _nodes_are_equivalent(graph, node_a, node_b, max_history): ) -def _add_edge(graph, u, v, key, label=None, **kwargs): +def _add_edge(graph, u, v, key, label=None, **kwargs) -> None: """Adds an edge to the graph if the edge is not already present. Uses the label as the key.""" @@ -169,7 +168,7 @@ def _add_edge(graph, u, v, key, label=None, **kwargs): _transfer_style(kwargs, d) -def _transfer_style(source, target): +def _transfer_style(source, target: Dict[Text, Any]) -> Dict[Text, Any]: """Copy over class names from source to target for all special classes. Used if a node is highlighted and merged with another node.""" @@ -189,7 +188,7 @@ def _transfer_style(source, target): return target -def _merge_equivalent_nodes(graph, max_history): +def _merge_equivalent_nodes(graph, max_history) -> None: """Searches for equivalent nodes in the graph and merges them.""" changed = True @@ -223,7 +222,7 @@ def _merge_equivalent_nodes(graph, max_history): succ_node, k, d.get("label"), - **{"class": d.get("class", "")} + **{"class": d.get("class", "")}, ) graph.remove_edge(j, succ_node) # moves all incoming edges to the other node @@ -235,7 +234,7 @@ def _merge_equivalent_nodes(graph, max_history): i, k, d.get("label"), - **{"class": d.get("class", "")} + **{"class": d.get("class", "")}, ) graph.remove_edge(prev_node, j) graph.remove_node(j) @@ -243,7 +242,7 @@ def _merge_equivalent_nodes(graph, max_history): async def _replace_edge_labels_with_nodes( graph, next_id, interpreter, nlu_training_data -): +) -> None: """User messages are created as edge labels. This removes the labels and creates nodes instead. 
@@ -269,39 +268,39 @@ async def _replace_edge_labels_with_nodes( graph.remove_edge(s, e, k) graph.add_node( next_id, - label=sanitize(label), + label=label, shape="rect", style="filled", fillcolor="lightblue", - **_transfer_style(d, {"class": "intent"}) + **_transfer_style(d, {"class": "intent"}), ) graph.add_edge(s, next_id, **{"class": d.get("class", "")}) graph.add_edge(next_id, e, **{"class": d.get("class", "")}) -def visualization_html_path(): +def visualization_html_path() -> Text: import pkg_resources return pkg_resources.resource_filename(__name__, VISUALIZATION_TEMPLATE_PATH) -def persist_graph(graph, output_file): +def persist_graph(graph: "networkx.Graph", output_file: Text) -> None: """Plots the graph and persists it into a html file.""" import networkx as nx + import rasa.utils.io as io_utils expg = nx.nx_pydot.to_pydot(graph) - with open(visualization_html_path(), "r") as file: - template = file.read() + template = io_utils.read_file(visualization_html_path()) - # customize content of template by replacing tags + # Insert graph into template template = template.replace("// { is-client }", "isClient = true", 1) - template = template.replace( - "// { graph-content }", "graph = `{}`".format(expg.to_string()), 1 - ) + graph_as_text = expg.to_string() + # escape backslashes + graph_as_text = graph_as_text.replace("\\", "\\\\") + template = template.replace("// { graph-content }", f"graph = `{graph_as_text}`", 1) - with open(output_file, "w", encoding="utf-8") as file: - file.write(template) + io_utils.write_text_file(template, output_file) def _length_of_common_action_prefix(this: List[Event], other: List[Event]) -> int: @@ -336,7 +335,7 @@ def _add_default_nodes(graph: "networkx.MultiDiGraph", fontsize: int = 12) -> No fillcolor="green", style="filled", fontsize=fontsize, - **{"class": "start active"} + **{"class": "start active"}, ) graph.add_node( END_NODE_ID, @@ -344,7 +343,7 @@ def _add_default_nodes(graph: "networkx.MultiDiGraph", fontsize: int = 12) -> No fillcolor="red", style="filled", fontsize=fontsize, - **{"class": "end"} + **{"class": "end"}, ) graph.add_node(TMP_NODE_ID, label="TMP", style="invis", **{"class": "invisible"}) @@ -359,13 +358,6 @@ def _create_graph(fontsize: int = 12) -> "networkx.MultiDiGraph": return graph -def sanitize(s): - if s: - return re.escape(s) - else: - return s - - def _add_message_edge( graph: "networkx.MultiDiGraph", message: Optional[Dict[Text, Any]], @@ -388,7 +380,7 @@ def _add_message_edge( next_node_idx, message_key, message_label, - **{"class": "active" if is_current else ""} + **{"class": "active" if is_current else ""}, ) @@ -440,7 +432,7 @@ async def visualize_neighborhood( next_node_idx, label=el.action_name, fontsize=fontsize, - **{"class": "active" if is_current else ""} + **{"class": "active" if is_current else ""}, ) _add_message_edge( @@ -464,9 +456,9 @@ async def visualize_neighborhood( next_node_idx, label=" ? " if not message - else sanitize(message.get("intent", {}).get("name", " ? ")), + else message.get("intent", {}).get("name", " ? 
"), shape="rect", - **{"class": "intent dashed active"} + **{"class": "intent dashed active"}, ) target = next_node_idx elif current_node: diff --git a/rasa/core/utils.py b/rasa/core/utils.py index c8f061997f9a..d3f997825a81 100644 --- a/rasa/core/utils.py +++ b/rasa/core/utils.py @@ -1,27 +1,46 @@ -# -*- coding: utf-8 -*- import argparse import json import logging +import os import re import sys from asyncio import Future +from decimal import Decimal from hashlib import md5, sha1 from io import StringIO from pathlib import Path -from typing import Any, Dict, List, Optional, Set, TYPE_CHECKING, Text, Tuple, Callable -from typing import Union +from typing import ( + Any, + Callable, + Dict, + Generator, + List, + Optional, + Set, + TYPE_CHECKING, + Text, + Tuple, + Union, +) import aiohttp -from aiohttp import InvalidURL -from sanic import Sanic -from sanic.views import CompositionView - +import numpy as np import rasa.utils.io as io_utils +from aiohttp import InvalidURL +from rasa.constants import ( + DEFAULT_SANIC_WORKERS, + ENV_SANIC_WORKERS, + DEFAULT_ENDPOINTS_PATH, + YAML_VERSION, +) # backwards compatibility 1.0.x # noinspection PyUnresolvedReferences -from rasa.utils.endpoints import concat_url -from rasa.utils.endpoints import read_endpoint_config +from rasa.core.lock_store import LockStore, RedisLockStore +from rasa.utils.endpoints import EndpointConfig, read_endpoint_config +from sanic import Sanic +from sanic.views import CompositionView +import rasa.cli.utils as cli_utils logger = logging.getLogger(__name__) @@ -29,12 +48,20 @@ from random import Random -def configure_file_logging(logger_obj: logging.Logger, log_file: Optional[Text]): +def configure_file_logging( + logger_obj: logging.Logger, log_file: Optional[Text] +) -> None: + """Configure logging to a file. + + Args: + logger_obj: Logger object to configure. + log_file: Path of log file to write to. + """ if not log_file: return formatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s") - file_handler = logging.FileHandler(log_file, encoding="utf-8") + file_handler = logging.FileHandler(log_file, encoding=io_utils.DEFAULT_ENCODING) file_handler.setLevel(logger_obj.level) file_handler.setFormatter(formatter) logger_obj.addHandler(file_handler) @@ -45,20 +72,6 @@ def module_path_from_instance(inst: Any) -> Text: return inst.__module__ + "." + inst.__class__.__name__ -def dump_obj_as_json_to_file(filename: Text, obj: Any) -> None: - """Dump an object as a json string to a file.""" - - dump_obj_as_str_to_file(filename, json.dumps(obj, indent=2)) - - -def dump_obj_as_str_to_file(filename: Text, text: Text) -> None: - """Dump a text to a file.""" - - with open(filename, "w", encoding="utf-8") as f: - # noinspection PyTypeChecker - f.write(str(text)) - - def subsample_array( arr: List[Any], max_values: int, @@ -89,58 +102,48 @@ def is_int(value: Any) -> bool: return False -def one_hot(hot_idx, length, dtype=None): - import numpy +def one_hot(hot_idx: int, length: int, dtype: Optional[Text] = None) -> np.ndarray: + """Create a one-hot array. + + Args: + hot_idx: Index of the hot element. + length: Length of the array. + dtype: ``numpy.dtype`` of the array. + Returns: + One-hot array. + """ if hot_idx >= length: raise ValueError( "Can't create one hot. 
Index '{}' is out " "of range (length '{}')".format(hot_idx, length) ) - r = numpy.zeros(length, dtype) + r = np.zeros(length, dtype) r[hot_idx] = 1 return r -def str_range_list(start, end): - return [str(e) for e in range(start, end)] +def generate_id(prefix: Text = "", max_chars: Optional[int] = None) -> Text: + """Generate a random UUID. + Args: + prefix: String to prefix the ID with. + max_chars: Maximum number of characters. -def generate_id(prefix="", max_chars=None): + Returns: + Generated random UUID. + """ import uuid gid = uuid.uuid4().hex if max_chars: gid = gid[:max_chars] - return "{}{}".format(prefix, gid) - - -def request_input(valid_values=None, prompt=None, max_suggested=3): - def wrong_input_message(): - print ( - "Invalid answer, only {}{} allowed\n".format( - ", ".join(valid_values[:max_suggested]), - ",..." if len(valid_values) > max_suggested else "", - ) - ) - - while True: - try: - input_value = input(prompt) if prompt else input() - if valid_values is not None and input_value not in valid_values: - wrong_input_message() - continue - except ValueError: - wrong_input_message() - continue - return input_value + return f"{prefix}{gid}" # noinspection PyPep8Naming - - -class HashableNDArray(object): +class HashableNDArray: """Hashable wrapper for ndarray objects. Instances of ndarray are not hashable, meaning they cannot be added to @@ -154,7 +157,7 @@ class HashableNDArray(object): or the original object (which requires the user to be careful enough not to modify it).""" - def __init__(self, wrapped, tight=False): + def __init__(self, wrapped, tight=False) -> None: """Creates a new hashable object encapsulating an ndarray. wrapped @@ -164,48 +167,53 @@ def __init__(self, wrapped, tight=False): Optional. If True, a copy of the input ndaray is created. Defaults to False. """ - from numpy import array self.__tight = tight - self.__wrapped = array(wrapped) if tight else wrapped + self.__wrapped = np.array(wrapped) if tight else wrapped self.__hash = int(sha1(wrapped.view()).hexdigest(), 16) - def __eq__(self, other): - from numpy import all - - return all(self.__wrapped == other.__wrapped) + def __eq__(self, other) -> bool: + return np.all(self.__wrapped == other.__wrapped) - def __hash__(self): + def __hash__(self) -> int: return self.__hash - def unwrap(self): + def unwrap(self) -> np.ndarray: """Returns the encapsulated ndarray. If the wrapper is "tight", a copy of the encapsulated ndarray is returned. Otherwise, the encapsulated ndarray itself is returned.""" - from numpy import array if self.__tight: - return array(self.__wrapped) + return np.array(self.__wrapped) return self.__wrapped -def _dump_yaml(obj, output): +def _dump_yaml(obj: Dict, output: Union[Text, Path, StringIO]) -> None: import ruamel.yaml yaml_writer = ruamel.yaml.YAML(pure=True, typ="safe") yaml_writer.unicode_supplementary = True yaml_writer.default_flow_style = False - yaml_writer.version = "1.1" + yaml_writer.version = YAML_VERSION yaml_writer.dump(obj, output) -def dump_obj_as_yaml_to_file(filename: Union[Text, Path], obj: Dict) -> None: - """Writes data (python dict) to the filename in yaml repr.""" - with open(str(filename), "w", encoding="utf-8") as output: - _dump_yaml(obj, output) +def dump_obj_as_yaml_to_file( + filename: Union[Text, Path], obj: Any, should_preserve_key_order: bool = False +) -> None: + """Writes `obj` to the filename in YAML repr. + + Args: + filename: Target filename. + obj: Object to dump. + should_preserve_key_order: Whether to preserve key order in `obj`. 
+ """ + io_utils.write_yaml( + obj, filename, should_preserve_key_order=should_preserve_key_order + ) def dump_obj_as_yaml_to_string(obj: Dict) -> Text: @@ -235,7 +243,7 @@ def find_route(suffix, path): options = {} for arg in route.parameters: - options[arg] = "[{0}]".format(arg) + options[arg] = f"[{arg}]" if not isinstance(route.handler, CompositionView): handlers = [(list(route.methods)[0], route.name)] @@ -246,16 +254,16 @@ def find_route(suffix, path): ] for method, name in handlers: - line = unquote("{:50s} {:30s} {}".format(endpoint, method, name)) + line = unquote(f"{endpoint:50s} {method:30s} {name}") output[name] = line url_table = "\n".join(output[url] for url in sorted(output)) - logger.debug("Available web server routes: \n{}".format(url_table)) + logger.debug(f"Available web server routes: \n{url_table}") return output -def cap_length(s, char_limit=20, append_ellipsis=True): +def cap_length(s: Text, char_limit: int = 20, append_ellipsis: bool = True) -> Text: """Makes sure the string doesn't exceed the passed char limit. Appends an ellipsis if the string is too long.""" @@ -295,16 +303,27 @@ def all_subclasses(cls: Any) -> List[Any]: ] -def is_limit_reached(num_messages, limit): +def is_limit_reached(num_messages: int, limit: int) -> bool: + """Determine whether the number of messages has reached a limit. + + Args: + num_messages: The number of messages to check. + limit: Limit on the number of messages. + + Returns: + `True` if the limit has been reached, otherwise `False`. + """ return limit is not None and num_messages >= limit -def read_lines(filename, max_line_limit=None, line_pattern=".*"): +def read_lines( + filename, max_line_limit=None, line_pattern=".*" +) -> Generator[Text, Any, None]: """Read messages from the command line and print bot responses.""" line_filter = re.compile(line_pattern) - with open(filename, "r", encoding="utf-8") as f: + with open(filename, "r", encoding=io_utils.DEFAULT_ENCODING) as f: num_messages = 0 for line in f: m = line_filter.match(line) @@ -326,7 +345,7 @@ def convert_bytes_to_string(data: Union[bytes, bytearray, Text]) -> Text: """Convert `data` to string if it is a bytes-like object.""" if isinstance(data, (bytes, bytearray)): - return data.decode("utf-8") + return data.decode(io_utils.DEFAULT_ENCODING) return data @@ -336,12 +355,12 @@ def get_file_hash(path: Text) -> Text: return md5(file_as_bytes(path)).hexdigest() -def get_text_hash(text: Text, encoding: Text = "utf-8") -> Text: +def get_text_hash(text: Text, encoding: Text = io_utils.DEFAULT_ENCODING) -> Text: """Calculate the md5 hash for a text.""" return md5(text.encode(encoding)).hexdigest() -def get_dict_hash(data: Dict, encoding: Text = "utf-8") -> Text: +def get_dict_hash(data: Dict, encoding: Text = io_utils.DEFAULT_ENCODING) -> Text: """Calculate the md5 hash of a dictionary.""" return md5(json.dumps(data, sort_keys=True).encode(encoding)).hexdigest() @@ -383,11 +402,11 @@ def pad_lists_to_size( return list_x, list_y -class AvailableEndpoints(object): +class AvailableEndpoints: """Collection of configured endpoints.""" @classmethod - def read_endpoints(cls, endpoint_file): + def read_endpoints(cls, endpoint_file: Text) -> "AvailableEndpoints": nlg = read_endpoint_config(endpoint_file, endpoint_type="nlg") nlu = read_endpoint_config(endpoint_file, endpoint_type="nlu") action = read_endpoint_config(endpoint_file, endpoint_type="action_endpoint") @@ -402,14 +421,14 @@ def read_endpoints(cls, endpoint_file): def __init__( self, - nlg=None, - nlu=None, - action=None, - 
model=None, - tracker_store=None, - lock_store=None, - event_broker=None, - ): + nlg: Optional[EndpointConfig] = None, + nlu: Optional[EndpointConfig] = None, + action: Optional[EndpointConfig] = None, + model: Optional[EndpointConfig] = None, + tracker_store: Optional[EndpointConfig] = None, + lock_store: Optional[EndpointConfig] = None, + event_broker: Optional[EndpointConfig] = None, + ) -> None: self.model = model self.action = action self.nlu = nlu @@ -419,8 +438,27 @@ def __init__( self.event_broker = event_broker +def read_endpoints_from_path( + endpoints_path: Union[Path, Text, None] = None +) -> AvailableEndpoints: + """Get `AvailableEndpoints` object from specified path. + + Args: + endpoints_path: Path of the endpoints file to be read. If `None` the + default path for that file is used (`endpoints.yml`). + + Returns: + `AvailableEndpoints` object read from endpoints file. + + """ + endpoints_config_path = cli_utils.get_validated_path( + endpoints_path, "endpoints", DEFAULT_ENDPOINTS_PATH, True + ) + return AvailableEndpoints.read_endpoints(endpoints_config_path) + + # noinspection PyProtectedMember -def set_default_subparser(parser, default_subparser): +def set_default_subparser(parser, default_subparser) -> None: """default subparser selection. Call after setup, just before parse_args() parser: the name of the parser you're making changes to @@ -457,3 +495,108 @@ def handler(fut: Future) -> None: ) return handler + + +def replace_floats_with_decimals(obj: Any, round_digits: int = 9) -> Any: + """Convert all instances in `obj` of `float` to `Decimal`. + + Args: + obj: Input object. + round_digits: Rounding precision of `Decimal` values. + + Returns: + Input `obj` with all `float` types replaced by `Decimal`s rounded to + `round_digits` decimal places. + """ + + def _float_to_rounded_decimal(s: Text) -> Decimal: + return Decimal(s).quantize(Decimal(10) ** -round_digits) + + return json.loads(json.dumps(obj), parse_float=_float_to_rounded_decimal) + + +class DecimalEncoder(json.JSONEncoder): + """`json.JSONEncoder` that dumps `Decimal`s as `float`s.""" + + def default(self, obj: Any) -> Any: + """Get serializable object for `o`. + + Args: + obj: Object to serialize. + + Returns: + `obj` converted to `float` if `o` is a `Decimals`, else the base class + `default()` method. + """ + if isinstance(obj, Decimal): + return float(obj) + return super().default(obj) + + +def replace_decimals_with_floats(obj: Any) -> Any: + """Convert all instances in `obj` of `Decimal` to `float`. + + Args: + obj: A `List` or `Dict` object. + + Returns: + Input `obj` with all `Decimal` types replaced by `float`s. + """ + return json.loads(json.dumps(obj, cls=DecimalEncoder)) + + +def _lock_store_is_redis_lock_store( + lock_store: Union[EndpointConfig, LockStore, None] +) -> bool: + if isinstance(lock_store, RedisLockStore): + return True + + if isinstance(lock_store, LockStore): + return False + + # `lock_store` is `None` or `EndpointConfig` + return lock_store is not None and lock_store.type == "redis" + + +def number_of_sanic_workers(lock_store: Union[EndpointConfig, LockStore, None]) -> int: + """Get the number of Sanic workers to use in `app.run()`. + + If the environment variable constants.ENV_SANIC_WORKERS is set and is not equal to + 1, that value will only be permitted if the used lock store supports shared + resources across multiple workers (e.g. ``RedisLockStore``). 
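The two Decimal helpers above round-trip arbitrary JSON-serializable structures so that every `float` becomes a rounded `Decimal` (and back again for the reverse direction). A minimal, standard-library-only sketch of the same trick:

import json
from decimal import Decimal

def floats_to_decimals(obj, round_digits: int = 9):
    # Re-serialize through JSON and intercept float parsing, as the helper above does.
    return json.loads(
        json.dumps(obj),
        parse_float=lambda s: Decimal(s).quantize(Decimal(10) ** -round_digits),
    )

floats_to_decimals({"confidence": 0.123456789123, "scores": [0.1, 0.25]})
# -> {'confidence': Decimal('0.123456789'), 'scores': [Decimal('0.100000000'), Decimal('0.250000000')]}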
+ """ + + def _log_and_get_default_number_of_workers(): + logger.debug( + f"Using the default number of Sanic workers ({DEFAULT_SANIC_WORKERS})." + ) + return DEFAULT_SANIC_WORKERS + + try: + env_value = int(os.environ.get(ENV_SANIC_WORKERS, DEFAULT_SANIC_WORKERS)) + except ValueError: + logger.error( + f"Cannot convert environment variable `{ENV_SANIC_WORKERS}` " + f"to int ('{os.environ[ENV_SANIC_WORKERS]}')." + ) + return _log_and_get_default_number_of_workers() + + if env_value == DEFAULT_SANIC_WORKERS: + return _log_and_get_default_number_of_workers() + + if env_value < 1: + logger.debug( + f"Cannot set number of Sanic workers to the desired value " + f"({env_value}). The number of workers must be at least 1." + ) + return _log_and_get_default_number_of_workers() + + if _lock_store_is_redis_lock_store(lock_store): + logger.debug(f"Using {env_value} Sanic workers.") + return env_value + + logger.debug( + f"Unable to assign desired number of Sanic workers ({env_value}) as " + f"no `RedisLockStore` endpoint configuration has been found." + ) + return _log_and_get_default_number_of_workers() diff --git a/rasa/core/validator.py b/rasa/core/validator.py deleted file mode 100644 index 1feb3834d85e..000000000000 --- a/rasa/core/validator.py +++ /dev/null @@ -1,173 +0,0 @@ -import logging -import asyncio -from typing import List, Set, Text -from rasa.core.domain import Domain -from rasa.importers.importer import TrainingDataImporter -from rasa.nlu.training_data import TrainingData -from rasa.core.training.dsl import StoryStep -from rasa.core.training.dsl import UserUttered -from rasa.core.training.dsl import ActionExecuted -from rasa.core.constants import UTTER_PREFIX - -logger = logging.getLogger(__name__) - - -class Validator(object): - """A class used to verify usage of intents and utterances.""" - - def __init__(self, domain: Domain, intents: TrainingData, stories: List[StoryStep]): - """Initializes the Validator object. """ - - self.domain = domain - self.intents = intents - self.stories = stories - - @classmethod - async def from_importer(cls, importer: TrainingDataImporter) -> "Validator": - """Create an instance from the domain, nlu and story files.""" - - domain = await importer.get_domain() - stories = await importer.get_stories() - intents = await importer.get_nlu_data() - - return cls(domain, intents, stories.story_steps) - - def verify_intents(self, ignore_warnings: bool = True) -> bool: - """Compares list of intents in domain with intents in NLU training data.""" - - everything_is_alright = True - - nlu_data_intents = {e.data["intent"] for e in self.intents.intent_examples} - - for intent in self.domain.intents: - if intent not in nlu_data_intents: - logger.warning( - "The intent '{}' is listed in the domain file, but " - "is not found in the NLU training data.".format(intent) - ) - everything_is_alright = ignore_warnings and everything_is_alright - - for intent in nlu_data_intents: - if intent not in self.domain.intents: - logger.error( - "The intent '{}' is in the NLU training data, but " - "is not listed in the domain.".format(intent) - ) - everything_is_alright = False - - return everything_is_alright - - def verify_intents_in_stories(self, ignore_warnings: bool = True) -> bool: - """Checks intents used in stories. 
- - Verifies if the intents used in the stories are valid, and whether - all valid intents are used in the stories.""" - - everything_is_alright = self.verify_intents() - - stories_intents = { - event.intent["name"] - for story in self.stories - for event in story.events - if type(event) == UserUttered - } - - for story_intent in stories_intents: - if story_intent not in self.domain.intents: - logger.error( - "The intent '{}' is used in stories, but is not " - "listed in the domain file.".format(story_intent) - ) - everything_is_alright = False - - for intent in self.domain.intents: - if intent not in stories_intents: - logger.warning( - "The intent '{}' is not used in any story.".format(intent) - ) - everything_is_alright = ignore_warnings and everything_is_alright - - return everything_is_alright - - def _gather_utterance_actions(self) -> Set[Text]: - """Return all utterances which are actions.""" - return { - utterance - for utterance in self.domain.templates.keys() - if utterance in self.domain.action_names - } - - def verify_utterances(self, ignore_warnings: bool = True) -> bool: - """Compares list of utterances in actions with utterances in templates.""" - - actions = self.domain.action_names - utterance_templates = set(self.domain.templates) - everything_is_alright = True - - for utterance in utterance_templates: - if utterance not in actions: - logger.warning( - "The utterance '{}' is not listed under 'actions' in the " - "domain file. It can only be used as a template.".format(utterance) - ) - everything_is_alright = ignore_warnings and everything_is_alright - - for action in actions: - if action.startswith(UTTER_PREFIX): - if action not in utterance_templates: - logger.error( - "There is no template for utterance '{}'.".format(action) - ) - everything_is_alright = False - - return everything_is_alright - - def verify_utterances_in_stories(self, ignore_warnings: bool = True) -> bool: - """Verifies usage of utterances in stories. 
- - Checks whether utterances used in the stories are valid, - and whether all valid utterances are used in stories.""" - - everything_is_alright = self.verify_utterances() - - utterance_actions = self._gather_utterance_actions() - stories_utterances = set() - - for story in self.stories: - for event in story.events: - if not isinstance(event, ActionExecuted): - continue - if not event.action_name.startswith(UTTER_PREFIX): - # we are only interested in utter actions - continue - - if event.action_name in stories_utterances: - # we already processed this one before, we only want to warn once - continue - - if event.action_name not in utterance_actions: - logger.error( - "The utterance '{}' is used in stories, but is not a " - "valid utterance.".format(event.action_name) - ) - everything_is_alright = False - stories_utterances.add(event.action_name) - - for utterance in utterance_actions: - if utterance not in stories_utterances: - logger.warning( - "The utterance '{}' is not used in any story.".format(utterance) - ) - everything_is_alright = ignore_warnings and everything_is_alright - - return everything_is_alright - - def verify_all(self, ignore_warnings: bool = True) -> bool: - """Runs all the validations on intents and utterances.""" - - logger.info("Validating intents...") - intents_are_valid = self.verify_intents_in_stories(ignore_warnings) - - logger.info("Validating utterances...") - stories_are_valid = self.verify_utterances_in_stories(ignore_warnings) - return intents_are_valid and stories_are_valid diff --git a/rasa/core/visualize.py b/rasa/core/visualize.py index df75b75e5022..e12b489c53c9 100644 --- a/rasa/core/visualize.py +++ b/rasa/core/visualize.py @@ -54,7 +54,7 @@ async def visualize( ) full_output_path = "file://{}".format(os.path.abspath(output_path)) - logger.info("Finished graph creation. Saved into {}".format(full_output_path)) + logger.info(f"Finished graph creation. Saved into {full_output_path}") import webbrowser diff --git a/rasa/data.py b/rasa/data.py index a080a9da7162..e5bdd0ca62a6 100644 --- a/rasa/data.py +++ b/rasa/data.py @@ -3,12 +3,24 @@ import shutil import tempfile import uuid -import re -from typing import Tuple, List, Text, Set, Union, Optional -from rasa.nlu.training_data import loading +from pathlib import Path +from typing import Tuple, List, Text, Set, Union, Optional, Iterable + +from rasa.constants import DEFAULT_E2E_TESTS_PATH +from rasa.nlu.training_data import loading as nlu_loading logger = logging.getLogger(__name__) +MARKDOWN_FILE_EXTENSIONS = {".md"} + +YAML_FILE_EXTENSIONS = {".yml", ".yaml"} + +JSON_FILE_EXTENSIONS = {".json"} + +TRAINING_DATA_EXTENSIONS = JSON_FILE_EXTENSIONS.union(MARKDOWN_FILE_EXTENSIONS).union( + YAML_FILE_EXTENSIONS +) + def get_core_directory(paths: Optional[Union[Text, List[Text]]],) -> Text: """Recursively collects all Core training files from a list of paths. @@ -59,7 +71,7 @@ def get_core_nlu_directories( def get_core_nlu_files( paths: Optional[Union[Text, List[Text]]] -) -> Tuple[Set[Text], Set[Text]]: +) -> Tuple[List[Text], List[Text]]: """Recursively collects all training files from a list of paths. 
Args: @@ -94,15 +106,17 @@ def get_core_nlu_files( story_files.update(new_story_files) nlu_data_files.update(new_nlu_data_files) - return story_files, nlu_data_files + return sorted(story_files), sorted(nlu_data_files) def _find_core_nlu_files_in_directory(directory: Text,) -> Tuple[Set[Text], Set[Text]]: story_files = set() nlu_data_files = set() - for root, _, files in os.walk(directory): - for f in files: + for root, _, files in os.walk(directory, followlinks=True): + # we sort the files here to ensure consistent order for repeatable training + # results + for f in sorted(files): full_path = os.path.join(root, f) if not _is_valid_filetype(full_path): @@ -117,10 +131,7 @@ def _find_core_nlu_files_in_directory(directory: Text,) -> Tuple[Set[Text], Set[ def _is_valid_filetype(path: Text) -> bool: - is_file = os.path.isfile(path) - is_datafile = path.endswith(".json") or path.endswith(".md") - - return is_file and is_datafile + return os.path.isfile(path) and Path(path).suffix in TRAINING_DATA_EXTENSIONS def is_nlu_file(file_path: Text) -> bool: @@ -132,7 +143,7 @@ def is_nlu_file(file_path: Text) -> bool: Returns: `True` if it's a nlu file, otherwise `False`. """ - return loading.guess_format(file_path) != loading.UNK + return nlu_loading.guess_format(file_path) != nlu_loading.UNK def is_story_file(file_path: Text) -> bool: @@ -144,34 +155,37 @@ def is_story_file(file_path: Text) -> bool: Returns: `True` if it's a story file, otherwise `False`. """ - _is_story_file = False - - if file_path.endswith(".md"): - with open(file_path, encoding="utf-8") as f: - _is_story_file = any(_contains_story_pattern(l) for l in f) + from rasa.core.training.story_reader.yaml_story_reader import YAMLStoryReader - return _is_story_file + if YAMLStoryReader.is_yaml_story_file(file_path): + return True + from rasa.core.training.story_reader.markdown_story_reader import ( + MarkdownStoryReader, + ) -def _contains_story_pattern(text: Text) -> bool: - story_pattern = r".*##.+" + return MarkdownStoryReader.is_markdown_story_file(file_path) - return re.match(story_pattern, text) is not None - -def is_domain_file(file_path: Text) -> bool: - """Checks whether the given file path is a Rasa domain file. +def is_end_to_end_conversation_test_file(file_path: Text) -> bool: + """Checks if a file is an end-to-end conversation test file. Args: file_path: Path of the file which should be checked. Returns: - `True` if it's a domain file, otherwise `False`. + `True` if it's a conversation test file, otherwise `False`. 
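The file-type checks above boil down to a suffix test plus a cheap content probe. A self-contained sketch of the suffix part only (the extension set mirrors the constants defined in this hunk; the YAML/Markdown content probes are not reproduced):

from pathlib import Path

TRAINING_DATA_EXTENSIONS = {".json", ".md", ".yml", ".yaml"}

def looks_like_training_data(path: str) -> bool:
    # Same idea as _is_valid_filetype: an existing file with a known data suffix.
    p = Path(path)
    return p.is_file() and p.suffix in TRAINING_DATA_EXTENSIONS

looks_like_training_data("data/nlu.md")  # True if the file exists
looks_like_training_data("notes.txt")    # False: unknown suffix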
""" - file_name = os.path.basename(file_path) + if Path(file_path).suffix not in MARKDOWN_FILE_EXTENSIONS: + return False - return file_name in ["domain.yml", "domain.yaml"] + dirname = os.path.dirname(file_path) + return ( + DEFAULT_E2E_TESTS_PATH in dirname + and is_story_file(file_path) + and not is_nlu_file(file_path) + ) def is_config_file(file_path: Text) -> bool: @@ -189,7 +203,7 @@ def is_config_file(file_path: Text) -> bool: return file_name in ["config.yml", "config.yaml"] -def _copy_files_to_new_dir(files: Set[Text]) -> Text: +def _copy_files_to_new_dir(files: Iterable[Text]) -> Text: directory = tempfile.mkdtemp() for f in files: # makes sure files do not overwrite each other, hence the prefix diff --git a/rasa/exceptions.py b/rasa/exceptions.py index 6fa010da65a5..c192b98d5d11 100644 --- a/rasa/exceptions.py +++ b/rasa/exceptions.py @@ -1,6 +1,35 @@ +from typing import Text + + class RasaException(Exception): """Base exception class for all errors raised by Rasa.""" class ModelNotFound(RasaException): """Raised when a model is not found in the path provided by the user.""" + + +class NoEventsToMigrateError(RasaException): + """Raised when no events to be migrated are found.""" + + +class NoConversationsInTrackerStoreError(RasaException): + """Raised when a tracker store does not contain any conversations.""" + + +class NoEventsInTimeRangeError(RasaException): + """Raised when a tracker store does not contain events within a given time range.""" + + +class PublishingError(RasaException): + """Raised when publishing of an event fails. + + Attributes: + timestamp -- Unix timestamp of the event during which publishing fails. + """ + + def __init__(self, timestamp: float) -> None: + self.timestamp = timestamp + + def __str__(self) -> Text: + return str(self.timestamp) diff --git a/rasa/importers/autoconfig.py b/rasa/importers/autoconfig.py new file mode 100644 index 000000000000..51b2e1736451 --- /dev/null +++ b/rasa/importers/autoconfig.py @@ -0,0 +1,245 @@ +import copy +import logging +import os +import sys + +from typing import Text, Dict, Any, List, Set + +from rasa.cli import utils as cli_utils +from rasa.constants import ( + CONFIG_AUTOCONFIGURABLE_KEYS, + DOCS_URL_PIPELINE, + DOCS_URL_POLICIES, + CONFIG_KEYS, +) +from rasa.utils import io as io_utils, common as common_utils + +logger = logging.getLogger(__name__) + +COMMENTS_FOR_KEYS = { + "pipeline": ( + f"# # No configuration for the NLU pipeline was provided. The following " + f"default pipeline was used to train your model.\n" + f"# # If you'd like to customize it, uncomment and adjust the pipeline.\n" + f"# # See {DOCS_URL_PIPELINE} for more information.\n" + ), + "policies": ( + f"# # No configuration for policies was provided. The following default " + f"policies were used to train your model.\n" + f"# # If you'd like to customize them, uncomment and adjust the policies.\n" + f"# # See {DOCS_URL_POLICIES} for more information.\n" + ), +} + + +def get_configuration(config_file_path: Text) -> Dict[Text, Any]: + """Determine configuration from a configuration file. + + Keys that are provided and have a value in the file are kept. Keys that are not + provided are configured automatically. + + Args: + config_file_path: The path to the configuration file. 
+ """ + if not config_file_path or not os.path.exists(config_file_path): + logger.debug("No configuration file was provided to the TrainingDataImporter.") + return {} + + config = io_utils.read_config_file(config_file_path) + + missing_keys = _get_missing_config_keys(config) + keys_to_configure = _get_unspecified_autoconfigurable_keys(config) + + if keys_to_configure: + config = _auto_configure(config, keys_to_configure) + _dump_config(config, config_file_path, missing_keys, keys_to_configure) + + return config + + +def _get_unspecified_autoconfigurable_keys(config: Dict[Text, Any]) -> Set[Text]: + return {k for k in CONFIG_AUTOCONFIGURABLE_KEYS if not config.get(k)} + + +def _get_missing_config_keys(config: Dict[Text, Any]) -> Set[Text]: + return {k for k in CONFIG_KEYS if k not in config.keys()} + + +def _auto_configure( + config: Dict[Text, Any], keys_to_configure: Set[Text] +) -> Dict[Text, Any]: + """Complete a config by adding automatic configuration for the specified keys. + + Args: + config: The provided configuration. + keys_to_configure: Keys to be configured automatically (e.g. `policies`). + + Returns: + The resulting configuration including both the provided and the automatically + configured keys. + """ + import pkg_resources + + if keys_to_configure: + logger.debug( + f"The provided configuration does not contain the key(s) " + f"{common_utils.transform_collection_to_sentence(keys_to_configure)}. " + f"Values will be provided from the default configuration." + ) + + if sys.platform == "win32": + default_config_file = pkg_resources.resource_filename( + __name__, "default_config_windows.yml" + ) + else: + default_config_file = pkg_resources.resource_filename( + __name__, "default_config.yml" + ) + + default_config = io_utils.read_config_file(default_config_file) + + config = copy.deepcopy(config) + for key in keys_to_configure: + config[key] = default_config[key] + + return config + + +def _dump_config( + config: Dict[Text, Any], + config_file_path: Text, + missing_keys: Set[Text], + auto_configured_keys: Set[Text], +) -> None: + """Dump the automatically configured keys into the config file. + + The configuration provided in the file is kept as it is (preserving the order of + keys and comments). + For keys that were automatically configured, an explanatory comment is added and the + automatically chosen configuration is added commented-out. + If there are already blocks with comments from a previous auto configuration run, + they are replaced with the new auto configuration. + + Args: + config: The configuration including the automatically configured keys. + config_file_path: The file into which the configuration should be dumped. + missing_keys: Keys that need to be added to the config file. + auto_configured_keys: Keys for which a commented out auto configuration section + needs to be added to the config file. + """ + config_as_expected = _is_config_file_as_expected( + config_file_path, missing_keys, auto_configured_keys + ) + if not config_as_expected: + cli_utils.print_error( + f"The configuration file at '{config_file_path}' has been removed or " + f"modified while the automatic configuration was running. The current " + f"configuration will therefore not be dumped to the file. If you want to " + f"your model to use the configuration provided in '{config_file_path}', " + f"you need to re-run training." 
+ ) + return + + _add_missing_config_keys_to_file(config_file_path, missing_keys) + + autoconfig_lines = _get_commented_out_autoconfig_lines(config, auto_configured_keys) + + with open(config_file_path, "r+", encoding=io_utils.DEFAULT_ENCODING) as f: + lines = f.readlines() + updated_lines = _get_lines_including_autoconfig(lines, autoconfig_lines) + f.seek(0) + for line in updated_lines: + f.write(line) + + auto_configured_keys = common_utils.transform_collection_to_sentence( + auto_configured_keys + ) + cli_utils.print_info( + f"The configuration for {auto_configured_keys} was chosen automatically. It " + f"was written into the config file at '{config_file_path}'." + ) + + +def _is_config_file_as_expected( + config_file_path: Text, missing_keys: Set[Text], auto_configured_keys: Set[Text] +) -> bool: + try: + content = io_utils.read_config_file(config_file_path) + except ValueError: + content = "" + + return ( + bool(content) + and missing_keys == _get_missing_config_keys(content) + and auto_configured_keys == _get_unspecified_autoconfigurable_keys(content) + ) + + +def _add_missing_config_keys_to_file( + config_file_path: Text, missing_keys: Set[Text] +) -> None: + if not missing_keys: + return + with open(config_file_path, "a", encoding=io_utils.DEFAULT_ENCODING) as f: + for key in missing_keys: + f.write(f"{key}:\n") + + +def _get_lines_including_autoconfig( + lines: List[Text], autoconfig_lines: Dict[Text, List[Text]] +) -> List[Text]: + auto_configured_keys = autoconfig_lines.keys() + + lines_with_autoconfig = [] + remove_comments_until_next_uncommented_line = False + for line in lines: + insert_section = None + + # remove old auto configuration + if remove_comments_until_next_uncommented_line: + if line.startswith("#"): + continue + remove_comments_until_next_uncommented_line = False + + # add an explanatory comment to auto configured sections + for key in auto_configured_keys: + if line.startswith(f"{key}:"): # start of next auto-section + line = line + COMMENTS_FOR_KEYS[key] + insert_section = key + remove_comments_until_next_uncommented_line = True + + lines_with_autoconfig.append(line) + + if not insert_section: + continue + + # add the auto configuration (commented out) + lines_with_autoconfig += autoconfig_lines[insert_section] + + return lines_with_autoconfig + + +def _get_commented_out_autoconfig_lines( + config: Dict[Text, Any], auto_configured_keys: Set[Text] +) -> Dict[Text, List[Text]]: + import ruamel.yaml as yaml + import ruamel.yaml.compat + + yaml_parser = yaml.YAML() + yaml_parser.indent(mapping=2, sequence=4, offset=2) + + autoconfig_lines = {} + + for key in auto_configured_keys: + stream = yaml.compat.StringIO() + yaml_parser.dump(config.get(key), stream) + dump = stream.getvalue() + + lines = dump.split("\n") + if not lines[-1]: + lines = lines[:-1] # yaml dump adds an empty line at the end + lines = [f"# {line}\n" for line in lines] + + autoconfig_lines[key] = lines + + return autoconfig_lines diff --git a/rasa/importers/default_config.yml b/rasa/importers/default_config.yml new file mode 100644 index 000000000000..87f863f1b3f8 --- /dev/null +++ b/rasa/importers/default_config.yml @@ -0,0 +1,28 @@ +# Configuration for the Rasa NLU components. 
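The key step in `_get_commented_out_autoconfig_lines` is: dump each auto-configured section with `ruamel.yaml`, then prefix every line with `# ` so it lands in the user's `config.yml` commented out. A standalone sketch of that step (the policy values are illustrative):

import ruamel.yaml
import ruamel.yaml.compat

yaml_parser = ruamel.yaml.YAML()
yaml_parser.indent(mapping=2, sequence=4, offset=2)

policies = [{"name": "MemoizationPolicy"}, {"name": "TEDPolicy", "max_history": 5, "epochs": 100}]

stream = ruamel.yaml.compat.StringIO()
yaml_parser.dump(policies, stream)

commented_out = [f"# {line}\n" for line in stream.getvalue().split("\n") if line]
# Every dumped YAML line now starts with "# ", ready to be appended below the
# corresponding key in config.yml.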
+# https://rasa.com/docs/rasa/nlu/components/ +language: en + +pipeline: + - name: ConveRTTokenizer + - name: ConveRTFeaturizer + - name: RegexFeaturizer + - name: LexicalSyntacticFeaturizer + - name: CountVectorsFeaturizer + - name: CountVectorsFeaturizer + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: DIETClassifier + epochs: 100 + - name: EntitySynonymMapper + - name: ResponseSelector + epochs: 100 + +# Configuration for the Rasa Core policies. +# https://rasa.com/docs/rasa/core/policies/ +policies: + - name: MemoizationPolicy + - name: TEDPolicy + max_history: 5 + epochs: 100 + - name: RulePolicy diff --git a/rasa/importers/default_config_windows.yml b/rasa/importers/default_config_windows.yml new file mode 100644 index 000000000000..198c46680499 --- /dev/null +++ b/rasa/importers/default_config_windows.yml @@ -0,0 +1,27 @@ +# Configuration for the Rasa NLU components. +# https://rasa.com/docs/rasa/nlu/components/ +language: en + +pipeline: + - name: WhitespaceTokenizer + - name: RegexFeaturizer + - name: LexicalSyntacticFeaturizer + - name: CountVectorsFeaturizer + - name: CountVectorsFeaturizer + analyzer: "char_wb" + min_ngram: 1 + max_ngram: 4 + - name: DIETClassifier + epochs: 100 + - name: EntitySynonymMapper + - name: ResponseSelector + epochs: 100 + +# Configuration for the Rasa Core policies. +# https://rasa.com/docs/rasa/core/policies/ +policies: + - name: MemoizationPolicy + - name: TEDPolicy + max_history: 5 + epochs: 100 + - name: RulePolicy diff --git a/rasa/importers/importer.py b/rasa/importers/importer.py index 6663ff848ee4..9724779457b9 100644 --- a/rasa/importers/importer.py +++ b/rasa/importers/importer.py @@ -160,7 +160,7 @@ def _importer_from_dict( try: importer_class = common_utils.class_from_module_path(module_path) except (AttributeError, ImportError): - logging.warning("Importer '{}' not found.".format(module_path)) + logging.warning(f"Importer '{module_path}' not found.") return None constructor_arguments = common_utils.minimal_kwargs( diff --git a/rasa/importers/multi_project.py b/rasa/importers/multi_project.py index e18bd7922c77..17dff5af01af 100644 --- a/rasa/importers/multi_project.py +++ b/rasa/importers/multi_project.py @@ -7,12 +7,11 @@ import rasa.utils.io as io_utils from rasa.core.domain import Domain from rasa.core.interpreter import RegexInterpreter, NaturalLanguageInterpreter -from rasa.core.training.dsl import StoryFileReader from rasa.importers.importer import TrainingDataImporter from rasa.importers import utils from rasa.nlu.training_data import TrainingData from rasa.core.training.structures import StoryGraph -import rasa.utils.common +from rasa.utils.common import raise_warning, mark_as_experimental_feature logger = logging.getLogger(__name__) @@ -31,8 +30,9 @@ def __init__( else: self._domain_paths = [] self._story_paths = [] + self._e2e_story_paths = [] self._nlu_paths = [] - self._imports = set() + self._imports = [] self._additional_paths = training_data_paths or [] self._project_directory = project_directory or os.path.dirname(config_file) @@ -41,18 +41,14 @@ def __init__( extra_story_files, extra_nlu_files = data.get_core_nlu_files( training_data_paths ) - self._story_paths += list(extra_story_files) - self._nlu_paths += list(extra_nlu_files) + self._story_paths += extra_story_files + self._nlu_paths += extra_nlu_files logger.debug( - "Selected projects: {}".format( - "".join(["\n-{}".format(i) for i in self._imports]) - ) + "Selected projects: {}".format("".join([f"\n-{i}" for i in self._imports])) ) - 
rasa.utils.common.mark_as_experimental_feature( - feature_name="MultiProjectImporter" - ) + mark_as_experimental_feature(feature_name="MultiProjectImporter") def _init_from_path(self, path: Text) -> None: if os.path.isfile(path): @@ -68,17 +64,21 @@ def _init_from_file(self, path: Text) -> None: parent_directory = os.path.dirname(path) self._init_from_dict(config, parent_directory) else: - logger.warning( - "'{}' does not exist or is not a valid config file.".format(path) - ) + raise_warning(f"'{path}' does not exist or is not a valid config file.") def _init_from_dict(self, _dict: Dict[Text, Any], parent_directory: Text) -> None: imports = _dict.get("imports") or [] - imports = {os.path.join(parent_directory, i) for i in imports} + imports = [os.path.join(parent_directory, i) for i in imports] # clean out relative paths - imports = {os.path.abspath(i) for i in imports} - import_candidates = [p for p in imports if not self._is_explicitly_imported(p)] - self._imports = self._imports.union(import_candidates) + imports = [os.path.abspath(i) for i in imports] + + # remove duplication + import_candidates = [] + for i in imports: + if i not in import_candidates and not self._is_explicitly_imported(i): + import_candidates.append(i) + + self._imports.extend(import_candidates) # import config files from paths which have not been processed so far for p in import_candidates: @@ -88,14 +88,16 @@ def _is_explicitly_imported(self, path: Text) -> bool: return not self.no_skills_selected() and self.is_imported(path) def _init_from_directory(self, path: Text): - for parent, _, files in os.walk(path): + for parent, _, files in os.walk(path, followlinks=True): for file in files: full_path = os.path.join(parent, file) if not self.is_imported(full_path): # Check next file continue - if data.is_domain_file(full_path): + if data.is_end_to_end_conversation_test_file(full_path): + self._e2e_story_paths.append(full_path) + elif Domain.is_domain_file(full_path): self._domain_paths.append(full_path) elif data.is_nlu_file(full_path): self._nlu_paths.append(full_path) @@ -157,11 +159,11 @@ def _is_in_additional_paths(self, path: Text) -> bool: return included - def _is_in_imported_paths(self, path): + def _is_in_imported_paths(self, path) -> bool: return any([io_utils.is_subdirectory(path, i) for i in self._imports]) def add_import(self, path: Text) -> None: - self._imports.add(path) + self._imports.append(path) async def get_domain(self) -> Domain: domains = [Domain.load(path) for path in self._domain_paths] @@ -176,15 +178,16 @@ async def get_stories( use_e2e: bool = False, exclusion_percentage: Optional[int] = None, ) -> StoryGraph: - story_steps = await StoryFileReader.read_from_files( - self._story_paths, + story_paths = self._story_paths if not use_e2e else self._e2e_story_paths + + return await utils.story_graph_from_paths( + story_paths, await self.get_domain(), interpreter, template_variables, use_e2e, exclusion_percentage, ) - return StoryGraph(story_steps) async def get_config(self) -> Dict: return self.config diff --git a/rasa/importers/rasa.py b/rasa/importers/rasa.py index 2dafb319b2f0..0a594843b0f0 100644 --- a/rasa/importers/rasa.py +++ b/rasa/importers/rasa.py @@ -1,16 +1,14 @@ import logging -import os -from typing import Optional, Text, Union, List, Dict +from typing import Dict, List, Optional, Text, Union from rasa import data from rasa.core.domain import Domain, InvalidDomain -from rasa.core.interpreter import RegexInterpreter, NaturalLanguageInterpreter +from rasa.core.interpreter import 
NaturalLanguageInterpreter, RegexInterpreter from rasa.core.training.structures import StoryGraph -from rasa.core.training.dsl import StoryFileReader -from rasa.importers import utils +from rasa.importers import utils, autoconfig from rasa.importers.importer import TrainingDataImporter from rasa.nlu.training_data import TrainingData -from rasa.utils import io as io_utils +from rasa.utils.common import raise_warning logger = logging.getLogger(__name__) @@ -24,10 +22,6 @@ def __init__( domain_path: Optional[Text] = None, training_data_paths: Optional[Union[List[Text], Text]] = None, ): - if config_file and os.path.exists(config_file): - self.config = io_utils.read_config_file(config_file) - else: - self.config = {} self._domain_path = domain_path @@ -35,6 +29,8 @@ def __init__( training_data_paths ) + self.config = autoconfig.get_configuration(config_file) + async def get_config(self) -> Dict: return self.config @@ -46,7 +42,7 @@ async def get_stories( exclusion_percentage: Optional[int] = None, ) -> StoryGraph: - story_steps = await StoryFileReader.read_from_files( + return await utils.story_graph_from_paths( self._story_files, await self.get_domain(), interpreter, @@ -54,7 +50,6 @@ async def get_stories( use_e2e, exclusion_percentage, ) - return StoryGraph(story_steps) async def get_nlu_data(self, language: Optional[Text] = "en") -> TrainingData: return utils.training_data_from_paths(self._nlu_files, language) @@ -65,10 +60,9 @@ async def get_domain(self) -> Domain: domain = Domain.load(self._domain_path) domain.check_missing_templates() except InvalidDomain as e: - logger.warning( - "Loading domain from '{}' failed. Using empty domain. Error: '{}'".format( - self._domain_path, e.message - ) + raise_warning( + f"Loading domain from '{self._domain_path}' failed. Using " + f"empty domain. 
Error: '{e.message}'" ) return domain diff --git a/rasa/importers/utils.py b/rasa/importers/utils.py index e42f5bf7c1c4..3e4a603cc61c 100644 --- a/rasa/importers/utils.py +++ b/rasa/importers/utils.py @@ -1,12 +1,30 @@ -from typing import Iterable, Text +from typing import Iterable, Text, Optional, Dict, List +from rasa.core.domain import Domain +from rasa.core.interpreter import NaturalLanguageInterpreter, RegexInterpreter +from rasa.core.training.structures import StoryGraph from rasa.nlu.training_data import TrainingData def training_data_from_paths(paths: Iterable[Text], language: Text) -> TrainingData: from rasa.nlu.training_data import loading - training_datas = [loading.load_data(nlu_file, language) for nlu_file in paths] - merged_training_data = TrainingData().merge(*training_datas) - merged_training_data.fill_response_phrases() - return merged_training_data + training_data_sets = [loading.load_data(nlu_file, language) for nlu_file in paths] + return TrainingData().merge(*training_data_sets) + + +async def story_graph_from_paths( + files: List[Text], + domain: Domain, + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + template_variables: Optional[Dict] = None, + use_e2e: bool = False, + exclusion_percentage: Optional[int] = None, +) -> StoryGraph: + + from rasa.core.training import loading + + story_steps = await loading.load_data_from_files( + files, domain, interpreter, template_variables, use_e2e, exclusion_percentage + ) + return StoryGraph(story_steps) diff --git a/rasa/jupyter.py b/rasa/jupyter.py index 57d286b311ce..cce947812675 100644 --- a/rasa/jupyter.py +++ b/rasa/jupyter.py @@ -4,7 +4,6 @@ from typing import Any, Dict, Text, Optional from rasa.cli.utils import print_success, print_error from rasa.core.interpreter import NaturalLanguageInterpreter, RasaNLUInterpreter -import rasa.model as model if typing.TYPE_CHECKING: from rasa.core.agent import Agent @@ -16,6 +15,7 @@ def pprint(obj: Any): def chat( model_path: Optional[Text] = None, + endpoints: Optional[Text] = None, agent: Optional["Agent"] = None, interpreter: Optional[NaturalLanguageInterpreter] = None, ) -> None: @@ -23,6 +23,7 @@ def chat( Args: model_path: Path to a combined Rasa model. + endpoints: Path to a yaml with the action server is custom actions are defined. agent: Rasa Core agent (used if no Rasa model given). interpreter: Rasa NLU interpreter (used with Rasa Core agent if no Rasa model is given). @@ -31,7 +32,7 @@ def chat( if model_path: from rasa.run import create_agent - agent = create_agent(model_path) + agent = create_agent(model_path, endpoints) elif agent is not None and interpreter is not None: # HACK: this skips loading the interpreter and directly @@ -47,7 +48,7 @@ def chat( ) return - print ("Your bot is ready to talk! Type your messages here or send '/stop'.") + print("Your bot is ready to talk! 
Type your messages here or send '/stop'.") loop = asyncio.get_event_loop() while True: message = input() diff --git a/rasa/model.py b/rasa/model.py index f4b1375e728a..47684c66bb3e 100644 --- a/rasa/model.py +++ b/rasa/model.py @@ -4,15 +4,19 @@ import shutil import tempfile import typing -from typing import Text, Tuple, Union, Optional, List, Dict +from pathlib import Path +from typing import Text, Tuple, Union, Optional, List, Dict, NamedTuple import rasa.utils.io from rasa.cli.utils import print_success, create_output_path from rasa.constants import ( DEFAULT_MODELS_PATH, - CONFIG_MANDATORY_KEYS_CORE, - CONFIG_MANDATORY_KEYS_NLU, - CONFIG_MANDATORY_KEYS, + CONFIG_KEYS_CORE, + CONFIG_KEYS_NLU, + CONFIG_KEYS, + DEFAULT_DOMAIN_PATH, + DEFAULT_CORE_SUBDIRECTORY_NAME, + DEFAULT_NLU_SUBDIRECTORY_NAME, ) from rasa.core.utils import get_dict_hash @@ -22,25 +26,100 @@ if typing.TYPE_CHECKING: from rasa.importers.importer import TrainingDataImporter -# Type alias for the fingerprint -Fingerprint = Dict[Text, Union[Text, List[Text], int, float]] logger = logging.getLogger(__name__) + +# Type alias for the fingerprint +Fingerprint = Dict[Text, Union[Text, List[Text], int, float]] + FINGERPRINT_FILE_PATH = "fingerprint.json" FINGERPRINT_CONFIG_KEY = "config" FINGERPRINT_CONFIG_CORE_KEY = "core-config" FINGERPRINT_CONFIG_NLU_KEY = "nlu-config" -FINGERPRINT_DOMAIN_KEY = "domain" +FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY = "domain" +FINGERPRINT_NLG_KEY = "nlg" FINGERPRINT_RASA_VERSION_KEY = "version" FINGERPRINT_STORIES_KEY = "stories" FINGERPRINT_NLU_DATA_KEY = "messages" FINGERPRINT_TRAINED_AT_KEY = "trained_at" +class Section(NamedTuple): + """Defines relevant fingerprint sections which are used to decide whether a model + should be retrained.""" + + name: Text + relevant_keys: List[Text] + + +SECTION_CORE = Section( + name="Core model", + relevant_keys=[ + FINGERPRINT_CONFIG_KEY, + FINGERPRINT_CONFIG_CORE_KEY, + FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY, + FINGERPRINT_STORIES_KEY, + FINGERPRINT_RASA_VERSION_KEY, + ], +) +SECTION_NLU = Section( + name="NLU model", + relevant_keys=[ + FINGERPRINT_CONFIG_KEY, + FINGERPRINT_CONFIG_NLU_KEY, + FINGERPRINT_NLU_DATA_KEY, + FINGERPRINT_RASA_VERSION_KEY, + ], +) +SECTION_NLG = Section(name="NLG templates", relevant_keys=[FINGERPRINT_NLG_KEY]) + + +class FingerprintComparisonResult: + def __init__( + self, + nlu: bool = True, + core: bool = True, + nlg: bool = True, + force_training: bool = False, + ): + """Creates a `FingerprintComparisonResult` instance. + + Args: + nlu: `True` if the NLU model should be retrained. + core: `True` if the Core model should be retrained. + nlg: `True` if the responses in the domain should be updated. + force_training: `True` if a training of all parts is forced. + """ + self.nlu = nlu + self.core = core + self.nlg = nlg + self.force_training = force_training + + def is_training_required(self) -> bool: + """Check if anything has to be retrained.""" + + return any([self.nlg, self.nlu, self.core, self.force_training]) + + def should_retrain_core(self) -> bool: + """Check if the Core model has to be updated.""" + + return self.force_training or self.core + + def should_retrain_nlg(self) -> bool: + """Check if the responses have to be updated.""" + + return self.should_retrain_core() or self.nlg + + def should_retrain_nlu(self) -> bool: + """Check if the NLU model has to be updated.""" + + return self.force_training or self.nlu + + def get_model(model_path: Text = DEFAULT_MODELS_PATH) -> TempDirectoryPath: - """Gets a model and unpacks it. 
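A small sketch of how the new comparison result drives retraining decisions (the flag values are chosen for illustration):

from rasa.model import FingerprintComparisonResult

comparison = FingerprintComparisonResult(core=False, nlu=True, nlg=False)

comparison.is_training_required()  # True  -> something changed (the NLU data)
comparison.should_retrain_core()   # False -> the Core model can be reused
comparison.should_retrain_nlu()    # True  -> the NLU model must be retrained
comparison.should_retrain_nlg()    # False -> responses unchanged, Core reused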
Raises a `ModelNotFound` exception if + """Get a model and unpack it. Raises a `ModelNotFound` exception if no model could be found at the provided path. Args: @@ -54,24 +133,22 @@ def get_model(model_path: Text = DEFAULT_MODELS_PATH) -> TempDirectoryPath: if not model_path: raise ModelNotFound("No path specified.") elif not os.path.exists(model_path): - raise ModelNotFound("No file or directory at '{}'.".format(model_path)) + raise ModelNotFound(f"No file or directory at '{model_path}'.") if os.path.isdir(model_path): model_path = get_latest_model(model_path) if not model_path: raise ModelNotFound( - "Could not find any Rasa model files in '{}'.".format(model_path) + f"Could not find any Rasa model files in '{model_path}'." ) elif not model_path.endswith(".tar.gz"): - raise ModelNotFound( - "Path '{}' does not point to a Rasa model file.".format(model_path) - ) + raise ModelNotFound(f"Path '{model_path}' does not point to a Rasa model file.") return unpack_model(model_path) def get_latest_model(model_path: Text = DEFAULT_MODELS_PATH) -> Optional[Text]: - """Gets the latest model from a path. + """Get the latest model from a path. Args: model_path: Path to a directory containing zipped models. @@ -92,9 +169,9 @@ def get_latest_model(model_path: Text = DEFAULT_MODELS_PATH) -> Optional[Text]: def unpack_model( - model_file: Text, working_directory: Optional[Text] = None + model_file: Text, working_directory: Optional[Union[Path, Text]] = None ) -> TempDirectoryPath: - """Unpacks a zipped Rasa model. + """Unpack a zipped Rasa model. Args: model_file: Path to zipped model. @@ -110,23 +187,22 @@ def unpack_model( if working_directory is None: working_directory = tempfile.mkdtemp() - tar = tarfile.open(model_file) - - # cast `working_directory` as str for py3.5 compatibility - working_directory = str(working_directory) - # All files are in a subdirectory. - tar.extractall(working_directory) - tar.close() - logger.debug("Extracted model to '{}'.".format(working_directory)) + try: + with tarfile.open(model_file, mode="r:gz") as tar: + tar.extractall(working_directory) + logger.debug(f"Extracted model to '{working_directory}'.") + except Exception as e: + logger.error(f"Failed to extract model at {model_file}. Error: {e}") + raise return TempDirectoryPath(working_directory) def get_model_subdirectories( - unpacked_model_path: Text + unpacked_model_path: Text, ) -> Tuple[Optional[Text], Optional[Text]]: - """Returns paths for Core and NLU model directories, if they exist. + """Return paths for Core and NLU model directories, if they exist. If neither directories exist, a `ModelNotFound` exception is raised. Args: @@ -137,8 +213,8 @@ def get_model_subdirectories( path to NLU subdirectory if it exists or `None` otherwise). """ - core_path = os.path.join(unpacked_model_path, "core") - nlu_path = os.path.join(unpacked_model_path, "nlu") + core_path = os.path.join(unpacked_model_path, DEFAULT_CORE_SUBDIRECTORY_NAME) + nlu_path = os.path.join(unpacked_model_path, DEFAULT_NLU_SUBDIRECTORY_NAME) if not os.path.isdir(core_path): core_path = None @@ -161,7 +237,7 @@ def create_package_rasa( output_filename: Text, fingerprint: Optional[Fingerprint] = None, ) -> Text: - """Creates a zipped Rasa model from trained model files. + """Create a zipped Rasa model from trained model files. 
Args: training_directory: Path to the directory which contains the trained @@ -191,7 +267,7 @@ def create_package_rasa( async def model_fingerprint(file_importer: "TrainingDataImporter") -> Fingerprint: - """Creates a model fingerprint from its used configuration and training data. + """Create a model fingerprint from its used configuration and training data. Args: file_importer: File importer which provides the training data and model config. @@ -200,6 +276,8 @@ async def model_fingerprint(file_importer: "TrainingDataImporter") -> Fingerprin The fingerprint. """ + from rasa.core.domain import Domain + import rasa import time @@ -208,17 +286,20 @@ async def model_fingerprint(file_importer: "TrainingDataImporter") -> Fingerprin stories = await file_importer.get_stories() nlu_data = await file_importer.get_nlu_data() + domain_dict = domain.as_dict() + responses = domain_dict.pop("responses") + domain_without_nlg = Domain.from_dict(domain_dict) + return { - FINGERPRINT_CONFIG_KEY: _get_hash_of_config( - config, exclude_keys=CONFIG_MANDATORY_KEYS - ), + FINGERPRINT_CONFIG_KEY: _get_hash_of_config(config, exclude_keys=CONFIG_KEYS), FINGERPRINT_CONFIG_CORE_KEY: _get_hash_of_config( - config, include_keys=CONFIG_MANDATORY_KEYS_CORE + config, include_keys=CONFIG_KEYS_CORE ), FINGERPRINT_CONFIG_NLU_KEY: _get_hash_of_config( - config, include_keys=CONFIG_MANDATORY_KEYS_NLU + config, include_keys=CONFIG_KEYS_NLU ), - FINGERPRINT_DOMAIN_KEY: hash(domain), + FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY: hash(domain_without_nlg), + FINGERPRINT_NLG_KEY: get_dict_hash(responses), FINGERPRINT_NLU_DATA_KEY: hash(nlu_data), FINGERPRINT_STORIES_KEY: hash(stories), FINGERPRINT_TRAINED_AT_KEY: time.time(), @@ -236,13 +317,13 @@ def _get_hash_of_config( keys = include_keys or list(filter(lambda k: k not in exclude_keys, config.keys())) - sub_config = dict((k, config[k]) for k in keys if k in config) + sub_config = {k: config[k] for k in keys if k in config} return get_dict_hash(sub_config) def fingerprint_from_path(model_path: Text) -> Fingerprint: - """Loads a persisted fingerprint. + """Load a persisted fingerprint. Args: model_path: Path to directory containing the fingerprint. @@ -262,77 +343,31 @@ def fingerprint_from_path(model_path: Text) -> Fingerprint: def persist_fingerprint(output_path: Text, fingerprint: Fingerprint): - """Persists a model fingerprint. + """Persist a model fingerprint. Args: output_path: Directory in which the fingerprint should be saved. fingerprint: The fingerprint to be persisted. """ - from rasa.core.utils import dump_obj_as_json_to_file path = os.path.join(output_path, FINGERPRINT_FILE_PATH) - dump_obj_as_json_to_file(path, fingerprint) + rasa.utils.io.dump_obj_as_json_to_file(path, fingerprint) -def core_fingerprint_changed( - fingerprint1: Fingerprint, fingerprint2: Fingerprint +def did_section_fingerprint_change( + fingerprint1: Fingerprint, fingerprint2: Fingerprint, section: Section ) -> bool: - """Checks whether the fingerprints of the Core model changed. - - Args: - fingerprint1: A fingerprint. - fingerprint2: Another fingerprint. - - Returns: - `True` if the fingerprint for the Core model changed, else `False`. 
- - """ - relevant_keys = [ - FINGERPRINT_CONFIG_KEY, - FINGERPRINT_CONFIG_CORE_KEY, - FINGERPRINT_DOMAIN_KEY, - FINGERPRINT_STORIES_KEY, - FINGERPRINT_RASA_VERSION_KEY, - ] - - for k in relevant_keys: - if fingerprint1.get(k) != fingerprint2.get(k): - logger.info("Data ({}) for Core model changed.".format(k)) - return True - return False - - -def nlu_fingerprint_changed( - fingerprint1: Fingerprint, fingerprint2: Fingerprint -) -> bool: - """Checks whether the fingerprints of the NLU model changed. - - Args: - fingerprint1: A fingerprint. - fingerprint2: Another fingerprint. - - Returns: - `True` if the fingerprint for the NLU model changed, else `False`. - - """ - - relevant_keys = [ - FINGERPRINT_CONFIG_KEY, - FINGERPRINT_CONFIG_NLU_KEY, - FINGERPRINT_NLU_DATA_KEY, - FINGERPRINT_RASA_VERSION_KEY, - ] - - for k in relevant_keys: + """Check whether the fingerprint of a section has changed.""" + for k in section.relevant_keys: if fingerprint1.get(k) != fingerprint2.get(k): - logger.info("Data ({}) for NLU model changed.".format(k)) + logger.info(f"Data ({k}) for {section.name} section changed.") return True return False -def merge_model(source: Text, target: Text) -> bool: - """Merges two model directories. +def move_model(source: Text, target: Text) -> bool: + """Move two model directories. Args: source: The original folder which should be merged in another. @@ -346,12 +381,14 @@ def merge_model(source: Text, target: Text) -> bool: shutil.move(source, target) return True except Exception as e: - logging.debug(e) + logging.debug(f"Could not merge model: {e}") return False -def should_retrain(new_fingerprint: Fingerprint, old_model: Text, train_path: Text): - """Checks which component of a model should be retrained. +def should_retrain( + new_fingerprint: Fingerprint, old_model: Text, train_path: Text +) -> FingerprintComparisonResult: + """Check which components of a model should be retrained. Args: new_fingerprint: The fingerprint of the new model to be trained. @@ -359,29 +396,46 @@ def should_retrain(new_fingerprint: Fingerprint, old_model: Text, train_path: Te train_path: Path to the directory in which the new model will be trained. Returns: - A tuple of boolean values indicating whether Rasa Core and/or Rasa NLU needs + A FingerprintComparisonResult object indicating whether Rasa Core and/or Rasa NLU needs to be retrained or not. 
""" - retrain_nlu = retrain_core = True + fingerprint_comparison = FingerprintComparisonResult() if old_model is None or not os.path.exists(old_model): - return retrain_core, retrain_nlu + return fingerprint_comparison with unpack_model(old_model) as unpacked: last_fingerprint = fingerprint_from_path(unpacked) - old_core, old_nlu = get_model_subdirectories(unpacked) - if not core_fingerprint_changed(last_fingerprint, new_fingerprint): - target_path = os.path.join(train_path, "core") - retrain_core = not merge_model(old_core, target_path) + fingerprint_comparison = FingerprintComparisonResult( + core=did_section_fingerprint_change( + last_fingerprint, new_fingerprint, SECTION_CORE + ), + nlu=did_section_fingerprint_change( + last_fingerprint, new_fingerprint, SECTION_NLU + ), + nlg=did_section_fingerprint_change( + last_fingerprint, new_fingerprint, SECTION_NLG + ), + ) + + core_merge_failed = False + if not fingerprint_comparison.should_retrain_core(): + target_path = os.path.join(train_path, DEFAULT_CORE_SUBDIRECTORY_NAME) + core_merge_failed = not move_model(old_core, target_path) + fingerprint_comparison.core = core_merge_failed + + if not fingerprint_comparison.should_retrain_nlg() and core_merge_failed: + # If moving the Core model failed, we should also retrain NLG + fingerprint_comparison.nlg = True - if not nlu_fingerprint_changed(last_fingerprint, new_fingerprint): + if not fingerprint_comparison.should_retrain_nlu(): target_path = os.path.join(train_path, "nlu") - retrain_nlu = not merge_model(old_nlu, target_path) + fingerprint_comparison.nlu = not move_model(old_nlu, target_path) - return retrain_core, retrain_nlu + return fingerprint_comparison def package_model( @@ -392,7 +446,7 @@ def package_model( model_prefix: Text = "", ): """ - Compresses a trained model. + Compress a trained model. Args: fingerprint: fingerprint of the model @@ -415,3 +469,19 @@ def package_model( ) return output_directory + + +async def update_model_with_new_domain( + importer: "TrainingDataImporter", unpacked_model_path: Union[Path, Text] +) -> None: + """Overwrites the domain of an unpacked model with a new domain. + + Args: + importer: Importer which provides the new domain. + unpacked_model_path: Path to the unpacked model. 
+ """ + + model_path = Path(unpacked_model_path) / DEFAULT_CORE_SUBDIRECTORY_NAME + domain = await importer.get_domain() + + domain.persist(model_path / DEFAULT_DOMAIN_PATH) diff --git a/rasa/nlu/classifiers/classifier.py b/rasa/nlu/classifiers/classifier.py new file mode 100644 index 000000000000..ee9d5cc73373 --- /dev/null +++ b/rasa/nlu/classifiers/classifier.py @@ -0,0 +1,5 @@ +from rasa.nlu.components import Component + + +class IntentClassifier(Component): + pass diff --git a/rasa/nlu/classifiers/diet_classifier.py b/rasa/nlu/classifiers/diet_classifier.py new file mode 100644 index 000000000000..2488d73f08bd --- /dev/null +++ b/rasa/nlu/classifiers/diet_classifier.py @@ -0,0 +1,2017 @@ +import copy +import logging +from collections import defaultdict +from pathlib import Path + +import numpy as np +import os +import scipy.sparse +import tensorflow as tf +import tensorflow_addons as tfa + +from typing import Any, Dict, List, Optional, Text, Tuple, Union, Type, NamedTuple + +import rasa.utils.common as common_utils +import rasa.utils.io as io_utils +import rasa.nlu.utils.bilou_utils as bilou_utils +from rasa.nlu.featurizers.featurizer import Featurizer +from rasa.nlu.components import Component +from rasa.nlu.classifiers.classifier import IntentClassifier +from rasa.nlu.extractors.extractor import EntityExtractor +from rasa.nlu.test import determine_token_labels +from rasa.nlu.classifiers import LABEL_RANKING_LENGTH +from rasa.utils import train_utils +from rasa.utils.tensorflow import layers +from rasa.utils.tensorflow.transformer import TransformerEncoder +from rasa.utils.tensorflow.models import RasaModel +from rasa.utils.tensorflow.model_data import RasaModelData, FeatureSignature +from rasa.nlu.constants import ( + INTENT, + TEXT, + ENTITIES, + NO_ENTITY_TAG, + TOKENS_NAMES, + ENTITY_ATTRIBUTE_TYPE, + ENTITY_ATTRIBUTE_GROUP, + ENTITY_ATTRIBUTE_ROLE, +) +from rasa.nlu.config import RasaNLUModelConfig, InvalidConfigError +from rasa.nlu.training_data import TrainingData +from rasa.nlu.model import Metadata +from rasa.nlu.training_data import Message +from rasa.utils.tensorflow.constants import ( + LABEL, + HIDDEN_LAYERS_SIZES, + SHARE_HIDDEN_LAYERS, + TRANSFORMER_SIZE, + NUM_TRANSFORMER_LAYERS, + NUM_HEADS, + BATCH_SIZES, + BATCH_STRATEGY, + EPOCHS, + RANDOM_SEED, + LEARNING_RATE, + DENSE_DIMENSION, + RANKING_LENGTH, + LOSS_TYPE, + SIMILARITY_TYPE, + NUM_NEG, + SPARSE_INPUT_DROPOUT, + DENSE_INPUT_DROPOUT, + MASKED_LM, + ENTITY_RECOGNITION, + TENSORBOARD_LOG_DIR, + INTENT_CLASSIFICATION, + EVAL_NUM_EXAMPLES, + EVAL_NUM_EPOCHS, + UNIDIRECTIONAL_ENCODER, + DROP_RATE, + DROP_RATE_ATTENTION, + WEIGHT_SPARSITY, + NEGATIVE_MARGIN_SCALE, + REGULARIZATION_CONSTANT, + SCALE_LOSS, + USE_MAX_NEG_SIM, + MAX_NEG_SIM, + MAX_POS_SIM, + EMBEDDING_DIMENSION, + BILOU_FLAG, + KEY_RELATIVE_ATTENTION, + VALUE_RELATIVE_ATTENTION, + MAX_RELATIVE_POSITION, + SOFTMAX, + AUTO, + BALANCED, + TENSORBOARD_LOG_LEVEL, + CONCAT_DIMENSION, + FEATURIZERS, +) + + +logger = logging.getLogger(__name__) + + +SENTENCE = "sentence" +SEQUENCE = "sequence" +TEXT_SENTENCE_FEATURES = f"{TEXT}_{SENTENCE}_features" +LABEL_SENTENCE_FEATURES = f"{LABEL}_{SENTENCE}_features" +TEXT_SEQUENCE_FEATURES = f"{TEXT}_{SEQUENCE}_features" +LABEL_SEQUENCE_FEATURES = f"{LABEL}_{SEQUENCE}_features" +TEXT_SEQUENCE_LENGTH = f"{TEXT}_{SEQUENCE}_lengths" +LABEL_SEQUENCE_LENGTH = f"{LABEL}_{SEQUENCE}_lengths" +LABEL_IDS = f"{LABEL}_ids" +TAG_IDS = "tag_ids" + +POSSIBLE_TAGS = [ENTITY_ATTRIBUTE_TYPE, ENTITY_ATTRIBUTE_ROLE, ENTITY_ATTRIBUTE_GROUP] + 
+ +class EntityTagSpec(NamedTuple): + """Specification of an entity tag present in the training data.""" + + tag_name: Text + ids_to_tags: Dict[int, Text] + tags_to_ids: Dict[Text, int] + num_tags: int + + +class DIETClassifier(IntentClassifier, EntityExtractor): + """DIET (Dual Intent and Entity Transformer) is a multi-task architecture for + intent classification and entity recognition. + + The architecture is based on a transformer which is shared for both tasks. + A sequence of entity labels is predicted through a Conditional Random Field (CRF) + tagging layer on top of the transformer output sequence corresponding to the + input sequence of tokens. The transformer output for the ``__CLS__`` token and + intent labels are embedded into a single semantic vector space. We use the + dot-product loss to maximize the similarity with the target label and minimize + similarities with negative samples. + """ + + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [Featurizer] + + # please make sure to update the docs when changing a default parameter + defaults = { + # ## Architecture of the used neural network + # Hidden layer sizes for layers before the embedding layers for user message + # and labels. + # The number of hidden layers is equal to the length of the corresponding + # list. + HIDDEN_LAYERS_SIZES: {TEXT: [], LABEL: []}, + # Whether to share the hidden layer weights between user message and labels. + SHARE_HIDDEN_LAYERS: False, + # Number of units in transformer + TRANSFORMER_SIZE: 256, + # Number of transformer layers + NUM_TRANSFORMER_LAYERS: 2, + # Number of attention heads in transformer + NUM_HEADS: 4, + # If 'True' use key relative embeddings in attention + KEY_RELATIVE_ATTENTION: False, + # If 'True' use value relative embeddings in attention + VALUE_RELATIVE_ATTENTION: False, + # Max position for relative embeddings + MAX_RELATIVE_POSITION: None, + # Use a unidirectional or bidirectional encoder. + UNIDIRECTIONAL_ENCODER: False, + # ## Training parameters + # Initial and final batch sizes: + # Batch size will be linearly increased for each epoch. + BATCH_SIZES: [64, 256], + # Strategy used when creating batches. + # Can be either 'sequence' or 'balanced'. + BATCH_STRATEGY: BALANCED, + # Number of epochs to train + EPOCHS: 300, + # Set random seed to any 'int' to get reproducible results + RANDOM_SEED: None, + # Initial learning rate for the optimizer + LEARNING_RATE: 0.001, + # ## Parameters for embeddings + # Dimension size of embedding vectors + EMBEDDING_DIMENSION: 20, + # Default dense dimension to use if no dense features are present. + DENSE_DIMENSION: {TEXT: 512, LABEL: 20}, + # Default dimension to use for concatenating sequence and sentence features. + CONCAT_DIMENSION: {TEXT: 512, LABEL: 20}, + # The number of incorrect labels. The algorithm will minimize + # their similarity to the user input during training. + NUM_NEG: 20, + # Type of similarity measure to use, either 'auto' or 'cosine' or 'inner'. + SIMILARITY_TYPE: AUTO, + # The type of the loss function, either 'softmax' or 'margin'. + LOSS_TYPE: SOFTMAX, + # Number of top actions to normalize scores for loss type 'softmax'. + # Set to 0 to turn off normalization. + RANKING_LENGTH: 10, + # Indicates how similar the algorithm should try to make embedding vectors + # for correct labels. + # Should be 0.0 < ... < 1.0 for 'cosine' similarity type. + MAX_POS_SIM: 0.8, + # Maximum negative similarity for incorrect labels. + # Should be -1.0 < ... < 1.0 for 'cosine' similarity type. 
+        MAX_NEG_SIM: -0.4,
+        # If 'True' the algorithm only minimizes maximum similarity over
+        # incorrect intent labels, used only if 'loss_type' is set to 'margin'.
+        USE_MAX_NEG_SIM: True,
+        # If 'True' scale loss inversely proportional to the confidence
+        # of the correct prediction
+        SCALE_LOSS: False,
+        # ## Regularization parameters
+        # The scale of regularization
+        REGULARIZATION_CONSTANT: 0.002,
+        # The scale of how important it is to minimize the maximum similarity
+        # between embeddings of different labels,
+        # used only if 'loss_type' is set to 'margin'.
+        NEGATIVE_MARGIN_SCALE: 0.8,
+        # Dropout rate for encoder
+        DROP_RATE: 0.2,
+        # Dropout rate for attention
+        DROP_RATE_ATTENTION: 0,
+        # Sparsity of the weights in dense layers
+        WEIGHT_SPARSITY: 0.8,
+        # If 'True' apply dropout to sparse input tensors
+        SPARSE_INPUT_DROPOUT: True,
+        # If 'True' apply dropout to dense input tensors
+        DENSE_INPUT_DROPOUT: True,
+        # ## Evaluation parameters
+        # How often to calculate validation accuracy.
+        # Small values may hurt performance, e.g. model accuracy.
+        EVAL_NUM_EPOCHS: 20,
+        # How many examples to use for the hold-out validation set.
+        # Large values may hurt performance, e.g. model accuracy.
+        EVAL_NUM_EXAMPLES: 0,
+        # ## Model config
+        # If 'True' intent classification is trained and intents are predicted.
+        INTENT_CLASSIFICATION: True,
+        # If 'True' named entity recognition is trained and entities are predicted.
+        ENTITY_RECOGNITION: True,
+        # If 'True' random tokens of the input message will be masked and the model
+        # should predict those tokens.
+        MASKED_LM: False,
+        # 'BILOU_flag' determines whether to use BILOU tagging or not.
+        # If set to 'True' labelling is more rigorous, however more
+        # examples per entity are required.
+        # Rule of thumb: you should have more than 100 examples per entity.
+        BILOU_FLAG: True,
+        # If you want to use tensorboard to visualize training and validation metrics,
+        # set this option to a valid output directory.
+        TENSORBOARD_LOG_DIR: None,
+        # Define when training metrics for tensorboard should be logged.
+        # Either after every epoch or for every training step.
+        # Valid values: 'epoch' and 'minibatch'
+        TENSORBOARD_LOG_LEVEL: "epoch",
+        # Specify what features to use as sequence and sentence features.
+        # By default all features in the pipeline are used.
+        FEATURIZERS: [],
+    }
+
+    # init helpers
+    def _check_masked_lm(self) -> None:
+        if (
+            self.component_config[MASKED_LM]
+            and self.component_config[NUM_TRANSFORMER_LAYERS] == 0
+        ):
+            raise ValueError(
+                f"If number of transformer layers is 0, "
+                f"'{MASKED_LM}' option should be 'False'."
+            )
+
+    def _check_share_hidden_layers_sizes(self) -> None:
+        if self.component_config.get(SHARE_HIDDEN_LAYERS):
+            first_hidden_layer_sizes = next(
+                iter(self.component_config[HIDDEN_LAYERS_SIZES].values())
+            )
+            # check that all hidden layer sizes are the same
+            identical_hidden_layer_sizes = all(
+                current_hidden_layer_sizes == first_hidden_layer_sizes
+                for current_hidden_layer_sizes in self.component_config[
+                    HIDDEN_LAYERS_SIZES
+                ].values()
+            )
+            if not identical_hidden_layer_sizes:
+                raise ValueError(
+                    f"If hidden layer weights are shared, "
+                    f"{HIDDEN_LAYERS_SIZES} must coincide."
+ ) + + def _check_config_parameters(self) -> None: + self.component_config = train_utils.check_deprecated_options( + self.component_config + ) + + self._check_masked_lm() + self._check_share_hidden_layers_sizes() + + self.component_config = train_utils.update_similarity_type( + self.component_config + ) + self.component_config = train_utils.update_evaluation_parameters( + self.component_config + ) + + # package safety checks + @classmethod + def required_packages(cls) -> List[Text]: + return ["tensorflow"] + + def __init__( + self, + component_config: Optional[Dict[Text, Any]] = None, + index_label_id_mapping: Optional[Dict[int, Text]] = None, + entity_tag_specs: Optional[List[EntityTagSpec]] = None, + model: Optional[RasaModel] = None, + ) -> None: + """Declare instance variables with default values.""" + + if component_config is not None and EPOCHS not in component_config: + common_utils.raise_warning( + f"Please configure the number of '{EPOCHS}' in your configuration file." + f" We will change the default value of '{EPOCHS}' in the future to 1. " + ) + + super().__init__(component_config) + + self._check_config_parameters() + + # transform numbers to labels + self.index_label_id_mapping = index_label_id_mapping + + self._entity_tag_specs = entity_tag_specs + + self.model = model + + self._label_data: Optional[RasaModelData] = None + self._data_example: Optional[Dict[Text, List[np.ndarray]]] = None + + @property + def label_key(self) -> Optional[Text]: + return LABEL_IDS if self.component_config[INTENT_CLASSIFICATION] else None + + @staticmethod + def model_class() -> Type[RasaModel]: + return DIET + + # training data helpers: + @staticmethod + def _label_id_index_mapping( + training_data: TrainingData, attribute: Text + ) -> Dict[Text, int]: + """Create label_id dictionary.""" + + distinct_label_ids = { + example.get(attribute) for example in training_data.intent_examples + } - {None} + return { + label_id: idx for idx, label_id in enumerate(sorted(distinct_label_ids)) + } + + @staticmethod + def _invert_mapping(mapping: Dict) -> Dict: + return {value: key for key, value in mapping.items()} + + def _create_entity_tag_specs( + self, training_data: TrainingData + ) -> List[EntityTagSpec]: + """Create entity tag specifications with their respective tag id mappings.""" + + _tag_specs = [] + + for tag_name in POSSIBLE_TAGS: + if self.component_config[BILOU_FLAG]: + tag_id_index_mapping = bilou_utils.build_tag_id_dict( + training_data, tag_name + ) + else: + tag_id_index_mapping = self._tag_id_index_mapping_for( + tag_name, training_data + ) + + if tag_id_index_mapping: + _tag_specs.append( + EntityTagSpec( + tag_name=tag_name, + tags_to_ids=tag_id_index_mapping, + ids_to_tags=self._invert_mapping(tag_id_index_mapping), + num_tags=len(tag_id_index_mapping), + ) + ) + + return _tag_specs + + @staticmethod + def _tag_id_index_mapping_for( + tag_name: Text, training_data: TrainingData + ) -> Optional[Dict[Text, int]]: + """Create mapping from tag name to id.""" + if tag_name == ENTITY_ATTRIBUTE_ROLE: + distinct_tags = training_data.entity_roles + elif tag_name == ENTITY_ATTRIBUTE_GROUP: + distinct_tags = training_data.entity_groups + else: + distinct_tags = training_data.entities + + distinct_tags = distinct_tags - {NO_ENTITY_TAG} - {None} + + if not distinct_tags: + return None + + tag_id_dict = { + tag_id: idx for idx, tag_id in enumerate(sorted(distinct_tags), 1) + } + # NO_ENTITY_TAG corresponds to non-entity which should correspond to 0 index + # needed for correct prediction for padding 
+ tag_id_dict[NO_ENTITY_TAG] = 0 + + return tag_id_dict + + @staticmethod + def _find_example_for_label( + label: Text, examples: List[Message], attribute: Text + ) -> Optional[Message]: + for ex in examples: + if ex.get(attribute) == label: + return ex + return None + + def _check_labels_features_exist( + self, labels_example: List[Message], attribute: Text + ) -> bool: + """Checks if all labels have features set.""" + + return all( + label_example.features_present( + attribute, self.component_config[FEATURIZERS] + ) + for label_example in labels_example + ) + + def _extract_features( + self, message: Message, attribute: Text + ) -> Tuple[ + Optional[scipy.sparse.spmatrix], + Optional[scipy.sparse.spmatrix], + Optional[np.ndarray], + Optional[np.ndarray], + ]: + + ( + sparse_sequence_features, + sparse_sentence_features, + ) = message.get_sparse_features(attribute, self.component_config[FEATURIZERS]) + dense_sequence_features, dense_sentence_features = message.get_dense_features( + attribute, self.component_config[FEATURIZERS] + ) + + if dense_sequence_features is not None and sparse_sequence_features is not None: + if dense_sequence_features.shape[0] != sparse_sequence_features.shape[0]: + raise ValueError( + f"Sequence dimensions for sparse and dense sequence features " + f"don't coincide in '{message.text}' for attribute '{attribute}'." + ) + if dense_sentence_features is not None and sparse_sentence_features is not None: + if dense_sentence_features.shape[0] != sparse_sentence_features.shape[0]: + raise ValueError( + f"Sequence dimensions for sparse and dense sentence features " + f"don't coincide in '{message.text}' for attribute '{attribute}'." + ) + + # If we don't use the transformer and we don't want to do entity recognition, + # to speed up training take only the sentence features as feature vector. + # We would not make use of the sequence anyway in this setup. Carrying over + # those features to the actual training process takes quite some time. + if ( + self.component_config[NUM_TRANSFORMER_LAYERS] == 0 + and not self.component_config[ENTITY_RECOGNITION] + and attribute != INTENT + ): + sparse_sequence_features = None + dense_sequence_features = None + + return ( + sparse_sequence_features, + sparse_sentence_features, + dense_sequence_features, + dense_sentence_features, + ) + + def _check_input_dimension_consistency(self, model_data: RasaModelData) -> None: + """Checks if features have same dimensionality if hidden layers are shared.""" + + if self.component_config.get(SHARE_HIDDEN_LAYERS): + num_text_sentence_features = model_data.feature_dimension( + TEXT_SENTENCE_FEATURES + ) + num_label_sentence_features = model_data.feature_dimension( + LABEL_SENTENCE_FEATURES + ) + num_text_sequence_features = model_data.feature_dimension( + TEXT_SEQUENCE_FEATURES + ) + num_label_sequence_features = model_data.feature_dimension( + LABEL_SEQUENCE_FEATURES + ) + + if ( + num_text_sentence_features > 0 + and num_label_sentence_features > 0 + and num_text_sentence_features != num_label_sentence_features + ) or ( + num_text_sequence_features > 0 + and num_label_sequence_features > 0 + and num_text_sequence_features != num_label_sequence_features + ): + raise ValueError( + "If embeddings are shared text features and label features " + "must coincide. Check the output dimensions of previous components." 
+ ) + + def _extract_labels_precomputed_features( + self, label_examples: List[Message], attribute: Text = INTENT + ) -> Tuple[List[np.ndarray], List[np.ndarray]]: + """Collects precomputed encodings.""" + + sparse_sequence_features = [] + sparse_sentence_features = [] + dense_sequence_features = [] + dense_sentence_features = [] + + for e in label_examples: + ( + _sparse_sequence, + _sparse_sentence, + _dense_sequence, + _dense_sentence, + ) = self._extract_features(e, attribute) + if _sparse_sequence is not None: + sparse_sequence_features.append(_sparse_sequence) + if _sparse_sentence is not None: + sparse_sentence_features.append(_sparse_sentence) + if _dense_sequence is not None: + dense_sequence_features.append(_dense_sequence) + if _dense_sentence is not None: + dense_sentence_features.append(_dense_sentence) + + sparse_sequence_features = np.array(sparse_sequence_features) + sparse_sentence_features = np.array(sparse_sentence_features) + dense_sequence_features = np.array(dense_sequence_features) + dense_sentence_features = np.array(dense_sentence_features) + + return ( + [sparse_sequence_features, dense_sequence_features], + [sparse_sentence_features, dense_sentence_features], + ) + + @staticmethod + def _compute_default_label_features( + labels_example: List[Message], + ) -> List[np.ndarray]: + """Computes one-hot representation for the labels.""" + + logger.debug("No label features found. Computing default label features.") + + eye_matrix = np.eye(len(labels_example), dtype=np.float32) + # add sequence dimension to one-hot labels + return [np.array([np.expand_dims(a, 0) for a in eye_matrix])] + + def _create_label_data( + self, + training_data: TrainingData, + label_id_dict: Dict[Text, int], + attribute: Text, + ) -> RasaModelData: + """Create matrix with label_ids encoded in rows as bag of words. + + Find a training example for each label and get the encoded features + from the corresponding Message object. + If the features are already computed, fetch them from the message object + else compute a one hot encoding for the label as the feature vector. + """ + + # Collect one example for each label + labels_idx_examples = [] + for label_name, idx in label_id_dict.items(): + label_example = self._find_example_for_label( + label_name, training_data.intent_examples, attribute + ) + labels_idx_examples.append((idx, label_example)) + + # Sort the list of tuples based on label_idx + labels_idx_examples = sorted(labels_idx_examples, key=lambda x: x[0]) + labels_example = [example for (_, example) in labels_idx_examples] + + # Collect features, precomputed if they exist, else compute on the fly + if self._check_labels_features_exist(labels_example, attribute): + ( + sequence_features, + sentence_features, + ) = self._extract_labels_precomputed_features(labels_example, attribute) + else: + sequence_features = None + sentence_features = self._compute_default_label_features(labels_example) + + label_data = RasaModelData() + label_data.add_features(LABEL_SEQUENCE_FEATURES, sequence_features) + label_data.add_features(LABEL_SENTENCE_FEATURES, sentence_features) + + if label_data.feature_not_exist( + LABEL_SENTENCE_FEATURES + ) and label_data.feature_not_exist(LABEL_SEQUENCE_FEATURES): + raise ValueError( + "No label features are present. Please check your configuration file." 
+ ) + + label_ids = np.array([idx for (idx, _) in labels_idx_examples]) + # explicitly add last dimension to label_ids + # to track correctly dynamic sequences + label_data.add_features(LABEL_IDS, [np.expand_dims(label_ids, -1)]) + + label_data.add_lengths(LABEL_SEQUENCE_LENGTH, LABEL_SEQUENCE_FEATURES) + + return label_data + + def _use_default_label_features(self, label_ids: np.ndarray) -> List[np.ndarray]: + all_label_features = self._label_data.get(LABEL_SENTENCE_FEATURES)[0] + return [np.array([all_label_features[label_id] for label_id in label_ids])] + + def _create_model_data( + self, + training_data: List[Message], + label_id_dict: Optional[Dict[Text, int]] = None, + label_attribute: Optional[Text] = None, + training: bool = True, + ) -> RasaModelData: + """Prepare data for training and create a RasaModelData object""" + + X_sparse_sequence = [] + X_sparse_sentence = [] + X_dense_sequence = [] + X_dense_sentence = [] + Y_sparse_sequence = [] + Y_sparse_sentence = [] + Y_dense_sequence = [] + Y_dense_sentence = [] + label_ids = [] + tag_name_to_tag_ids = defaultdict(list) + + for example in training_data: + if label_attribute is None or example.get(label_attribute): + ( + _sparse_sequence, + _sparse_sentence, + _dense_sequence, + _dense_sentence, + ) = self._extract_features(example, TEXT) + if _sparse_sequence is not None: + X_sparse_sequence.append(_sparse_sequence) + if _sparse_sentence is not None: + X_sparse_sentence.append(_sparse_sentence) + if _dense_sequence is not None: + X_dense_sequence.append(_dense_sequence) + if _dense_sentence is not None: + X_dense_sentence.append(_dense_sentence) + + # only add features for intent labels during training + if training and example.get(label_attribute): + ( + _sparse_sequence, + _sparse_sentence, + _dense_sequence, + _dense_sentence, + ) = self._extract_features(example, label_attribute) + if _sparse_sequence is not None: + Y_sparse_sequence.append(_sparse_sequence) + if _sparse_sentence is not None: + Y_sparse_sentence.append(_sparse_sentence) + if _dense_sequence is not None: + Y_dense_sequence.append(_dense_sequence) + if _dense_sentence is not None: + Y_dense_sentence.append(_dense_sentence) + + if label_id_dict: + label_ids.append(label_id_dict[example.get(label_attribute)]) + + # only add tag_ids during training + if training and self.component_config.get(ENTITY_RECOGNITION): + for tag_spec in self._entity_tag_specs: + tag_name_to_tag_ids[tag_spec.tag_name].append( + self._tag_ids_for_crf(example, tag_spec) + ) + + X_sparse_sequence = np.array(X_sparse_sequence, dtype=object) + X_sparse_sentence = np.array(X_sparse_sentence, dtype=object) + X_dense_sequence = np.array(X_dense_sequence, dtype=object) + X_dense_sentence = np.array(X_dense_sentence, dtype=object) + Y_sparse_sequence = np.array(Y_sparse_sequence, dtype=object) + Y_sparse_sentence = np.array(Y_sparse_sentence, dtype=object) + Y_dense_sequence = np.array(Y_dense_sequence, dtype=object) + Y_dense_sentence = np.array(Y_dense_sentence, dtype=object) + label_ids = np.array(label_ids) + tag_name_to_tag_ids = { + tag_name: np.array(tag_ids) + for tag_name, tag_ids in tag_name_to_tag_ids.items() + } + + model_data = RasaModelData(label_key=self.label_key) + model_data.add_features( + TEXT_SEQUENCE_FEATURES, [X_sparse_sequence, X_dense_sequence] + ) + model_data.add_features( + TEXT_SENTENCE_FEATURES, [X_sparse_sentence, X_dense_sentence] + ) + model_data.add_features( + LABEL_SEQUENCE_FEATURES, [Y_sparse_sequence, Y_dense_sequence] + ) + model_data.add_features( + 
LABEL_SENTENCE_FEATURES, [Y_sparse_sentence, Y_dense_sentence] + ) + + if ( + label_attribute + and model_data.feature_not_exist(LABEL_SENTENCE_FEATURES) + and model_data.feature_not_exist(LABEL_SEQUENCE_FEATURES) + ): + # no label features are present, get default features from _label_data + model_data.add_features( + LABEL_SEQUENCE_FEATURES, self._use_default_label_features(label_ids) + ) + + # explicitly add last dimension to label_ids + # to track correctly dynamic sequences + model_data.add_features(LABEL_IDS, [np.expand_dims(label_ids, -1)]) + + for tag_name, tag_ids in tag_name_to_tag_ids.items(): + model_data.add_features(f"{tag_name}_{TAG_IDS}", [tag_ids]) + + model_data.add_lengths(TEXT_SEQUENCE_LENGTH, TEXT_SEQUENCE_FEATURES) + model_data.add_lengths(LABEL_SEQUENCE_LENGTH, LABEL_SEQUENCE_FEATURES) + + return model_data + + def _tag_ids_for_crf(self, example: Message, tag_spec: EntityTagSpec) -> np.ndarray: + """Create a np.array containing the tag ids of the given message.""" + if self.component_config[BILOU_FLAG]: + _tags = bilou_utils.bilou_tags_to_ids( + example, tag_spec.tags_to_ids, tag_spec.tag_name + ) + else: + _tags = [] + for token in example.get(TOKENS_NAMES[TEXT]): + _tag = determine_token_labels( + token, example.get(ENTITIES), attribute_key=tag_spec.tag_name + ) + _tags.append(tag_spec.tags_to_ids[_tag]) + + # transpose to have seq_len x 1 + return np.array([_tags]).T + + # train helpers + def preprocess_train_data(self, training_data: TrainingData) -> RasaModelData: + """Prepares data for training. + + Performs sanity checks on training data, extracts encodings for labels. + """ + + if self.component_config[BILOU_FLAG]: + bilou_utils.apply_bilou_schema(training_data) + + label_id_index_mapping = self._label_id_index_mapping( + training_data, attribute=INTENT + ) + + if not label_id_index_mapping: + # no labels are present to train + return RasaModelData() + + self.index_label_id_mapping = self._invert_mapping(label_id_index_mapping) + + self._label_data = self._create_label_data( + training_data, label_id_index_mapping, attribute=INTENT + ) + + self._entity_tag_specs = self._create_entity_tag_specs(training_data) + + label_attribute = ( + INTENT if self.component_config[INTENT_CLASSIFICATION] else None + ) + + model_data = self._create_model_data( + training_data.training_examples, + label_id_index_mapping, + label_attribute=label_attribute, + ) + + self._check_input_dimension_consistency(model_data) + + return model_data + + @staticmethod + def _check_enough_labels(model_data: RasaModelData) -> bool: + return len(np.unique(model_data.get(LABEL_IDS))) >= 2 + + def train( + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, + ) -> None: + """Train the embedding intent classifier on a data set.""" + model_data = self.preprocess_train_data(training_data) + if model_data.is_empty(): + logger.debug( + f"Cannot train '{self.__class__.__name__}'. No data was provided. " + f"Skipping training of the classifier." + ) + return + + if self.component_config.get(INTENT_CLASSIFICATION): + if not self._check_enough_labels(model_data): + logger.error( + f"Cannot train '{self.__class__.__name__}'. " + f"Need at least 2 different intent classes. " + f"Skipping training of classifier." 
+ ) + return + if self.component_config.get(ENTITY_RECOGNITION): + self.check_correct_entity_annotations(training_data) + + # keep one example for persisting and loading + self._data_example = model_data.first_data_example() + + self.model = self.model_class()( + data_signature=model_data.get_signature(), + label_data=self._label_data, + entity_tag_specs=self._entity_tag_specs, + config=self.component_config, + ) + + self.model.fit( + model_data, + self.component_config[EPOCHS], + self.component_config[BATCH_SIZES], + self.component_config[EVAL_NUM_EXAMPLES], + self.component_config[EVAL_NUM_EPOCHS], + self.component_config[BATCH_STRATEGY], + ) + + # process helpers + def _predict(self, message: Message) -> Optional[Dict[Text, tf.Tensor]]: + if self.model is None: + logger.debug( + f"There is no trained model for '{self.__class__.__name__}': The " + f"component is either not trained or didn't receive enough training " + f"data." + ) + return None + + # create session data from message and convert it into a batch of 1 + model_data = self._create_model_data([message], training=False) + + return self.model.predict(model_data) + + def _predict_label( + self, predict_out: Optional[Dict[Text, tf.Tensor]] + ) -> Tuple[Dict[Text, Any], List[Dict[Text, Any]]]: + """Predicts the intent of the provided message.""" + + label = {"name": None, "id": None, "confidence": 0.0} + label_ranking = [] + + if predict_out is None: + return label, label_ranking + + message_sim = predict_out["i_scores"].numpy() + + message_sim = message_sim.flatten() # sim is a matrix + + label_ids = message_sim.argsort()[::-1] + + if ( + self.component_config[LOSS_TYPE] == SOFTMAX + and self.component_config[RANKING_LENGTH] > 0 + ): + message_sim = train_utils.normalize( + message_sim, self.component_config[RANKING_LENGTH] + ) + + message_sim[::-1].sort() + message_sim = message_sim.tolist() + + # if X contains all zeros do not predict some label + if label_ids.size > 0: + label = { + "id": hash(self.index_label_id_mapping[label_ids[0]]), + "name": self.index_label_id_mapping[label_ids[0]], + "confidence": message_sim[0], + } + + if ( + self.component_config[RANKING_LENGTH] + and 0 < self.component_config[RANKING_LENGTH] < LABEL_RANKING_LENGTH + ): + output_length = self.component_config[RANKING_LENGTH] + else: + output_length = LABEL_RANKING_LENGTH + + ranking = list(zip(list(label_ids), message_sim)) + ranking = ranking[:output_length] + label_ranking = [ + { + "id": hash(self.index_label_id_mapping[label_idx]), + "name": self.index_label_id_mapping[label_idx], + "confidence": score, + } + for label_idx, score in ranking + ] + + return label, label_ranking + + def _predict_entities( + self, predict_out: Optional[Dict[Text, tf.Tensor]], message: Message + ) -> List[Dict]: + if predict_out is None: + return [] + + predicted_tags, confidence_values = self._entity_label_to_tags(predict_out) + + entities = self.convert_predictions_into_entities( + message.text, + message.get(TOKENS_NAMES[TEXT], []), + predicted_tags, + confidence_values, + ) + + entities = self.add_extractor_name(entities) + entities = message.get(ENTITIES, []) + entities + + return entities + + def _entity_label_to_tags( + self, predict_out: Dict[Text, Any] + ) -> Tuple[Dict[Text, List[Text]], Dict[Text, List[float]]]: + predicted_tags = {} + confidence_values = {} + + for tag_spec in self._entity_tag_specs: + predictions = predict_out[f"e_{tag_spec.tag_name}_ids"].numpy() + confidences = predict_out[f"e_{tag_spec.tag_name}_scores"].numpy() + confidences = 
[float(c) for c in confidences[0]] + tags = [tag_spec.ids_to_tags[p] for p in predictions[0]] + + if self.component_config[BILOU_FLAG]: + tags = bilou_utils.ensure_consistent_bilou_tagging(tags) + + predicted_tags[tag_spec.tag_name] = tags + confidence_values[tag_spec.tag_name] = confidences + + return predicted_tags, confidence_values + + def process(self, message: Message, **kwargs: Any) -> None: + """Return the most likely label and its similarity to the input.""" + + out = self._predict(message) + + if self.component_config[INTENT_CLASSIFICATION]: + label, label_ranking = self._predict_label(out) + + message.set(INTENT, label, add_to_output=True) + message.set("intent_ranking", label_ranking, add_to_output=True) + + if self.component_config[ENTITY_RECOGNITION]: + entities = self._predict_entities(out, message) + + message.set(ENTITIES, entities, add_to_output=True) + + def persist(self, file_name: Text, model_dir: Text) -> Dict[Text, Any]: + """Persist this model into the passed directory. + + Return the metadata necessary to load the model again. + """ + + if self.model is None: + return {"file": None} + + model_dir = Path(model_dir) + tf_model_file = model_dir / f"{file_name}.tf_model" + + io_utils.create_directory_for_file(tf_model_file) + + self.model.save(str(tf_model_file)) + + io_utils.pickle_dump( + model_dir / f"{file_name}.data_example.pkl", self._data_example + ) + io_utils.pickle_dump( + model_dir / f"{file_name}.label_data.pkl", self._label_data + ) + io_utils.json_pickle( + model_dir / f"{file_name}.index_label_id_mapping.json", + self.index_label_id_mapping, + ) + + entity_tag_specs = ( + [tag_spec._asdict() for tag_spec in self._entity_tag_specs] + if self._entity_tag_specs + else [] + ) + io_utils.dump_obj_as_json_to_file( + model_dir / f"{file_name}.entity_tag_specs.json", entity_tag_specs + ) + + return {"file": file_name} + + @classmethod + def load( + cls, + meta: Dict[Text, Any], + model_dir: Text = None, + model_metadata: Metadata = None, + cached_component: Optional["DIETClassifier"] = None, + **kwargs: Any, + ) -> "DIETClassifier": + """Loads the trained model from the provided directory.""" + + if not model_dir or not meta.get("file"): + logger.debug( + f"Failed to load model for '{cls.__name__}'. " + f"Maybe you did not provide enough training data and no model was " + f"trained or the path '{os.path.abspath(model_dir)}' doesn't exist?" 
+ ) + return cls(component_config=meta) + + ( + index_label_id_mapping, + entity_tag_specs, + label_data, + meta, + data_example, + ) = cls._load_from_files(meta, model_dir) + + meta = train_utils.update_similarity_type(meta) + + model = cls._load_model( + entity_tag_specs, label_data, meta, data_example, model_dir + ) + + return cls( + component_config=meta, + index_label_id_mapping=index_label_id_mapping, + entity_tag_specs=entity_tag_specs, + model=model, + ) + + @classmethod + def _load_from_files(cls, meta: Dict[Text, Any], model_dir: Text): + file_name = meta.get("file") + + model_dir = Path(model_dir) + + data_example = io_utils.pickle_load(model_dir / f"{file_name}.data_example.pkl") + label_data = io_utils.pickle_load(model_dir / f"{file_name}.label_data.pkl") + index_label_id_mapping = io_utils.json_unpickle( + model_dir / f"{file_name}.index_label_id_mapping.json" + ) + entity_tag_specs = io_utils.read_json_file( + model_dir / f"{file_name}.entity_tag_specs.json" + ) + entity_tag_specs = [ + EntityTagSpec( + tag_name=tag_spec["tag_name"], + ids_to_tags={ + int(key): value for key, value in tag_spec["ids_to_tags"].items() + }, + tags_to_ids={ + key: int(value) for key, value in tag_spec["tags_to_ids"].items() + }, + num_tags=tag_spec["num_tags"], + ) + for tag_spec in entity_tag_specs + ] + + # jsonpickle converts dictionary keys to strings + index_label_id_mapping = { + int(key): value for key, value in index_label_id_mapping.items() + } + + return ( + index_label_id_mapping, + entity_tag_specs, + label_data, + meta, + data_example, + ) + + @classmethod + def _load_model( + cls, + entity_tag_specs: List[EntityTagSpec], + label_data: RasaModelData, + meta: Dict[Text, Any], + data_example: Dict[Text, List[np.ndarray]], + model_dir: Text, + ): + file_name = meta.get("file") + tf_model_file = os.path.join(model_dir, file_name + ".tf_model") + + label_key = LABEL_IDS if meta[INTENT_CLASSIFICATION] else None + model_data_example = RasaModelData(label_key=label_key, data=data_example) + + model = cls.model_class().load( + tf_model_file, + model_data_example, + data_signature=model_data_example.get_signature(), + label_data=label_data, + entity_tag_specs=entity_tag_specs, + config=copy.deepcopy(meta), + ) + + # build the graph for prediction + predict_data_example = RasaModelData( + label_key=label_key, + data={ + feature_name: features + for feature_name, features in model_data_example.items() + if TEXT in feature_name + }, + ) + + model.build_for_predict(predict_data_example) + + return model + + +# accessing _tf_layers with any key results in key-error, disable it +# pytype: disable=key-error + + +class DIET(RasaModel): + def __init__( + self, + data_signature: Dict[Text, List[FeatureSignature]], + label_data: RasaModelData, + entity_tag_specs: Optional[List[EntityTagSpec]], + config: Dict[Text, Any], + ) -> None: + super().__init__( + name="DIET", + random_seed=config[RANDOM_SEED], + tensorboard_log_dir=config[TENSORBOARD_LOG_DIR], + tensorboard_log_level=config[TENSORBOARD_LOG_LEVEL], + ) + + self.config = config + + self.data_signature = data_signature + self._check_data() + + self.predict_data_signature = { + feature_name: features + for feature_name, features in data_signature.items() + if TEXT in feature_name + } + + label_batch = label_data.prepare_batch() + self.tf_label_data = self.batch_to_model_data_format( + label_batch, label_data.get_signature() + ) + + self._entity_tag_specs = self._ordered_tag_specs(entity_tag_specs) + + # tf objects + self._tf_layers: Dict[Text : 
tf.keras.layers.Layer] = {} + self._prepare_layers() + + # tf training + self.optimizer = tf.keras.optimizers.Adam(config[LEARNING_RATE]) + self._create_metrics() + self._update_metrics_to_log() + + self.all_labels_embed = None # needed for efficient prediction + + @staticmethod + def _ordered_tag_specs( + entity_tag_specs: Optional[List[EntityTagSpec]], + ) -> List[EntityTagSpec]: + """Ensure that order of entity tag specs matches CRF layer order.""" + if entity_tag_specs is None: + return [] + + crf_order = [ + ENTITY_ATTRIBUTE_TYPE, + ENTITY_ATTRIBUTE_ROLE, + ENTITY_ATTRIBUTE_GROUP, + ] + + ordered_tag_spec = [] + + for tag_name in crf_order: + for tag_spec in entity_tag_specs: + if tag_name == tag_spec.tag_name: + ordered_tag_spec.append(tag_spec) + + return ordered_tag_spec + + def _check_data(self) -> None: + if ( + TEXT_SENTENCE_FEATURES not in self.data_signature + and TEXT_SEQUENCE_FEATURES not in self.data_signature + ): + raise InvalidConfigError( + f"No text features specified. " + f"Cannot train '{self.__class__.__name__}' model." + ) + if self.config[INTENT_CLASSIFICATION]: + if ( + LABEL_SENTENCE_FEATURES not in self.data_signature + and LABEL_SEQUENCE_FEATURES not in self.data_signature + ): + raise InvalidConfigError( + f"No label features specified. " + f"Cannot train '{self.__class__.__name__}' model." + ) + + if self.config[SHARE_HIDDEN_LAYERS]: + different_sentence_signatures = False + different_sequence_signatures = False + if ( + TEXT_SENTENCE_FEATURES in self.data_signature + and LABEL_SENTENCE_FEATURES in self.data_signature + ): + different_sentence_signatures = ( + self.data_signature[TEXT_SENTENCE_FEATURES] + != self.data_signature[LABEL_SENTENCE_FEATURES] + ) + if ( + TEXT_SEQUENCE_FEATURES in self.data_signature + and LABEL_SEQUENCE_FEATURES in self.data_signature + ): + different_sequence_signatures = ( + self.data_signature[TEXT_SEQUENCE_FEATURES] + != self.data_signature[LABEL_SEQUENCE_FEATURES] + ) + + if different_sentence_signatures or different_sequence_signatures: + raise ValueError( + "If hidden layer weights are shared, data signatures " + "for text_features and label_features must coincide." + ) + + if ( + self.config[ENTITY_RECOGNITION] + and f"{ENTITY_ATTRIBUTE_TYPE}_{TAG_IDS}" not in self.data_signature + ): + common_utils.raise_warning( + f"You specified '{self.__class__.__name__}' to train entities, but " + f"no entities are present in the training data. Skip training of " + f"entities." 
+ ) + self.config[ENTITY_RECOGNITION] = False + + def _create_metrics(self) -> None: + # self.metrics will have the same order as they are created + # so create loss metrics first to output losses first + self.mask_loss = tf.keras.metrics.Mean(name="m_loss") + self.intent_loss = tf.keras.metrics.Mean(name="i_loss") + self.entity_loss = tf.keras.metrics.Mean(name="e_loss") + self.entity_group_loss = tf.keras.metrics.Mean(name="g_loss") + self.entity_role_loss = tf.keras.metrics.Mean(name="r_loss") + # create accuracy metrics second to output accuracies second + self.mask_acc = tf.keras.metrics.Mean(name="m_acc") + self.response_acc = tf.keras.metrics.Mean(name="i_acc") + self.entity_f1 = tf.keras.metrics.Mean(name="e_f1") + self.entity_group_f1 = tf.keras.metrics.Mean(name="g_f1") + self.entity_role_f1 = tf.keras.metrics.Mean(name="r_f1") + + def _update_metrics_to_log(self) -> None: + debug_log_level = logging.getLogger("rasa").level == logging.DEBUG + + if self.config[MASKED_LM]: + self.metrics_to_log.append("m_acc") + if debug_log_level: + self.metrics_to_log.append("m_loss") + if self.config[INTENT_CLASSIFICATION]: + self.metrics_to_log.append("i_acc") + if debug_log_level: + self.metrics_to_log.append("i_loss") + if self.config[ENTITY_RECOGNITION]: + for tag_spec in self._entity_tag_specs: + if tag_spec.num_tags != 0: + name = tag_spec.tag_name + self.metrics_to_log.append(f"{name[0]}_f1") + if debug_log_level: + self.metrics_to_log.append(f"{name[0]}_loss") + + self._log_metric_info() + + def _log_metric_info(self) -> None: + metric_name = { + "t": "total", + "i": "intent", + "e": "entity", + "m": "mask", + "r": "role", + "g": "group", + } + logger.debug("Following metrics will be logged during training: ") + for metric in self.metrics_to_log: + parts = metric.split("_") + name = f"{metric_name[parts[0]]} {parts[1]}" + logger.debug(f" {metric} ({name})") + + def _prepare_layers(self) -> None: + self.text_name = TEXT + self._prepare_sequence_layers(self.text_name) + if self.config[MASKED_LM]: + self._prepare_mask_lm_layers(self.text_name) + if self.config[INTENT_CLASSIFICATION]: + self.label_name = TEXT if self.config[SHARE_HIDDEN_LAYERS] else LABEL + self._prepare_input_layers(self.label_name) + self._prepare_label_classification_layers() + if self.config[ENTITY_RECOGNITION]: + self._prepare_entity_recognition_layers() + + def _prepare_sparse_dense_layers( + self, + feature_signatures: List[FeatureSignature], + name: Text, + reg_lambda: float, + dense_dim: int, + ) -> None: + sparse = False + dense = False + for is_sparse, feature_dimension in feature_signatures: + if is_sparse: + sparse = True + else: + dense = True + # if dense features are present + # use the feature dimension of the dense features + dense_dim = feature_dimension + + if sparse: + self._tf_layers[f"sparse_to_dense.{name}"] = layers.DenseForSparse( + units=dense_dim, reg_lambda=reg_lambda, name=name + ) + if not dense: + # create dense labels for the input to use in negative sampling + self._tf_layers[f"sparse_to_dense_ids.{name}"] = layers.DenseForSparse( + units=2, trainable=False, name=f"sparse_to_dense_ids.{name}" + ) + + def _prepare_input_layers(self, name: Text) -> None: + self._tf_layers[f"ffnn.{name}"] = layers.Ffnn( + self.config[HIDDEN_LAYERS_SIZES][name], + self.config[DROP_RATE], + self.config[REGULARIZATION_CONSTANT], + self.config[WEIGHT_SPARSITY], + name, + ) + for feature_type in [SENTENCE, SEQUENCE]: + if f"{name}_{feature_type}_features" not in self.data_signature: + continue + + self._tf_layers[ + 
f"sparse_input_dropout.{name}_{feature_type}" + ] = layers.SparseDropout(rate=self.config[DROP_RATE]) + self._tf_layers[ + f"dense_input_dropout.{name}_{feature_type}" + ] = tf.keras.layers.Dropout(rate=self.config[DROP_RATE]) + self._prepare_sparse_dense_layers( + self.data_signature[f"{name}_{feature_type}_features"], + f"{name}_{feature_type}", + self.config[REGULARIZATION_CONSTANT], + self.config[DENSE_DIMENSION][name], + ) + self._tf_layers[f"concat_layer.{name}_{feature_type}"] = layers.Ffnn( + [self.config[CONCAT_DIMENSION][name]], + self.config[DROP_RATE], + self.config[REGULARIZATION_CONSTANT], + self.config[WEIGHT_SPARSITY], + name, + ) + + def _prepare_embed_layers(self, name: Text) -> None: + self._tf_layers[f"embed.{name}"] = layers.Embed( + self.config[EMBEDDING_DIMENSION], + self.config[REGULARIZATION_CONSTANT], + name, + self.config[SIMILARITY_TYPE], + ) + + def _prepare_dot_product_loss(self, name: Text, scale_loss: bool) -> None: + self._tf_layers[f"loss.{name}"] = layers.DotProductLoss( + self.config[NUM_NEG], + self.config[LOSS_TYPE], + self.config[MAX_POS_SIM], + self.config[MAX_NEG_SIM], + self.config[USE_MAX_NEG_SIM], + self.config[NEGATIVE_MARGIN_SCALE], + scale_loss, + # set to 1 to get deterministic behaviour + parallel_iterations=1 if self.random_seed is not None else 1000, + ) + + def _prepare_sequence_layers(self, name: Text) -> None: + self._prepare_input_layers(name) + + if self.config[NUM_TRANSFORMER_LAYERS] > 0: + self._tf_layers[f"{name}_transformer"] = TransformerEncoder( + self.config[NUM_TRANSFORMER_LAYERS], + self.config[TRANSFORMER_SIZE], + self.config[NUM_HEADS], + self.config[TRANSFORMER_SIZE] * 4, + self.config[REGULARIZATION_CONSTANT], + dropout_rate=self.config[DROP_RATE], + attention_dropout_rate=self.config[DROP_RATE_ATTENTION], + sparsity=self.config[WEIGHT_SPARSITY], + unidirectional=self.config[UNIDIRECTIONAL_ENCODER], + use_key_relative_position=self.config[KEY_RELATIVE_ATTENTION], + use_value_relative_position=self.config[VALUE_RELATIVE_ATTENTION], + max_relative_position=self.config[MAX_RELATIVE_POSITION], + name=f"{name}_encoder", + ) + else: + # create lambda so that it can be used later without the check + self._tf_layers[f"{name}_transformer"] = lambda x, mask, training: x + + def _prepare_mask_lm_layers(self, name: Text) -> None: + self._tf_layers[f"{name}_input_mask"] = layers.InputMask() + + self._prepare_embed_layers(f"{name}_lm_mask") + self._prepare_embed_layers(f"{name}_golden_token") + + # mask loss is additional loss + # set scaling to False, so that it doesn't overpower other losses + self._prepare_dot_product_loss(f"{name}_mask", scale_loss=False) + + def _prepare_label_classification_layers(self) -> None: + self._prepare_embed_layers(TEXT) + self._prepare_embed_layers(LABEL) + + self._prepare_dot_product_loss(LABEL, self.config[SCALE_LOSS]) + + def _prepare_entity_recognition_layers(self) -> None: + for tag_spec in self._entity_tag_specs: + name = tag_spec.tag_name + num_tags = tag_spec.num_tags + self._tf_layers[f"embed.{name}.logits"] = layers.Embed( + num_tags, self.config[REGULARIZATION_CONSTANT], f"logits.{name}" + ) + self._tf_layers[f"crf.{name}"] = layers.CRF( + num_tags, self.config[REGULARIZATION_CONSTANT], self.config[SCALE_LOSS] + ) + self._tf_layers[f"embed.{name}.tags"] = layers.Embed( + self.config[EMBEDDING_DIMENSION], + self.config[REGULARIZATION_CONSTANT], + f"tags.{name}", + ) + + def _combine_sparse_dense_features( + self, + features: List[Union[np.ndarray, tf.Tensor, tf.SparseTensor]], + name: Text, + 
mask: Optional[tf.Tensor] = None, + sparse_dropout: bool = False, + dense_dropout: bool = False, + ) -> Optional[tf.Tensor]: + + if not features: + return None + + dense_features = [] + + for f in features: + if isinstance(f, tf.SparseTensor): + if sparse_dropout: + _f = self._tf_layers[f"sparse_input_dropout.{name}"]( + f, self._training + ) + else: + _f = f + + dense_f = self._tf_layers[f"sparse_to_dense.{name}"](_f) + + if dense_dropout: + dense_f = self._tf_layers[f"dense_input_dropout.{name}"]( + dense_f, self._training + ) + + dense_features.append(dense_f) + else: + dense_features.append(f) + + if mask is None: + return tf.concat(dense_features, axis=-1) + + return tf.concat(dense_features, axis=-1) * mask + + def _features_as_seq_ids( + self, features: List[Union[np.ndarray, tf.Tensor, tf.SparseTensor]], name: Text + ) -> Optional[tf.Tensor]: + """Creates dense labels for negative sampling.""" + + # if there are dense features - we can use them + for f in features: + if not isinstance(f, tf.SparseTensor): + seq_ids = tf.stop_gradient(f) + # add a zero to the seq dimension for the sentence features + seq_ids = tf.pad(seq_ids, [[0, 0], [0, 1], [0, 0]]) + return seq_ids + + # use additional sparse to dense layer + for f in features: + if isinstance(f, tf.SparseTensor): + seq_ids = tf.stop_gradient( + self._tf_layers[f"sparse_to_dense_ids.{name}"](f) + ) + # add a zero to the seq dimension for the sentence features + seq_ids = tf.pad(seq_ids, [[0, 0], [0, 1], [0, 0]]) + return seq_ids + + return None + + def _combine_sequence_sentence_features( + self, + sequence_features: List[Union[tf.Tensor, tf.SparseTensor]], + sentence_features: List[Union[tf.Tensor, tf.SparseTensor]], + mask_sequence: tf.Tensor, + mask_text: tf.Tensor, + name: Text, + sparse_dropout: bool = False, + dense_dropout: bool = False, + ) -> tf.Tensor: + sequence_x = self._combine_sparse_dense_features( + sequence_features, + f"{name}_{SEQUENCE}", + mask_sequence, + sparse_dropout, + dense_dropout, + ) + sentence_x = self._combine_sparse_dense_features( + sentence_features, f"{name}_{SENTENCE}", None, sparse_dropout, dense_dropout + ) + + if sequence_x is not None and sentence_x is None: + return sequence_x + + if sequence_x is None and sentence_x is not None: + return sentence_x + + if sequence_x is not None and sentence_x is not None: + return self._concat_sequence_sentence_features( + sequence_x, sentence_x, name, mask_text + ) + + raise ValueError( + "No features are present. Please check your configuration file." 
+ ) + + def _concat_sequence_sentence_features( + self, + sequence_x: tf.Tensor, + sentence_x: tf.Tensor, + name: Text, + mask_text: tf.Tensor, + ): + if sequence_x.shape[-1] != sentence_x.shape[-1]: + sequence_x = self._tf_layers[f"concat_layer.{name}_{SEQUENCE}"]( + sequence_x, self._training + ) + sentence_x = self._tf_layers[f"concat_layer.{name}_{SENTENCE}"]( + sentence_x, self._training + ) + + # we need to concatenate the sequence features with the sentence features + # we cannot use tf.concat as the sequence features are padded + + # (1) get position of sentence features in mask + last = mask_text * tf.math.cumprod( + 1 - mask_text, axis=1, exclusive=True, reverse=True + ) + # (2) multiply by sentence features so that we get a matrix of + # batch-dim x seq-dim x feature-dim with zeros everywhere except for + # for the sentence features + sentence_x = last * sentence_x + + # (3) add a zero to the end of sequence matrix to match the final shape + sequence_x = tf.pad(sequence_x, [[0, 0], [0, 1], [0, 0]]) + + # (4) sum up sequence features and sentence features + return sequence_x + sentence_x + + def _create_bow( + self, + sequence_features: List[Union[tf.Tensor, tf.SparseTensor]], + sentence_features: List[Union[tf.Tensor, tf.SparseTensor]], + sequence_mask: tf.Tensor, + text_mask: tf.Tensor, + name: Text, + sparse_dropout: bool = False, + dense_dropout: bool = False, + ) -> tf.Tensor: + + x = self._combine_sequence_sentence_features( + sequence_features, + sentence_features, + sequence_mask, + text_mask, + name, + sparse_dropout, + dense_dropout, + ) + x = tf.reduce_sum(x, axis=1) # convert to bag-of-words + return self._tf_layers[f"ffnn.{name}"](x, self._training) + + def _create_sequence( + self, + sequence_features: List[Union[tf.Tensor, tf.SparseTensor]], + sentence_features: List[Union[tf.Tensor, tf.SparseTensor]], + mask_sequence: tf.Tensor, + mask: tf.Tensor, + name: Text, + sparse_dropout: bool = False, + dense_dropout: bool = False, + masked_lm_loss: bool = False, + sequence_ids: bool = False, + ) -> Tuple[tf.Tensor, tf.Tensor, Optional[tf.Tensor], Optional[tf.Tensor]]: + if sequence_ids: + seq_ids = self._features_as_seq_ids(sequence_features, f"{name}_{SEQUENCE}") + else: + seq_ids = None + + inputs = self._combine_sequence_sentence_features( + sequence_features, + sentence_features, + mask_sequence, + mask, + name, + sparse_dropout, + dense_dropout, + ) + inputs = self._tf_layers[f"ffnn.{name}"](inputs, self._training) + + if masked_lm_loss: + transformer_inputs, lm_mask_bool = self._tf_layers[f"{name}_input_mask"]( + inputs, mask, self._training + ) + else: + transformer_inputs = inputs + lm_mask_bool = None + + outputs = self._tf_layers[f"{name}_transformer"]( + transformer_inputs, 1 - mask, self._training + ) + + if self.config[NUM_TRANSFORMER_LAYERS] > 0: + # apply activation + outputs = tfa.activations.gelu(outputs) + + return outputs, inputs, seq_ids, lm_mask_bool + + def _create_all_labels(self) -> Tuple[tf.Tensor, tf.Tensor]: + all_label_ids = self.tf_label_data[LABEL_IDS][0] + + mask_sequence_label = self._get_mask_for( + self.tf_label_data, LABEL_SEQUENCE_LENGTH + ) + + x = self._create_bow( + self.tf_label_data[LABEL_SEQUENCE_FEATURES], + self.tf_label_data[LABEL_SENTENCE_FEATURES], + mask_sequence_label, + mask_sequence_label, + self.label_name, + ) + all_labels_embed = self._tf_layers[f"embed.{LABEL}"](x) + + return all_label_ids, all_labels_embed + + @staticmethod + def _last_token(x: tf.Tensor, sequence_lengths: tf.Tensor) -> tf.Tensor: + 
last_sequence_index = tf.maximum(0, sequence_lengths - 1) + batch_index = tf.range(tf.shape(last_sequence_index)[0]) + + indices = tf.stack([batch_index, last_sequence_index], axis=1) + return tf.gather_nd(x, indices) + + def _mask_loss( + self, + outputs: tf.Tensor, + inputs: tf.Tensor, + seq_ids: tf.Tensor, + lm_mask_bool: tf.Tensor, + name: Text, + ) -> tf.Tensor: + # make sure there is at least one element in the mask + lm_mask_bool = tf.cond( + tf.reduce_any(lm_mask_bool), + lambda: lm_mask_bool, + lambda: tf.scatter_nd([[0, 0, 0]], [True], tf.shape(lm_mask_bool)), + ) + + lm_mask_bool = tf.squeeze(lm_mask_bool, -1) + # pick elements that were masked + outputs = tf.boolean_mask(outputs, lm_mask_bool) + inputs = tf.boolean_mask(inputs, lm_mask_bool) + ids = tf.boolean_mask(seq_ids, lm_mask_bool) + + outputs_embed = self._tf_layers[f"embed.{name}_lm_mask"](outputs) + inputs_embed = self._tf_layers[f"embed.{name}_golden_token"](inputs) + + return self._tf_layers[f"loss.{name}_mask"]( + outputs_embed, inputs_embed, ids, inputs_embed, ids + ) + + def _calculate_label_loss( + self, text_features: tf.Tensor, label_features: tf.Tensor, label_ids: tf.Tensor + ) -> tf.Tensor: + all_label_ids, all_labels_embed = self._create_all_labels() + + text_embed = self._tf_layers[f"embed.{TEXT}"](text_features) + label_embed = self._tf_layers[f"embed.{LABEL}"](label_features) + + return self._tf_layers[f"loss.{LABEL}"]( + text_embed, label_embed, label_ids, all_labels_embed, all_label_ids + ) + + def _calculate_entity_loss( + self, + inputs: tf.Tensor, + tag_ids: tf.Tensor, + mask: tf.Tensor, + sequence_lengths: tf.Tensor, + tag_name: Text, + entity_tags: Optional[tf.Tensor] = None, + ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: + + tag_ids = tf.cast(tag_ids[:, :, 0], tf.int32) + + if entity_tags is not None: + _tags = self._tf_layers[f"embed.{tag_name}.tags"](entity_tags) + inputs = tf.concat([inputs, _tags], axis=-1) + + logits = self._tf_layers[f"embed.{tag_name}.logits"](inputs) + + # should call first to build weights + pred_ids, _ = self._tf_layers[f"crf.{tag_name}"](logits, sequence_lengths) + # pytype cannot infer that 'self._tf_layers["crf"]' has the method '.loss' + # pytype: disable=attribute-error + loss = self._tf_layers[f"crf.{tag_name}"].loss( + logits, tag_ids, sequence_lengths + ) + f1 = self._tf_layers[f"crf.{tag_name}"].f1_score(tag_ids, pred_ids, mask) + # pytype: enable=attribute-error + + return loss, f1, logits + + @staticmethod + def _compute_mask(sequence_lengths: tf.Tensor) -> tf.Tensor: + mask = tf.sequence_mask(sequence_lengths, dtype=tf.float32) + # explicitly add last dimension to mask + # to track correctly dynamic sequences + return tf.expand_dims(mask, -1) + + @staticmethod + def _get_sequence_lengths( + tf_batch_data: Dict[Text, List[tf.Tensor]], name: Text, batch_dim: int = 1 + ) -> tf.Tensor: + # sentence features have a sequence lengths of 1 + # if sequence features are present we add the sequence lengths of those + + sequence_lengths = tf.ones([batch_dim], dtype=tf.int32) + if name in tf_batch_data: + sequence_lengths += tf.cast(tf_batch_data[name][0], dtype=tf.int32) + + return sequence_lengths + + def _get_mask_for( + self, tf_batch_data: Dict[Text, List[tf.Tensor]], name: Text + ) -> Optional[tf.Tensor]: + if name not in tf_batch_data: + return None + + sequence_lengths = tf.cast(tf_batch_data[name][0], dtype=tf.int32) + return self._compute_mask(sequence_lengths) + + @staticmethod + def _get_batch_dim(tf_batch_data: Dict[Text, List[tf.Tensor]]) -> int: + if 
TEXT_SEQUENCE_FEATURES in tf_batch_data: + return tf.shape(tf_batch_data[TEXT_SEQUENCE_FEATURES][0])[0] + + return tf.shape(tf_batch_data[TEXT_SENTENCE_FEATURES][0])[0] + + def batch_loss( + self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]] + ) -> tf.Tensor: + tf_batch_data = self.batch_to_model_data_format(batch_in, self.data_signature) + + batch_dim = self._get_batch_dim(tf_batch_data) + mask_sequence_text = self._get_mask_for(tf_batch_data, TEXT_SEQUENCE_LENGTH) + sequence_lengths = self._get_sequence_lengths( + tf_batch_data, TEXT_SEQUENCE_LENGTH, batch_dim + ) + mask_text = self._compute_mask(sequence_lengths) + + ( + text_transformed, + text_in, + text_seq_ids, + lm_mask_bool_text, + ) = self._create_sequence( + tf_batch_data[TEXT_SEQUENCE_FEATURES], + tf_batch_data[TEXT_SENTENCE_FEATURES], + mask_sequence_text, + mask_text, + self.text_name, + sparse_dropout=self.config[SPARSE_INPUT_DROPOUT], + dense_dropout=self.config[DENSE_INPUT_DROPOUT], + masked_lm_loss=self.config[MASKED_LM], + sequence_ids=True, + ) + + losses = [] + + if self.config[MASKED_LM]: + loss, acc = self._mask_loss( + text_transformed, text_in, text_seq_ids, lm_mask_bool_text, TEXT + ) + self.mask_loss.update_state(loss) + self.mask_acc.update_state(acc) + losses.append(loss) + + if self.config[INTENT_CLASSIFICATION]: + loss = self._batch_loss_intent( + sequence_lengths, mask_text, text_transformed, tf_batch_data + ) + losses.append(loss) + + if self.config[ENTITY_RECOGNITION]: + losses += self._batch_loss_entities( + mask_text, sequence_lengths, text_transformed, tf_batch_data + ) + + return tf.math.add_n(losses) + + def _batch_loss_intent( + self, + sequence_lengths: tf.Tensor, + mask_text: tf.Tensor, + text_transformed: tf.Tensor, + tf_batch_data: Dict[Text, List[tf.Tensor]], + ) -> tf.Tensor: + # get sentence features vector for intent classification + sentence_vector = self._last_token(text_transformed, sequence_lengths) + + mask_sequence_label = self._get_mask_for(tf_batch_data, LABEL_SEQUENCE_LENGTH) + + label_ids = tf_batch_data[LABEL_IDS][0] + label = self._create_bow( + tf_batch_data[LABEL_SEQUENCE_FEATURES], + tf_batch_data[LABEL_SENTENCE_FEATURES], + mask_sequence_label, + mask_text, + self.label_name, + ) + + loss, acc = self._calculate_label_loss(sentence_vector, label, label_ids) + + self.intent_loss.update_state(loss) + self.response_acc.update_state(acc) + + return loss + + def _batch_loss_entities( + self, + mask_text: tf.Tensor, + sequence_lengths: tf.Tensor, + text_transformed: tf.Tensor, + tf_batch_data: Dict[Text, List[tf.Tensor]], + ) -> List[tf.Tensor]: + losses = [] + + sequence_lengths -= 1 # remove sentence features + + entity_tags = None + + for tag_spec in self._entity_tag_specs: + if tag_spec.num_tags == 0: + continue + + tag_ids = tf_batch_data[f"{tag_spec.tag_name}_{TAG_IDS}"][0] + # add a zero (no entity) for the sentence features to match the shape of + # inputs + tag_ids = tf.pad(tag_ids, [[0, 0], [0, 1], [0, 0]]) + + loss, f1, _logits = self._calculate_entity_loss( + text_transformed, + tag_ids, + mask_text, + sequence_lengths, + tag_spec.tag_name, + entity_tags, + ) + + if tag_spec.tag_name == ENTITY_ATTRIBUTE_TYPE: + # use the entity tags as additional input for the role + # and group CRF + entity_tags = tf.one_hot( + tf.cast(tag_ids[:, :, 0], tf.int32), depth=tag_spec.num_tags + ) + + self._update_entity_metrics(loss, f1, tag_spec.tag_name) + + losses.append(loss) + + return losses + + def _update_entity_metrics(self, loss: tf.Tensor, f1: tf.Tensor, tag_name: Text): + 
if tag_name == ENTITY_ATTRIBUTE_TYPE: + self.entity_loss.update_state(loss) + self.entity_f1.update_state(f1) + elif tag_name == ENTITY_ATTRIBUTE_GROUP: + self.entity_group_loss.update_state(loss) + self.entity_group_f1.update_state(f1) + elif tag_name == ENTITY_ATTRIBUTE_ROLE: + self.entity_role_loss.update_state(loss) + self.entity_role_f1.update_state(f1) + + def batch_predict( + self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]] + ) -> Dict[Text, tf.Tensor]: + tf_batch_data = self.batch_to_model_data_format( + batch_in, self.predict_data_signature + ) + + mask_sequence_text = self._get_mask_for(tf_batch_data, TEXT_SEQUENCE_LENGTH) + sequence_lengths = self._get_sequence_lengths( + tf_batch_data, TEXT_SEQUENCE_LENGTH, batch_dim=1 + ) + + mask = self._compute_mask(sequence_lengths) + + text_transformed, _, _, _ = self._create_sequence( + tf_batch_data[TEXT_SEQUENCE_FEATURES], + tf_batch_data[TEXT_SENTENCE_FEATURES], + mask_sequence_text, + mask, + self.text_name, + ) + + predictions: Dict[Text, tf.Tensor] = {} + + if self.config[INTENT_CLASSIFICATION]: + predictions.update( + self._batch_predict_intents(sequence_lengths, text_transformed) + ) + + if self.config[ENTITY_RECOGNITION]: + predictions.update( + self._batch_predict_entities(sequence_lengths, text_transformed) + ) + + return predictions + + def _batch_predict_entities( + self, sequence_lengths: tf.Tensor, text_transformed: tf.Tensor + ) -> Dict[Text, tf.Tensor]: + predictions: Dict[Text, tf.Tensor] = {} + + entity_tags = None + + for tag_spec in self._entity_tag_specs: + # skip crf layer if it was not trained + if tag_spec.num_tags == 0: + continue + + name = tag_spec.tag_name + _input = text_transformed + + if entity_tags is not None: + _tags = self._tf_layers[f"embed.{name}.tags"](entity_tags) + _input = tf.concat([_input, _tags], axis=-1) + + _logits = self._tf_layers[f"embed.{name}.logits"](_input) + pred_ids, confidences = self._tf_layers[f"crf.{name}"]( + _logits, sequence_lengths - 1 + ) + + predictions[f"e_{name}_ids"] = pred_ids + predictions[f"e_{name}_scores"] = confidences + + if name == ENTITY_ATTRIBUTE_TYPE: + # use the entity tags as additional input for the role + # and group CRF + entity_tags = tf.one_hot( + tf.cast(pred_ids, tf.int32), depth=tag_spec.num_tags + ) + + return predictions + + def _batch_predict_intents( + self, sequence_lengths: tf.Tensor, text_transformed: tf.Tensor + ) -> Dict[Text, tf.Tensor]: + + if self.all_labels_embed is None: + _, self.all_labels_embed = self._create_all_labels() + + # get sentence feature vector for intent classification + sentence_vector = self._last_token(text_transformed, sequence_lengths) + sentence_vector_embed = self._tf_layers[f"embed.{TEXT}"](sentence_vector) + + # pytype cannot infer that 'self._tf_layers[f"loss.{LABEL}"]' has methods + # like '.sim' or '.confidence_from_sim' + # pytype: disable=attribute-error + sim_all = self._tf_layers[f"loss.{LABEL}"].sim( + sentence_vector_embed[:, tf.newaxis, :], + self.all_labels_embed[tf.newaxis, :, :], + ) + scores = self._tf_layers[f"loss.{LABEL}"].confidence_from_sim( + sim_all, self.config[SIMILARITY_TYPE] + ) + # pytype: enable=attribute-error + + return {"i_scores": scores} + + +# pytype: enable=key-error diff --git a/rasa/nlu/classifiers/embedding_intent_classifier.py b/rasa/nlu/classifiers/embedding_intent_classifier.py deleted file mode 100644 index b72e64ade452..000000000000 --- a/rasa/nlu/classifiers/embedding_intent_classifier.py +++ /dev/null @@ -1,752 +0,0 @@ -import logging -import numpy as np 
-import os -import pickle -import typing -from typing import Any, Dict, List, Optional, Text, Tuple -import warnings - -from rasa.nlu.classifiers import LABEL_RANKING_LENGTH -from rasa.nlu.components import Component -from rasa.utils import train_utils -from rasa.nlu.constants import ( - MESSAGE_RESPONSE_ATTRIBUTE, - MESSAGE_INTENT_ATTRIBUTE, - MESSAGE_TEXT_ATTRIBUTE, - MESSAGE_TOKENS_NAMES, - MESSAGE_ATTRIBUTES, - MESSAGE_SPACY_FEATURES_NAMES, - MESSAGE_VECTOR_FEATURE_NAMES, -) - -import tensorflow as tf - -# avoid warning println on contrib import - remove for tf 2 -tf.contrib._warning = None - -logger = logging.getLogger(__name__) - -if typing.TYPE_CHECKING: - from rasa.nlu.config import RasaNLUModelConfig - from rasa.nlu.training_data import TrainingData - from rasa.nlu.model import Metadata - from rasa.nlu.training_data import Message - - -class EmbeddingIntentClassifier(Component): - """Intent classifier using supervised embeddings. - - The embedding intent classifier embeds user inputs - and intent labels into the same space. - Supervised embeddings are trained by maximizing similarity between them. - It also provides rankings of the labels that did not "win". - - The embedding intent classifier needs to be preceded by - a featurizer in the pipeline. - This featurizer creates the features used for the embeddings. - It is recommended to use ``CountVectorsFeaturizer`` that - can be optionally preceded by ``SpacyNLP`` and ``SpacyTokenizer``. - - Based on the starspace idea from: https://arxiv.org/abs/1709.03856. - However, in this implementation the `mu` parameter is treated differently - and additional hidden layers are added together with dropout. - """ - - provides = ["intent", "intent_ranking"] - - requires = [MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE]] - - # default properties (DOC MARKER - don't remove) - defaults = { - # nn architecture - # sizes of hidden layers before the embedding layer for input words - # the number of hidden layers is thus equal to the length of this list - "hidden_layers_sizes_a": [256, 128], - # sizes of hidden layers before the embedding layer for intent labels - # the number of hidden layers is thus equal to the length of this list - "hidden_layers_sizes_b": [], - # Whether to share the hidden layer weights between input words and labels - "share_hidden_layers": False, - # training parameters - # initial and final batch sizes - batch size will be - # linearly increased for each epoch - "batch_size": [64, 256], - # how to create batches - "batch_strategy": "balanced", # string 'sequence' or 'balanced' - # number of epochs - "epochs": 300, - # set random seed to any int to get reproducible results - "random_seed": None, - # embedding parameters - # dimension size of embedding vectors - "embed_dim": 20, - # the type of the similarity - "num_neg": 20, - # flag if minimize only maximum similarity over incorrect actions - "similarity_type": "auto", # string 'auto' or 'cosine' or 'inner' - # the type of the loss function - "loss_type": "softmax", # string 'softmax' or 'margin' - # how similar the algorithm should try - # to make embedding vectors for correct labels - "mu_pos": 0.8, # should be 0.0 < ... < 1.0 for 'cosine' - # maximum negative similarity for incorrect labels - "mu_neg": -0.4, # should be -1.0 < ... 
< 1.0 for 'cosine' - # flag: if true, only minimize the maximum similarity for incorrect labels - "use_max_sim_neg": True, - # scale loss inverse proportionally to confidence of correct prediction - "scale_loss": True, - # regularization parameters - # the scale of L2 regularization - "C2": 0.002, - # the scale of how critical the algorithm should be of minimizing the - # maximum similarity between embeddings of different labels - "C_emb": 0.8, - # dropout rate for rnn - "droprate": 0.2, - # visualization of accuracy - # how often to calculate training accuracy - "evaluate_every_num_epochs": 20, # small values may hurt performance - # how many examples to use for calculation of training accuracy - "evaluate_on_num_examples": 0, # large values may hurt performance - } - # end default properties (DOC MARKER - don't remove) - - def __init__( - self, - component_config: Optional[Dict[Text, Any]] = None, - inverted_label_dict: Optional[Dict[int, Text]] = None, - session: Optional["tf.Session"] = None, - graph: Optional["tf.Graph"] = None, - message_placeholder: Optional["tf.Tensor"] = None, - label_placeholder: Optional["tf.Tensor"] = None, - similarity_all: Optional["tf.Tensor"] = None, - pred_confidence: Optional["tf.Tensor"] = None, - similarity: Optional["tf.Tensor"] = None, - message_embed: Optional["tf.Tensor"] = None, - label_embed: Optional["tf.Tensor"] = None, - all_labels_embed: Optional["tf.Tensor"] = None, - ) -> None: - """Declare instant variables with default values""" - - super(EmbeddingIntentClassifier, self).__init__(component_config) - - self._load_params() - - # transform numbers to labels - self.inverted_label_dict = inverted_label_dict - # encode all label_ids with numbers - self._encoded_all_label_ids = None - - # tf related instances - self.session = session - self.graph = graph - self.a_in = message_placeholder - self.b_in = label_placeholder - self.sim_all = similarity_all - self.pred_confidence = pred_confidence - self.sim = similarity - - # persisted embeddings - self.message_embed = message_embed - self.label_embed = label_embed - self.all_labels_embed = all_labels_embed - - # internal tf instances - self._iterator = None - self._train_op = None - self._is_training = None - - # config migration warning - def _check_old_config_variables(self, config: Dict[Text, Any]) -> None: - - removed_tokenization_params = [ - "intent_tokenization_flag", - "intent_split_symbol", - ] - for removed_param in removed_tokenization_params: - if removed_param in config: - warnings.warn( - "Intent tokenization has been moved to Tokenizer components. " - "Your config still mentions '{}'. Tokenization may fail if you specify the parameter here." 
- "Please specify the parameter 'intent_tokenization_flag' and 'intent_split_symbol' in the " - "tokenizer of your NLU pipeline".format(removed_param) - ) - - # init helpers - def _load_nn_architecture_params(self, config: Dict[Text, Any]) -> None: - self.hidden_layer_sizes = { - "a": config["hidden_layers_sizes_a"], - "b": config["hidden_layers_sizes_b"], - } - self.share_hidden_layers = config["share_hidden_layers"] - if ( - self.share_hidden_layers - and self.hidden_layer_sizes["a"] != self.hidden_layer_sizes["b"] - ): - raise ValueError( - "If hidden layer weights are shared," - "hidden_layer_sizes for a and b must coincide" - ) - - self.batch_size = config["batch_size"] - self.batch_strategy = config["batch_strategy"] - - self.epochs = config["epochs"] - - self.random_seed = self.component_config["random_seed"] - - def _load_embedding_params(self, config: Dict[Text, Any]) -> None: - self.embed_dim = config["embed_dim"] - self.num_neg = config["num_neg"] - - self.similarity_type = config["similarity_type"] - self.loss_type = config["loss_type"] - if self.similarity_type == "auto": - if self.loss_type == "softmax": - self.similarity_type = "inner" - elif self.loss_type == "margin": - self.similarity_type = "cosine" - - self.mu_pos = config["mu_pos"] - self.mu_neg = config["mu_neg"] - self.use_max_sim_neg = config["use_max_sim_neg"] - - self.scale_loss = config["scale_loss"] - - def _load_regularization_params(self, config: Dict[Text, Any]) -> None: - self.C2 = config["C2"] - self.C_emb = config["C_emb"] - self.droprate = config["droprate"] - - def _load_visual_params(self, config: Dict[Text, Any]) -> None: - self.evaluate_every_num_epochs = config["evaluate_every_num_epochs"] - if self.evaluate_every_num_epochs < 1: - self.evaluate_every_num_epochs = self.epochs - self.evaluate_on_num_examples = config["evaluate_on_num_examples"] - - def _load_params(self) -> None: - - self._check_old_config_variables(self.component_config) - self._tf_config = train_utils.load_tf_config(self.component_config) - self._load_nn_architecture_params(self.component_config) - self._load_embedding_params(self.component_config) - self._load_regularization_params(self.component_config) - self._load_visual_params(self.component_config) - - # package safety checks - @classmethod - def required_packages(cls) -> List[Text]: - return ["tensorflow"] - - # training data helpers: - @staticmethod - def _create_label_id_dict( - training_data: "TrainingData", attribute: Text - ) -> Dict[Text, int]: - """Create label_id dictionary""" - - distinct_label_ids = set( - [example.get(attribute) for example in training_data.intent_examples] - ) - {None} - return { - label_id: idx for idx, label_id in enumerate(sorted(distinct_label_ids)) - } - - @staticmethod - def _find_example_for_label(label, examples, attribute): - for ex in examples: - if ex.get(attribute) == label: - return ex - return None - - @staticmethod - def _check_labels_features_exist( - labels_example: List[Tuple[int, "Message"]], attribute_feature_name: Text - ) -> bool: - """Check if all labels have features set""" - for (label_idx, label_example) in labels_example: - if label_example.get(attribute_feature_name) is None: - return False - return True - - @staticmethod - def _extract_labels_precomputed_features( - label_examples: List[Tuple[int, "Message"]], attribute_feature_name: Text - ) -> np.ndarray: - - # Collect precomputed encodings - encoded_id_labels = [ - (label_idx, label_example.get(attribute_feature_name)) - for (label_idx, label_example) in 
label_examples - ] - - # Sort the list of tuples based on label_idx - encoded_id_labels = sorted(encoded_id_labels, key=lambda x: x[0]) - - encoded_all_labels = [encoding for (index, encoding) in encoded_id_labels] - - return np.array(encoded_all_labels) - - def _compute_default_label_features( - self, labels_example: List[Tuple[int, "Message"]] - ) -> np.ndarray: - """Compute one-hot representation for the labels""" - - return np.eye(len(labels_example)) - - def _create_encoded_label_ids( - self, - training_data: "TrainingData", - label_id_dict: Dict[Text, int], - attribute: Text, - attribute_feature_name: Text, - ) -> np.ndarray: - """Create matrix with label_ids encoded in rows as bag of words. If the features are already computed, fetch - them from the message object else compute a one hot encoding for the label as the feature vector - Find a training example for each label and get the encoded features from the corresponding Message object""" - - labels_example = [] - - # Collect one example for each label - for label_name, idx in label_id_dict.items(): - label_example = self._find_example_for_label( - label_name, training_data.intent_examples, attribute - ) - labels_example.append((idx, label_example)) - - # Collect features, precomputed if they exist, else compute on the fly - if self._check_labels_features_exist(labels_example, attribute_feature_name): - encoded_id_labels = self._extract_labels_precomputed_features( - labels_example, attribute_feature_name - ) - else: - encoded_id_labels = self._compute_default_label_features(labels_example) - - return encoded_id_labels - - # noinspection PyPep8Naming - def _create_session_data( - self, - training_data: "TrainingData", - label_id_dict: Dict[Text, int], - attribute: Text, - ) -> "train_utils.SessionData": - """Prepare data for training and create a SessionData object""" - - X = [] - label_ids = [] - Y = [] - - for e in training_data.intent_examples: - if e.get(attribute): - X.append(e.get(MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE])) - label_ids.append(label_id_dict[e.get(attribute)]) - - X = np.array(X) - label_ids = np.array(label_ids) - - for label_id_idx in label_ids: - Y.append(self._encoded_all_label_ids[label_id_idx]) - - Y = np.array(Y) - - return train_utils.SessionData(X=X, Y=Y, label_ids=label_ids) - - # tf helpers: - def _create_tf_embed_fnn( - self, - x_in: "tf.Tensor", - layer_sizes: List[int], - fnn_name: Text, - embed_name: Text, - ) -> "tf.Tensor": - """Create nn with hidden layers and name""" - - x = train_utils.create_tf_fnn( - x_in, - layer_sizes, - self.droprate, - self.C2, - self._is_training, - layer_name_suffix=fnn_name, - ) - return train_utils.create_tf_embed( - x, - self.embed_dim, - self.C2, - self.similarity_type, - layer_name_suffix=embed_name, - ) - - def _build_tf_train_graph(self) -> Tuple["tf.Tensor", "tf.Tensor"]: - self.a_in, self.b_in = self._iterator.get_next() - - all_label_ids = tf.constant( - self._encoded_all_label_ids, dtype=tf.float32, name="all_label_ids" - ) - - self.message_embed = self._create_tf_embed_fnn( - self.a_in, - self.hidden_layer_sizes["a"], - fnn_name="a_b" if self.share_hidden_layers else "a", - embed_name="a", - ) - - self.label_embed = self._create_tf_embed_fnn( - self.b_in, - self.hidden_layer_sizes["b"], - fnn_name="a_b" if self.share_hidden_layers else "b", - embed_name="b", - ) - self.all_labels_embed = self._create_tf_embed_fnn( - all_label_ids, - self.hidden_layer_sizes["b"], - fnn_name="a_b" if self.share_hidden_layers else "b", - embed_name="b", - ) - - 
return train_utils.calculate_loss_acc( - self.message_embed, - self.label_embed, - self.b_in, - self.all_labels_embed, - all_label_ids, - self.num_neg, - None, - self.loss_type, - self.mu_pos, - self.mu_neg, - self.use_max_sim_neg, - self.C_emb, - self.scale_loss, - ) - - def _build_tf_pred_graph( - self, session_data: "train_utils.SessionData" - ) -> "tf.Tensor": - self.a_in = tf.placeholder( - tf.float32, (None, session_data.X.shape[-1]), name="a" - ) - self.b_in = tf.placeholder( - tf.float32, (None, None, session_data.Y.shape[-1]), name="b" - ) - - self.message_embed = self._create_tf_embed_fnn( - self.a_in, - self.hidden_layer_sizes["a"], - fnn_name="a_b" if self.share_hidden_layers else "a", - embed_name="a", - ) - - self.sim_all = train_utils.tf_raw_sim( - self.message_embed[:, tf.newaxis, :], - self.all_labels_embed[tf.newaxis, :, :], - None, - ) - - self.label_embed = self._create_tf_embed_fnn( - self.b_in, - self.hidden_layer_sizes["b"], - fnn_name="a_b" if self.share_hidden_layers else "b", - embed_name="b", - ) - - self.sim = train_utils.tf_raw_sim( - self.message_embed[:, tf.newaxis, :], self.label_embed, None - ) - - return train_utils.confidence_from_sim(self.sim_all, self.similarity_type) - - def check_input_dimension_consistency(self, session_data): - - if self.share_hidden_layers: - if session_data.X[0].shape[-1] != session_data.Y[0].shape[-1]: - raise ValueError( - "If embeddings are shared " - "text features and label features " - "must coincide. Check the output dimensions of previous components." - ) - - def preprocess_train_data(self, training_data): - """Performs sanity checks on training data, extracts encodings for labels and prepares data for training""" - - label_id_dict = self._create_label_id_dict( - training_data, attribute=MESSAGE_INTENT_ATTRIBUTE - ) - - self.inverted_label_dict = {v: k for k, v in label_id_dict.items()} - self._encoded_all_label_ids = self._create_encoded_label_ids( - training_data, - label_id_dict, - attribute=MESSAGE_INTENT_ATTRIBUTE, - attribute_feature_name=MESSAGE_VECTOR_FEATURE_NAMES[ - MESSAGE_INTENT_ATTRIBUTE - ], - ) - - # check if number of negatives is less than number of label_ids - logger.debug( - "Check if num_neg {} is smaller than " - "number of label_ids {}, " - "else set num_neg to the number of label_ids - 1" - "".format(self.num_neg, self._encoded_all_label_ids.shape[0]) - ) - # noinspection PyAttributeOutsideInit - self.num_neg = min(self.num_neg, self._encoded_all_label_ids.shape[0] - 1) - - session_data = self._create_session_data( - training_data, label_id_dict, attribute=MESSAGE_INTENT_ATTRIBUTE - ) - - self.check_input_dimension_consistency(session_data) - - return session_data - - def _check_enough_labels(self, session_data) -> bool: - - return len(np.unique(session_data.label_ids)) >= 2 - - def train( - self, - training_data: "TrainingData", - cfg: Optional["RasaNLUModelConfig"] = None, - **kwargs: Any - ) -> None: - """Train the embedding label classifier on a data set.""" - - logger.debug("Started training embedding classifier.") - - # set numpy random seed - np.random.seed(self.random_seed) - - session_data = self.preprocess_train_data(training_data) - - possible_to_train = self._check_enough_labels(session_data) - - if not possible_to_train: - logger.error( - "Can not train a classifier. " - "Need at least 2 different classes. " - "Skipping training of classifier." 
- ) - return - - if self.evaluate_on_num_examples: - session_data, eval_session_data = train_utils.train_val_split( - session_data, self.evaluate_on_num_examples, self.random_seed - ) - else: - eval_session_data = None - - self.graph = tf.Graph() - with self.graph.as_default(): - # set random seed - tf.set_random_seed(self.random_seed) - - # allows increasing batch size - batch_size_in = tf.placeholder(tf.int64) - - ( - self._iterator, - train_init_op, - eval_init_op, - ) = train_utils.create_iterator_init_datasets( - session_data, eval_session_data, batch_size_in, self.batch_strategy - ) - - self._is_training = tf.placeholder_with_default(False, shape=()) - - loss, acc = self._build_tf_train_graph() - - # define which optimizer to use - self._train_op = tf.train.AdamOptimizer().minimize(loss) - - # train tensorflow graph - self.session = tf.Session(config=self._tf_config) - train_utils.train_tf_dataset( - train_init_op, - eval_init_op, - batch_size_in, - loss, - acc, - self._train_op, - self.session, - self._is_training, - self.epochs, - self.batch_size, - self.evaluate_on_num_examples, - self.evaluate_every_num_epochs, - ) - - # rebuild the graph for prediction - self.pred_confidence = self._build_tf_pred_graph(session_data) - - # process helpers - # noinspection PyPep8Naming - def _calculate_message_sim(self, X: np.ndarray) -> Tuple[np.ndarray, List[float]]: - """Calculate message similarities""" - - message_sim = self.session.run(self.pred_confidence, feed_dict={self.a_in: X}) - - message_sim = message_sim.flatten() # sim is a matrix - - label_ids = message_sim.argsort()[::-1] - message_sim[::-1].sort() - - # transform sim to python list for JSON serializing - return label_ids, message_sim.tolist() - - def predict_label(self, message): - - label = {"name": None, "confidence": 0.0} - label_ranking = [] - if self.session is None: - logger.error( - "There is no trained tf.session: " - "component is either not trained or " - "didn't receive enough training data" - ) - - else: - # get features (bag of words) for a message - # noinspection PyPep8Naming - X = message.get( - MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE] - ).reshape(1, -1) - - # load tf graph and session - label_ids, message_sim = self._calculate_message_sim(X) - - # if X contains all zeros do not predict some label - if X.any() and label_ids.size > 0: - label = { - "name": self.inverted_label_dict[label_ids[0]], - "confidence": message_sim[0], - } - - ranking = list(zip(list(label_ids), message_sim)) - ranking = ranking[:LABEL_RANKING_LENGTH] - label_ranking = [ - {"name": self.inverted_label_dict[label_idx], "confidence": score} - for label_idx, score in ranking - ] - return label, label_ranking - - def process(self, message: "Message", **kwargs: Any) -> None: - """Return the most likely label and its similarity to the input.""" - - label, label_ranking = self.predict_label(message) - - message.set("intent", label, add_to_output=True) - message.set("intent_ranking", label_ranking, add_to_output=True) - - def persist(self, file_name: Text, model_dir: Text) -> Dict[Text, Any]: - """Persist this model into the passed directory. - - Return the metadata necessary to load the model again. 
- """ - - if self.session is None: - return {"file": None} - - checkpoint = os.path.join(model_dir, file_name + ".ckpt") - - try: - os.makedirs(os.path.dirname(checkpoint)) - except OSError as e: - # be happy if someone already created the path - import errno - - if e.errno != errno.EEXIST: - raise - with self.graph.as_default(): - train_utils.persist_tensor("message_placeholder", self.a_in, self.graph) - train_utils.persist_tensor("label_placeholder", self.b_in, self.graph) - - train_utils.persist_tensor("similarity_all", self.sim_all, self.graph) - train_utils.persist_tensor( - "pred_confidence", self.pred_confidence, self.graph - ) - train_utils.persist_tensor("similarity", self.sim, self.graph) - - train_utils.persist_tensor("message_embed", self.message_embed, self.graph) - train_utils.persist_tensor("label_embed", self.label_embed, self.graph) - train_utils.persist_tensor( - "all_labels_embed", self.all_labels_embed, self.graph - ) - - saver = tf.train.Saver() - saver.save(self.session, checkpoint) - - with open( - os.path.join(model_dir, file_name + ".inv_label_dict.pkl"), "wb" - ) as f: - pickle.dump(self.inverted_label_dict, f) - - with open(os.path.join(model_dir, file_name + ".tf_config.pkl"), "wb") as f: - pickle.dump(self._tf_config, f) - - return {"file": file_name} - - @classmethod - def load( - cls, - meta: Dict[Text, Any], - model_dir: Text = None, - model_metadata: "Metadata" = None, - cached_component: Optional["EmbeddingIntentClassifier"] = None, - **kwargs: Any - ) -> "EmbeddingIntentClassifier": - - if model_dir and meta.get("file"): - file_name = meta.get("file") - checkpoint = os.path.join(model_dir, file_name + ".ckpt") - - with open(os.path.join(model_dir, file_name + ".tf_config.pkl"), "rb") as f: - _tf_config = pickle.load(f) - - graph = tf.Graph() - with graph.as_default(): - session = tf.Session(config=_tf_config) - saver = tf.train.import_meta_graph(checkpoint + ".meta") - - saver.restore(session, checkpoint) - - a_in = train_utils.load_tensor("message_placeholder") - b_in = train_utils.load_tensor("label_placeholder") - - sim_all = train_utils.load_tensor("similarity_all") - pred_confidence = train_utils.load_tensor("pred_confidence") - sim = train_utils.load_tensor("similarity") - - message_embed = train_utils.load_tensor("message_embed") - label_embed = train_utils.load_tensor("label_embed") - all_labels_embed = train_utils.load_tensor("all_labels_embed") - - with open( - os.path.join(model_dir, file_name + ".inv_label_dict.pkl"), "rb" - ) as f: - inv_label_dict = pickle.load(f) - - return cls( - component_config=meta, - inverted_label_dict=inv_label_dict, - session=session, - graph=graph, - message_placeholder=a_in, - label_placeholder=b_in, - similarity_all=sim_all, - pred_confidence=pred_confidence, - similarity=sim, - message_embed=message_embed, - label_embed=label_embed, - all_labels_embed=all_labels_embed, - ) - - else: - logger.warning( - "Failed to load nlu model. 
Maybe path {} " - "doesn't exist" - "".format(os.path.abspath(model_dir)) - ) - return cls(component_config=meta) diff --git a/rasa/nlu/classifiers/fallback_classifier.py b/rasa/nlu/classifiers/fallback_classifier.py new file mode 100644 index 000000000000..e6ec20dd98a2 --- /dev/null +++ b/rasa/nlu/classifiers/fallback_classifier.py @@ -0,0 +1,118 @@ +import logging +from typing import Any, List, Type, Text, Dict, Union, Tuple, Optional + +from rasa.constants import DEFAULT_NLU_FALLBACK_INTENT_NAME +from rasa.core.constants import DEFAULT_NLU_FALLBACK_THRESHOLD +from rasa.nlu.classifiers.classifier import IntentClassifier +from rasa.nlu.components import Component +from rasa.nlu.training_data import Message +from rasa.nlu.constants import ( + INTENT_RANKING_KEY, + INTENT, + INTENT_CONFIDENCE_KEY, + INTENT_NAME_KEY, +) + +THRESHOLD_KEY = "threshold" +AMBIGUITY_THRESHOLD_KEY = "ambiguity_threshold" + +logger = logging.getLogger(__name__) + + +class FallbackClassifier(Component): + + # please make sure to update the docs when changing a default parameter + defaults = { + # If the confidence of the top intent prediction is below this threshold, the + # fallback intent `DEFAULT_NLU_FALLBACK_INTENT_NAME` is predicted + THRESHOLD_KEY: DEFAULT_NLU_FALLBACK_THRESHOLD, + # If the confidence scores for the top two intent predictions are closer than + # `AMBIGUITY_THRESHOLD_KEY`, then `DEFAULT_NLU_FALLBACK_INTENT_NAME` is predicted. + AMBIGUITY_THRESHOLD_KEY: 0.1, + } + + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [IntentClassifier] + + def process(self, message: Message, **kwargs: Any) -> None: + """Process an incoming message. + + This is the component's chance to process an incoming + message. The component can rely on + any context attribute to be present, that gets created + by a call to :meth:`rasa.nlu.components.Component.create` + of ANY component and + on any context attributes created by a call to + :meth:`rasa.nlu.components.Component.process` + of components previous to this one. + + Args: + message: The :class:`rasa.nlu.training_data.message.Message` to process. + + """ + + if not self._should_fallback(message): + return + + message.data[INTENT] = _fallback_intent() + message.data[INTENT_RANKING_KEY].insert(0, _fallback_intent()) + + def _should_fallback(self, message: Message) -> bool: + """Check if the fallback intent should be predicted. + + Args: + message: The current message and its intent predictions. + + Returns: + `True` if the fallback intent should be predicted. + """ + intent_name = message.data[INTENT].get(INTENT_NAME_KEY) + below_threshold, nlu_confidence = self._nlu_confidence_below_threshold(message) + + if below_threshold: + logger.debug( + f"NLU confidence {nlu_confidence} for intent '{intent_name}' is lower " + f"than NLU threshold {self.component_config[THRESHOLD_KEY]:.2f}." + ) + return True + + ambiguous_prediction, confidence_delta = self._nlu_prediction_ambiguous(message) + if ambiguous_prediction: + logger.debug( + f"The difference in NLU confidences " + f"for the top two intents ({confidence_delta}) is lower than " + f"the ambiguity threshold " + f"{self.component_config[AMBIGUITY_THRESHOLD_KEY]:.2f}. Predicting " + f"intent '{DEFAULT_NLU_FALLBACK_INTENT_NAME}' instead of " + f"'{intent_name}'."
+ ) + return True + + return False + + def _nlu_confidence_below_threshold(self, message: Message) -> Tuple[bool, float]: + nlu_confidence = message.data[INTENT].get(INTENT_CONFIDENCE_KEY) + return nlu_confidence < self.component_config[THRESHOLD_KEY], nlu_confidence + + def _nlu_prediction_ambiguous( + self, message: Message + ) -> Tuple[bool, Optional[float]]: + intents = message.data.get(INTENT_RANKING_KEY, []) + if len(intents) >= 2: + first_confidence = intents[0].get(INTENT_CONFIDENCE_KEY, 1.0) + second_confidence = intents[1].get(INTENT_CONFIDENCE_KEY, 1.0) + difference = first_confidence - second_confidence + return ( + difference < self.component_config[AMBIGUITY_THRESHOLD_KEY], + difference, + ) + return False, None + + +def _fallback_intent() -> Dict[Text, Union[Text, float]]: + return { + INTENT_NAME_KEY: DEFAULT_NLU_FALLBACK_INTENT_NAME, + # TODO: Re-consider how we represent the confidence here + INTENT_CONFIDENCE_KEY: 1.0, + } diff --git a/rasa/nlu/classifiers/keyword_intent_classifier.py b/rasa/nlu/classifiers/keyword_intent_classifier.py index 092465a36f15..b41af3d6bba6 100644 --- a/rasa/nlu/classifiers/keyword_intent_classifier.py +++ b/rasa/nlu/classifiers/keyword_intent_classifier.py @@ -1,32 +1,162 @@ -from typing import Any, Optional, Text +import os +import logging +import re +from typing import Any, Dict, Optional, Text -from rasa.nlu.components import Component +from rasa.constants import DOCS_URL_COMPONENTS +from rasa.nlu import utils +from rasa.nlu.classifiers.classifier import IntentClassifier +from rasa.nlu.constants import INTENT +from rasa.utils.common import raise_warning +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.training_data import TrainingData +from rasa.nlu.model import Metadata from rasa.nlu.training_data import Message +logger = logging.getLogger(__name__) -class KeywordIntentClassifier(Component): - provides = ["intent"] +class KeywordIntentClassifier(IntentClassifier): + """Intent classifier using simple keyword matching. - his = ["hello", "hi", "hey"] - byes = ["bye", "goodbye"] + The classifier takes a list of keywords and associated intents as an input. + An input sentence is checked for the keywords and the intent is returned. + + """ + + defaults = {"case_sensitive": True} + + def __init__( + self, + component_config: Optional[Dict[Text, Any]] = None, + intent_keyword_map: Optional[Dict] = None, + ): + + super(KeywordIntentClassifier, self).__init__(component_config) + + self.case_sensitive = self.component_config.get("case_sensitive") + self.intent_keyword_map = intent_keyword_map or {} + + def train( + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, + ) -> None: + + duplicate_examples = set() + for ex in training_data.training_examples: + if ( + ex.text in self.intent_keyword_map.keys() + and ex.get(INTENT) != self.intent_keyword_map[ex.text] + ): + duplicate_examples.add(ex.text) + raise_warning( + f"Keyword '{ex.text}' is a keyword to trigger intent " + f"'{self.intent_keyword_map[ex.text]}' and also " + f"intent '{ex.get(INTENT)}', it will be removed " + f"from the list of keywords for both of them. 
" + f"Remove (one of) the duplicates from the training data.", + docs=DOCS_URL_COMPONENTS + "#keyword-intent-classifier", + ) + else: + self.intent_keyword_map[ex.text] = ex.get(INTENT) + for keyword in duplicate_examples: + self.intent_keyword_map.pop(keyword) + logger.debug( + f"Removed '{keyword}' from the list of keywords because it was " + "a keyword for more than one intent." + ) + + self._validate_keyword_map() + + def _validate_keyword_map(self) -> None: + re_flag = 0 if self.case_sensitive else re.IGNORECASE + + ambiguous_mappings = [] + for keyword1, intent1 in self.intent_keyword_map.items(): + for keyword2, intent2 in self.intent_keyword_map.items(): + if ( + re.search(r"\b" + keyword1 + r"\b", keyword2, flags=re_flag) + and intent1 != intent2 + ): + ambiguous_mappings.append((intent1, keyword1)) + raise_warning( + f"Keyword '{keyword1}' is a keyword of intent '{intent1}', " + f"but also a substring of '{keyword2}', which is a " + f"keyword of intent '{intent2}." + f" '{keyword1}' will be removed from the list of keywords.\n" + f"Remove (one of) the conflicting keywords from the" + f" training data.", + docs=DOCS_URL_COMPONENTS + "#keyword-intent-classifier", + ) + for intent, keyword in ambiguous_mappings: + self.intent_keyword_map.pop(keyword) + logger.debug( + f"Removed keyword '{keyword}' from intent '{intent}' because it matched a " + "keyword of another intent." + ) def process(self, message: Message, **kwargs: Any) -> None: + intent_name = self._map_keyword_to_intent(message.text) + + confidence = 0.0 if intent_name is None else 1.0 + intent = {"name": intent_name, "confidence": confidence} + + if message.get(INTENT) is None or intent is not None: + message.set(INTENT, intent, add_to_output=True) + + def _map_keyword_to_intent(self, text: Text) -> Optional[Text]: + re_flag = 0 if self.case_sensitive else re.IGNORECASE + + for keyword, intent in self.intent_keyword_map.items(): + if re.search(r"\b" + keyword + r"\b", text, flags=re_flag): + logger.debug( + f"KeywordClassifier matched keyword '{keyword}' to" + f" intent '{intent}'." + ) + return intent + + logger.debug("KeywordClassifier did not find any keywords in the message.") + return None + + def persist(self, file_name: Text, model_dir: Text) -> Dict[Text, Any]: + """Persist this model into the passed directory. - intent = {"name": self.parse(message.text), "confidence": 1.0} - message.set("intent", intent, add_to_output=True) + Return the metadata necessary to load the model again. + """ - def parse(self, text: Text) -> Optional[Text]: + file_name = file_name + ".json" + keyword_file = os.path.join(model_dir, file_name) + utils.write_json_to_file(keyword_file, self.intent_keyword_map) - _text = text.lower() + return {"file": file_name} - def is_present(x): - return x in _text + @classmethod + def load( + cls, + meta: Dict[Text, Any], + model_dir: Optional[Text] = None, + model_metadata: Metadata = None, + cached_component: Optional["KeywordIntentClassifier"] = None, + **kwargs: Any, + ) -> "KeywordIntentClassifier": - if any(map(is_present, self.his)): - return "greet" - elif any(map(is_present, self.byes)): - return "goodbye" + if model_dir and meta.get("file"): + file_name = meta.get("file") + keyword_file = os.path.join(model_dir, file_name) + if os.path.exists(keyword_file): + intent_keyword_map = utils.read_json_file(keyword_file) + else: + raise_warning( + f"Failed to load key word file for `IntentKeywordClassifier`, " + f"maybe {keyword_file} does not exist?" 
+ ) + intent_keyword_map = None + return cls(meta, intent_keyword_map) else: - return None + raise Exception( + f"Failed to load keyword intent classifier model. " + f"Path {os.path.abspath(meta.get('file'))} doesn't exist." + ) diff --git a/rasa/nlu/classifiers/mitie_intent_classifier.py b/rasa/nlu/classifiers/mitie_intent_classifier.py index a52428158dfa..0ee0b2808656 100644 --- a/rasa/nlu/classifiers/mitie_intent_classifier.py +++ b/rasa/nlu/classifiers/mitie_intent_classifier.py @@ -1,28 +1,31 @@ import os import typing -from typing import Any, Dict, List, Optional, Text +from typing import Any, Dict, List, Optional, Text, Type +from rasa.nlu.utils.mitie_utils import MitieNLP +from rasa.nlu.tokenizers.tokenizer import Tokenizer +from rasa.nlu.classifiers.classifier import IntentClassifier from rasa.nlu.components import Component from rasa.nlu.config import RasaNLUModelConfig from rasa.nlu.model import Metadata +from rasa.nlu.constants import TOKENS_NAMES, TEXT, INTENT from rasa.nlu.training_data import Message, TrainingData if typing.TYPE_CHECKING: import mitie -class MitieIntentClassifier(Component): - - provides = ["intent"] - - requires = ["tokens", "mitie_feature_extractor", "mitie_file"] +class MitieIntentClassifier(IntentClassifier): + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [MitieNLP, Tokenizer] def __init__( self, component_config: Optional[Dict[Text, Any]] = None, clf=None ) -> None: """Construct a new intent classifier using the MITIE framework.""" - super(MitieIntentClassifier, self).__init__(component_config) + super().__init__(component_config) self.clf = clf @@ -31,7 +34,10 @@ def required_packages(cls) -> List[Text]: return ["mitie"] def train( - self, training_data: TrainingData, cfg: RasaNLUModelConfig, **kwargs: Any + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, ) -> None: import mitie @@ -48,7 +54,7 @@ def train( for example in training_data.intent_examples: tokens = self._tokens_of_message(example) - trainer.add_labeled_text(tokens, example.get("intent")) + trainer.add_labeled_text(tokens, example.get(INTENT)) if training_data.intent_examples: # we can not call train if there are no examples! 
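+            # Note: a minimal pipeline sketch that satisfies required_components() above;
+            # the component names and the feature-extractor path below are assumptions
+            # based on the standard Rasa registry, not something this classifier enforces:
+            #   pipeline:
+            #     - name: MitieNLP
+            #       model: "data/total_word_feature_extractor.dat"
+            #     - name: MitieTokenizer
+            #     - name: MitieIntentClassifier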
@@ -77,8 +83,8 @@ def process(self, message: Message, **kwargs: Any) -> None: ) @staticmethod - def _tokens_of_message(message): - return [token.text for token in message.get("tokens", [])] + def _tokens_of_message(message) -> List[Text]: + return [token.text for token in message.get(TOKENS_NAMES[TEXT], [])] @classmethod def load( @@ -87,7 +93,7 @@ def load( model_dir: Optional[Text] = None, model_metadata: Optional[Metadata] = None, cached_component: Optional["MitieIntentClassifier"] = None, - **kwargs: Any + **kwargs: Any, ) -> "MitieIntentClassifier": import mitie diff --git a/rasa/nlu/classifiers/sklearn_intent_classifier.py b/rasa/nlu/classifiers/sklearn_intent_classifier.py index a562f4f8077a..44a875060f74 100644 --- a/rasa/nlu/classifiers/sklearn_intent_classifier.py +++ b/rasa/nlu/classifiers/sklearn_intent_classifier.py @@ -1,16 +1,22 @@ import logging -import numpy as np import os import typing -from typing import Any, Dict, List, Optional, Text, Tuple +import warnings +from typing import Any, Dict, List, Optional, Text, Tuple, Type + +import numpy as np -from rasa.nlu import utils +import rasa.utils.io as io_utils +from rasa.constants import DOCS_URL_TRAINING_DATA_NLU from rasa.nlu.classifiers import LABEL_RANKING_LENGTH +from rasa.nlu.featurizers.featurizer import DenseFeaturizer from rasa.nlu.components import Component +from rasa.nlu.classifiers.classifier import IntentClassifier from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.constants import TEXT from rasa.nlu.model import Metadata from rasa.nlu.training_data import Message, TrainingData -from rasa.nlu.constants import MESSAGE_VECTOR_FEATURE_NAMES, MESSAGE_TEXT_ATTRIBUTE +import rasa.utils.common as common_utils logger = logging.getLogger(__name__) @@ -18,12 +24,12 @@ import sklearn -class SklearnIntentClassifier(Component): +class SklearnIntentClassifier(IntentClassifier): """Intent classifier using the sklearn framework""" - provides = ["intent", "intent_ranking"] - - requires = [MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE]] + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [DenseFeaturizer] defaults = { # C parameter of the svm - cross validation will select the best value @@ -43,14 +49,14 @@ class SklearnIntentClassifier(Component): def __init__( self, - component_config: Dict[Text, Any] = None, + component_config: Optional[Dict[Text, Any]] = None, clf: "sklearn.model_selection.GridSearchCV" = None, le: Optional["sklearn.preprocessing.LabelEncoder"] = None, ) -> None: """Construct a new intent classifier using the sklearn framework.""" from sklearn.preprocessing import LabelEncoder - super(SklearnIntentClassifier, self).__init__(component_config) + super().__init__(component_config) if le is not None: self.le = le @@ -77,7 +83,10 @@ def transform_labels_num2str(self, y: np.ndarray) -> np.ndarray: return self.le.inverse_transform(y) def train( - self, training_data: TrainingData, cfg: RasaNLUModelConfig, **kwargs: Any + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, ) -> None: """Train the intent classifier on a data set.""" @@ -86,29 +95,44 @@ def train( labels = [e.get("intent") for e in training_data.intent_examples] if len(set(labels)) < 2: - logger.warning( - "Can not train an intent classifier. " - "Need at least 2 different classes. " - "Skipping training of intent classifier." + common_utils.raise_warning( + "Can not train an intent classifier as there are not " + "enough intents. 
Need at least 2 different intents. " + "Skipping training of intent classifier.", + docs=DOCS_URL_TRAINING_DATA_NLU, ) else: y = self.transform_labels_str2num(labels) X = np.stack( [ - example.get("text_features") + self._get_sentence_features(example) for example in training_data.intent_examples ] ) + # reduce dimensionality + X = np.reshape(X, (len(X), -1)) self.clf = self._create_classifier(num_threads, y) - self.clf.fit(X, y) + with warnings.catch_warnings(): + # sklearn raises lots of + # "UndefinedMetricWarning: F - score is ill - defined" + # if there are few intent examples, this is needed to prevent it + warnings.simplefilter("ignore") + self.clf.fit(X, y) - def _num_cv_splits(self, y): + @staticmethod + def _get_sentence_features(message: Message) -> np.ndarray: + _, sentence_features = message.get_dense_features(TEXT) + return sentence_features[0] + + def _num_cv_splits(self, y) -> int: folds = self.component_config["max_cross_validation_folds"] return max(2, min(folds, np.min(np.bincount(y)) // 5)) - def _create_classifier(self, num_threads, y): + def _create_classifier( + self, num_threads: int, y + ) -> "sklearn.model_selection.GridSearchCV": from sklearn.model_selection import GridSearchCV from sklearn.svm import SVC @@ -132,6 +156,7 @@ def _create_classifier(self, num_threads, y): cv=cv_splits, scoring=self.component_config["scoring_function"], verbose=1, + iid=False, ) def process(self, message: Message, **kwargs: Any) -> None: @@ -143,7 +168,8 @@ def process(self, message: Message, **kwargs: Any) -> None: intent = None intent_ranking = [] else: - X = message.get("text_features").reshape(1, -1) + X = self._get_sentence_features(message).reshape(1, -1) + intent_ids, probabilities = self.predict(X) intents = self.transform_labels_num2str(np.ravel(intent_ids)) # `predict` returns a matrix as it is supposed @@ -199,10 +225,10 @@ def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]] classifier_file_name = file_name + "_classifier.pkl" encoder_file_name = file_name + "_encoder.pkl" if self.clf and self.le: - utils.json_pickle( + io_utils.json_pickle( os.path.join(model_dir, encoder_file_name), self.le.classes_ ) - utils.json_pickle( + io_utils.json_pickle( os.path.join(model_dir, classifier_file_name), self.clf.best_estimator_ ) return {"classifier": classifier_file_name, "encoder": encoder_file_name} @@ -214,7 +240,7 @@ def load( model_dir: Optional[Text] = None, model_metadata: Optional[Metadata] = None, cached_component: Optional["SklearnIntentClassifier"] = None, - **kwargs: Any + **kwargs: Any, ) -> "SklearnIntentClassifier": from sklearn.preprocessing import LabelEncoder @@ -222,8 +248,8 @@ def load( encoder_file = os.path.join(model_dir, meta.get("encoder")) if os.path.exists(classifier_file): - classifier = utils.json_unpickle(classifier_file) - classes = utils.json_unpickle(encoder_file) + classifier = io_utils.json_unpickle(classifier_file) + classes = io_utils.json_unpickle(encoder_file) encoder = LabelEncoder() encoder.classes_ = classes return cls(meta, classifier, encoder) diff --git a/rasa/nlu/components.py b/rasa/nlu/components.py index cd3a048cd82c..6df9778eb822 100644 --- a/rasa/nlu/components.py +++ b/rasa/nlu/components.py @@ -1,11 +1,13 @@ +import itertools import logging import typing -from typing import Any, Dict, Hashable, List, Optional, Set, Text, Tuple -import warnings +from typing import Any, Dict, Hashable, List, Optional, Set, Text, Tuple, Type, Iterable -from rasa.nlu.config import RasaNLUModelConfig, override_defaults 
-from rasa.nlu.training_data import TrainingData, Message -from rasa.nlu.constants import MESSAGE_RESPONSE_ATTRIBUTE +from rasa.constants import DOCS_URL_MIGRATION_GUIDE +from rasa.nlu.constants import TRAINABLE_EXTRACTORS +from rasa.nlu.config import RasaNLUModelConfig, override_defaults, InvalidConfigError +from rasa.nlu.training_data import Message, TrainingData +from rasa.utils.common import raise_warning if typing.TYPE_CHECKING: from rasa.nlu.model import Metadata @@ -14,8 +16,15 @@ def find_unavailable_packages(package_names: List[Text]) -> Set[Text]: - """Tries to import all the package names and returns - the packages where it failed.""" + """Tries to import all package names and returns the packages where it failed. + + Args: + package_names: The package names to import. + + Returns: + Package names that could not be imported. + """ + import importlib failed_imports = set() @@ -28,8 +37,12 @@ def find_unavailable_packages(package_names: List[Text]) -> Set[Text]: def validate_requirements(component_names: List[Text]) -> None: - """Ensures that all required importable python packages are installed to - instantiate and used the passed components.""" + """Validates that all required importable python packages are installed. + + Args: + component_names: The list of component names. + """ + from rasa.nlu import registry # Validate that all required packages are installed @@ -43,73 +56,254 @@ def validate_requirements(component_names: List[Text]) -> None: # if available, use the development file to figure out the correct # version numbers for each requirement raise Exception( - "Not all required importable packages are installed. " - + "To use this pipeline, you need to install the " - "missing dependencies. " - + "Please install the package(s) that contain the module(s): {}".format( - ", ".join(failed_imports) - ) + f"Not all required importable packages are installed. " + f"To use this pipeline, you need to install the " + f"missing dependencies. " + f"Please install the package(s) that contain the module(s): " + f"{', '.join(failed_imports)}" ) -def validate_arguments( - pipeline: List["Component"], - context: Dict[Text, Any], - allow_empty_pipeline: bool = False, -) -> None: - """Validates a pipeline before it is run. Ensures, that all - arguments are present to train the pipeline.""" +def validate_empty_pipeline(pipeline: List["Component"]) -> None: + """Ensures the pipeline is not empty. + + Args: + pipeline: the list of the :class:`rasa.nlu.components.Component`. + """ - # Ensure the pipeline is not empty - if not allow_empty_pipeline and len(pipeline) == 0: - raise ValueError( + if len(pipeline) == 0: + raise InvalidConfigError( "Can not train an empty pipeline. " "Make sure to specify a proper pipeline in " - "the configuration using the `pipeline` key." - + "The `backend` configuration key is " + "the configuration using the 'pipeline' key. " + "The 'backend' configuration key is " "NOT supported anymore." ) - provided_properties = set(context.keys()) +def validate_only_one_tokenizer_is_used(pipeline: List["Component"]) -> None: + """Validates that only one tokenizer is present in the pipeline. + + Args: + pipeline: the list of the :class:`rasa.nlu.components.Component`. + """ + + from rasa.nlu.tokenizers.tokenizer import Tokenizer + + tokenizer_names = [] for component in pipeline: - for r in component.requires: - if r not in provided_properties: - raise Exception( - "Failed to validate at component " - "'{}'. 
Missing property: '{}'" - "".format(component.name, r) - ) - provided_properties.update(component.provides) + if isinstance(component, Tokenizer): + tokenizer_names.append(component.name) + + if len(tokenizer_names) > 1: + raise InvalidConfigError( + f"More than one tokenizer is used: {tokenizer_names}. " + f"You can use only one tokenizer." + ) + + +def _required_component_in_pipeline( + required_component: Type["Component"], pipeline: List["Component"] +) -> bool: + """Checks that the required component is present in the pipeline. + + Args: + required_component: The class of the required component. + pipeline: The list of the :class:`rasa.nlu.components.Component`. + + Returns: + `True` if required_component is in the pipeline, `False` otherwise. + """ + + for previous_component in pipeline: + if isinstance(previous_component, required_component): + return True + return False + + +def _check_deprecated_attributes(component: "Component") -> None: + """Checks that the component doesn't have deprecated attributes. + + Args: + component: The :class:`rasa.nlu.components.Component`. + """ + + if hasattr(component, "provides"): + raise_warning( + f"'{component.name}' contains property 'provides', " + f"which is deprecated. There is no need to specify " + f"the list of attributes that a component provides.", + category=FutureWarning, + docs=DOCS_URL_MIGRATION_GUIDE, + ) + if hasattr(component, "requires"): + raise_warning( + f"'{component.name}' contains property 'requires', " + f"which is deprecated. Use 'required_components()' method " + f"to specify which components are required to be present " + f"in the pipeline by this component.", + category=FutureWarning, + docs=DOCS_URL_MIGRATION_GUIDE, + ) + + +def validate_required_components(pipeline: List["Component"]) -> None: + """Validates that all required components are present in the pipeline. + + Args: + pipeline: The list of the :class:`rasa.nlu.components.Component`. + """ + + for i, component in enumerate(pipeline): + _check_deprecated_attributes(component) + + missing_components = [] + for required_component in component.required_components(): + if not _required_component_in_pipeline(required_component, pipeline[:i]): + missing_components.append(required_component.name) + + if missing_components: + raise InvalidConfigError( + f"'{component.name}' requires {missing_components}. " + f"Add required components to the pipeline." + ) + + +def validate_pipeline(pipeline: List["Component"]) -> None: + """Validates the pipeline. + + Args: + pipeline: The list of the :class:`rasa.nlu.components.Component`. + """ + + validate_empty_pipeline(pipeline) + validate_only_one_tokenizer_is_used(pipeline) + validate_required_components(pipeline) + + +def any_components_in_pipeline( + components: Iterable[Text], pipeline: List["Component"] +) -> bool: + """Check if any of the provided components are listed in the pipeline. + + Args: + components: A list of component names to check. + pipeline: A list of :class:`rasa.nlu.components.Component`s. + + Returns: + `True` if any of the `components` are in the `pipeline`, else `False`. + + """ + return any(any(component.name == c for component in pipeline) for c in components) def validate_required_components_from_data( pipeline: List["Component"], data: TrainingData -): +) -> None: + """Validates that the pipeline contains components required by the training data.
- response_selector_exists = False - for component in pipeline: - # check if a response selector is part of NLU pipeline - if MESSAGE_RESPONSE_ATTRIBUTE in component.provides: - response_selector_exists = True - - if len(data.response_examples) and not response_selector_exists: - warnings.warn( - "Training data consists examples for training a response selector but " - "no response selector component specified inside NLU pipeline" + Args: + pipeline: The list of the :class:`rasa.nlu.components.Component`s. + data: The :class:`rasa.nlu.training_data.training_data.TrainingData`. + """ + + if data.response_examples and not any_components_in_pipeline( + ["ResponseSelector"], pipeline + ): + raise_warning( + "You have defined training data with examples for training a response " + "selector, but your NLU pipeline does not include a response selector " + "component. To train a model on your response selector data, add a " + "'ResponseSelector' to your pipeline." + ) + + if data.entity_examples and not any_components_in_pipeline( + TRAINABLE_EXTRACTORS, pipeline + ): + raise_warning( + "You have defined training data consisting of entity examples, but " + "your NLU pipeline does not include an entity extractor trained on " + "your training data. To extract non-pretrained entities, add one of " + f"{TRAINABLE_EXTRACTORS} to your pipeline." + ) + + if data.entity_examples and not any_components_in_pipeline( + {"DIETClassifier", "CRFEntityExtractor"}, pipeline + ): + if data.entity_roles_groups_used(): + raise_warning( + "You have defined training data with entities that have roles/groups, " + "but your NLU pipeline does not include a 'DIETClassifier' or a " + "'CRFEntityExtractor'. To train entities that have roles/groups, " + "add either 'DIETClassifier' or 'CRFEntityExtractor' to your " + "pipeline." + ) + + if data.regex_features and not any_components_in_pipeline( + ["RegexFeaturizer", "RegexEntityExtractor"], pipeline + ): + raise_warning( + "You have defined training data with regexes, but " + "your NLU pipeline does not include a 'RegexFeaturizer' or a " + "'RegexEntityExtractor'. To use regexes, include either a " + "'RegexFeaturizer' or a 'RegexEntityExtractor' in your pipeline." + ) + + if data.lookup_tables and not any_components_in_pipeline( + ["RegexFeaturizer", "RegexEntityExtractor"], pipeline + ): + raise_warning( + "You have defined training data consisting of lookup tables, but " + "your NLU pipeline does not include a 'RegexFeaturizer' or a " + "'RegexEntityExtractor'. To use lookup tables, include either a " + "'RegexFeaturizer' or a 'RegexEntityExtractor' in your pipeline." + ) + + if data.lookup_tables: + if not any_components_in_pipeline( + ["CRFEntityExtractor", "DIETClassifier"], pipeline + ): + raise_warning( + "You have defined training data consisting of lookup tables, but " + "your NLU pipeline does not include any components that use these " + "features. To make use of lookup tables, add a 'DIETClassifier' or a " + "'CRFEntityExtractor' with the 'pattern' feature to your pipeline." 
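+                # Illustrative sketch of a 'CRFEntityExtractor' entry that satisfies this
+                # check; the surrounding feature names are assumptions, only 'pattern' matters:
+                #   - name: CRFEntityExtractor
+                #     features: [["low", "title"], ["bias", "low", "pattern"], ["low", "title"]]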
+ ) + elif any_components_in_pipeline(["CRFEntityExtractor"], pipeline): + crf_components = [c for c in pipeline if c.name == "CRFEntityExtractor"] + # check to see if any of the possible CRFEntityExtractors will + # featurize `pattern` + has_pattern_feature = False + for crf in crf_components: + crf_features = crf.component_config.get("features") + # iterate through [[before],[word],[after]] features + has_pattern_feature = "pattern" in itertools.chain(*crf_features) + + if not has_pattern_feature: + raise_warning( + "You have defined training data consisting of lookup tables, but " + "your NLU pipeline's 'CRFEntityExtractor' does not include the " + "'pattern' feature. To featurize lookup tables, add the 'pattern' " + "feature to the 'CRFEntityExtractor' in your pipeline." + ) + + if data.entity_synonyms and not any_components_in_pipeline( + ["EntitySynonymMapper"], pipeline + ): + raise_warning( + "You have defined synonyms in your training data, but " + "your NLU pipeline does not include an 'EntitySynonymMapper'. " + "To map synonyms, add an 'EntitySynonymMapper' to your pipeline." ) class MissingArgumentError(ValueError): - """Raised when a function is called and not all parameters can be - filled from the context / config. + """Raised when not all parameters can be filled from the context / config. Attributes: message -- explanation of which parameter is missing """ def __init__(self, message: Text) -> None: - super(MissingArgumentError, self).__init__(message) + super().__init__(message) self.message = message def __str__(self) -> Text: @@ -128,16 +322,16 @@ def __init__(self, component: Text, language: Text) -> None: self.component = component self.language = language - super(UnsupportedLanguageError, self).__init__(component, language) + super().__init__(component, language) def __str__(self) -> Text: - return "component {} does not support language {}".format( - self.component, self.language + return ( + f"component '{self.component}' does not support language '{self.language}'." ) class ComponentMetaclass(type): - """Metaclass with `name` class property""" + """Metaclass with `name` class property.""" @property def name(cls): @@ -146,7 +340,7 @@ def name(cls): return cls.__name__ -class Component(object, metaclass=ComponentMetaclass): +class Component(metaclass=ComponentMetaclass): """A component is a message processing unit in a pipeline. Components are collected sequentially in a pipeline. Each component @@ -165,7 +359,8 @@ class Component(object, metaclass=ComponentMetaclass): components a component can use to do its own processing. For example, a featurizer component can provide features that are used by another component down - the pipeline to do intent classification.""" + the pipeline to do intent classification. + """ # Component class name is used when integrating it in a # pipeline. E.g. ``[ComponentA, ComponentB]`` @@ -177,18 +372,17 @@ def name(self): return type(self).name - # Defines what attributes the pipeline component will - # provide when called. The listed attributes - # should be set by the component on the message object - # during test and train, e.g. - # ```message.set("entities", [...])``` - provides = [] + # Which components are required by this component. + # Listed components should appear before the component itself in the pipeline. + @classmethod + def required_components(cls) -> List[Type["Component"]]: + """Specify which components need to be present in the pipeline. - # Which attributes on a message are required by this - # component. e.g. 
if requires contains "tokens", than a - # previous component in the pipeline needs to have "tokens" - # within the above described `provides` property. - requires = [] + Returns: + The list of class names of required components. + """ + + return [] # Defines the default configuration parameters of a component # these values can be overwritten in the pipeline configuration @@ -200,7 +394,13 @@ def name(self): # This attribute is designed for instance method: `can_handle_language`. # Default value is None which means it can handle all languages. # This is an important feature for backwards compatibility of components. - language_list = None + supported_language_list = None + + # Defines what language(s) this component can NOT handle. + # This attribute is designed for instance method: `can_handle_language`. + # Default value is None which means it can handle all languages. + # This is an important feature for backwards compatibility of components. + not_supported_language_list = None def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None: @@ -218,13 +418,19 @@ def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None: @classmethod def required_packages(cls) -> List[Text]: - """Specify which python packages need to be installed to use this - component, e.g. ``["spacy"]``. More specifically, these should be + """Specify which python packages need to be installed. + + E.g. ``["spacy"]``. More specifically, these should be importable python package names e.g. `sklearn` and not package names in the dependencies sense e.g. `scikit-learn` This list of requirements allows us to fail early during training - if a required package is not installed.""" + if a required package is not installed. + + Returns: + The list of required package names. + """ + return [] @classmethod @@ -234,7 +440,7 @@ def load( model_dir: Optional[Text] = None, model_metadata: Optional["Metadata"] = None, cached_component: Optional["Component"] = None, - **kwargs: Any + **kwargs: Any, ) -> "Component": """Load this component from file. @@ -243,12 +449,22 @@ def load( this component needs to be able to restore itself. Components can rely on any context attributes that are created by :meth:`components.Component.create` - calls to components previous - to this one.""" + calls to components previous to this one. + + Args: + meta: Any configuration parameter related to the model. + model_dir: The directory to load the component from. + model_metadata: The model's :class:`rasa.nlu.model.Metadata`. + cached_component: The cached component. + + Returns: + the loaded component + """ + if cached_component: return cached_component - else: - return cls(meta) + + return cls(meta) @classmethod def create( @@ -256,7 +472,15 @@ def create( ) -> "Component": """Creates this component (e.g. before a training is started). - Method can access all configuration parameters.""" + Method can access all configuration parameters. + + Args: + component_config: The components configuration parameters. + config: The model configuration parameters. + + Returns: + The created component. + """ # Check language supporting language = config.language @@ -267,7 +491,7 @@ def create( return cls(component_config) def provide_context(self) -> Optional[Dict[Text, Any]]: - """Initialize this component for a new pipeline + """Initialize this component for a new pipeline. 
This function will be called before the training is started and before the first message is processed using @@ -277,11 +501,19 @@ def provide_context(self) -> Optional[Dict[Text, Any]]: components do not need to implement this method. It's mostly used to initialize framework environments like MITIE and spacy - (e.g. loading word vectors for the pipeline).""" + (e.g. loading word vectors for the pipeline). + + Returns: + The updated component configuration. + """ + pass def train( - self, training_data: TrainingData, cfg: RasaNLUModelConfig, **kwargs: Any + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, ) -> None: """Train this component. @@ -292,7 +524,15 @@ def train( of ANY component and on any context attributes created by a call to :meth:`rasa.nlu.components.Component.train` - of components previous to this one.""" + of components previous to this one. + + Args: + training_data: + The :class:`rasa.nlu.training_data.training_data.TrainingData`. + config: The model configuration parameters. + + """ + pass def process(self, message: Message, **kwargs: Any) -> None: @@ -305,11 +545,25 @@ def process(self, message: Message, **kwargs: Any) -> None: of ANY component and on any context attributes created by a call to :meth:`rasa.nlu.components.Component.process` - of components previous to this one.""" + of components previous to this one. + + Args: + message: The :class:`rasa.nlu.training_data.message.Message` to process. + + """ + pass def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]: - """Persist this component to disk for future loading.""" + """Persist this component to disk for future loading. + + Args: + file_name: The file name of the model. + model_dir: The directory to store the model to. + + Returns: + An optional dictionary with any information about the stored model. + """ pass @@ -322,7 +576,15 @@ def cache_key( If a component is unique to a model it should return None. Otherwise, an instantiation of the component will be reused for all models where the - metadata creates the same key.""" + metadata creates the same key. + + Args: + component_meta: The component configuration. + model_metadata: The component's :class:`rasa.nlu.model.Metadata`. + + Returns: + A unique caching key. + """ return None @@ -335,7 +597,7 @@ def __getstate__(self) -> Any: del d["partial_processing_pipeline"] return d - def __eq__(self, other): + def __eq__(self, other) -> bool: return self.__dict__ == other.__dict__ def prepare_partial_processing( @@ -346,7 +608,13 @@ def prepare_partial_processing( The pipeline should be a list of components that are previous to this one in the pipeline and have already finished their training (and can therefore - be safely used to process messages).""" + be safely used to process messages). + + Args: + pipeline: The list of components. + context: The context of processing. + + """ self.partial_processing_pipeline = pipeline self.partial_processing_context = context @@ -356,7 +624,15 @@ def partially_process(self, message: Message) -> Message: training (e.g. external training data). The passed message will be processed by all components - previous to this one in the pipeline.""" + previous to this one in the pipeline. + + Args: + message: The :class:`rasa.nlu.training_data.message.Message` to process. + + Returns: + The processed :class:`rasa.nlu.training_data.message.Message`. 
+ + """ if self.partial_processing_context is not None: for component in self.partial_processing_pipeline: @@ -370,16 +646,32 @@ def can_handle_language(cls, language: Hashable) -> bool: """Check if component supports a specific language. This method can be overwritten when needed. (e.g. dynamically - determine which language is supported.)""" + determine which language is supported.) + + Args: + language: The language to check. + + Returns: + `True` if component can handle specific language, `False` otherwise. + """ # if language_list is set to `None` it means: support all languages - if language is None or cls.language_list is None: + if language is None or ( + cls.supported_language_list is None + and cls.not_supported_language_list is None + ): return True - return language in cls.language_list + language_list = cls.supported_language_list or [] + not_supported_language_list = cls.not_supported_language_list or [] + + return language in language_list or language not in not_supported_language_list + + +C = typing.TypeVar("C", bound=Component) -class ComponentBuilder(object): +class ComponentBuilder: """Creates trainers and interpreters based on configurations. Caches components for reuse. @@ -411,8 +703,8 @@ def __get_cached_component( and cache_key in self.component_cache ): return self.component_cache[cache_key], cache_key - else: - return None, cache_key + + return None, cache_key def __add_to_cache(self, component: Component, cache_key: Optional[Text]) -> None: """Add a component to the cache.""" @@ -420,8 +712,7 @@ def __add_to_cache(self, component: Component, cache_key: Optional[Text]) -> Non if cache_key is not None and self.use_cache: self.component_cache[cache_key] = component logger.info( - "Added '{}' to component cache. Key '{}'." - "".format(component.name, cache_key) + f"Added '{component.name}' to component cache. Key '{cache_key}'." ) def load_component( @@ -429,21 +720,23 @@ def load_component( component_meta: Dict[Text, Any], model_dir: Text, model_metadata: "Metadata", - **context: Any + **context: Any, ) -> Component: - """Tries to retrieve a component from the cache, else calls + """Loads a component. + + Tries to retrieve a component from the cache, else calls ``load`` to create a new component. Args: component_meta: - the metadata of the component to load in the pipeline + The metadata of the component to load in the pipeline. model_dir: - the directory to read the model from + The directory to read the model from. model_metadata (Metadata): - the model's :class:`rasa.nlu.model.Metadata` + The model's :class:`rasa.nlu.model.Metadata`. Returns: - Component: the loaded component. + The loaded component. """ from rasa.nlu import registry @@ -462,15 +755,26 @@ def load_component( return component except MissingArgumentError as e: # pragma: no cover raise Exception( - "Failed to load component from file `{}`. " - "{}".format(component_meta.get("file"), e) + f"Failed to load component from file '{component_meta.get('file')}'. " + f"Error: {e}" ) def create_component( self, component_config: Dict[Text, Any], cfg: RasaNLUModelConfig ) -> Component: - """Tries to retrieve a component from the cache, - calls `create` to create a new component.""" + """Creates a component. + + Tries to retrieve a component from the cache, + calls `create` to create a new component. + + Args: + component_config: The component configuration. + cfg: The model configuration. + + Returns: + The created component. 
+ """ + from rasa.nlu import registry from rasa.nlu.model import Metadata @@ -484,6 +788,15 @@ def create_component( return component except MissingArgumentError as e: # pragma: no cover raise Exception( - "Failed to create component `{}`. " - "{}".format(component_config["name"], e) + f"Failed to create component '{component_config['name']}'. " + f"Error: {e}" ) + + def create_component_from_class(self, component_class: Type[C], **cfg: Any) -> C: + """Create a component based on a class and a configuration. + + Mainly used to make use of caching when instantiating component classes.""" + + component_config = {"name": component_class.name} + + return self.create_component(component_config, RasaNLUModelConfig(cfg)) diff --git a/rasa/nlu/config.py b/rasa/nlu/config.py index 412c885df838..462e743e730f 100644 --- a/rasa/nlu/config.py +++ b/rasa/nlu/config.py @@ -5,8 +5,13 @@ from typing import Any, Dict, List, Optional, Text, Union import rasa.utils.io -from rasa.constants import DEFAULT_CONFIG_PATH +from rasa.constants import ( + DEFAULT_CONFIG_PATH, + DOCS_URL_PIPELINE, + DOCS_URL_MIGRATION_GUIDE, +) from rasa.nlu.utils import json_to_string +import rasa.utils.common as common_utils logger = logging.getLogger(__name__) @@ -15,7 +20,7 @@ class InvalidConfigError(ValueError): """Raised if an invalid configuration is encountered.""" def __init__(self, message: Text) -> None: - super(InvalidConfigError, self).__init__(message) + super().__init__(message) def load( @@ -33,7 +38,7 @@ def load( file_config = rasa.utils.io.read_config_file(config) except yaml.parser.ParserError as e: raise InvalidConfigError( - "Failed to read configuration file '{}'. Error: {}".format(config, e) + f"Failed to read configuration file '{config}'. Error: {e}" ) return _load_from_dict(file_config, **kwargs) @@ -54,7 +59,12 @@ def override_defaults( cfg = {} if custom: - cfg.update(custom) + for key in custom.keys(): + if isinstance(cfg.get(key), dict): + cfg[key].update(custom[key]) + else: + cfg[key] = custom[key] + return cfg @@ -67,17 +77,17 @@ def component_config_from_pipeline( c = pipeline[index] return override_defaults(defaults, c) except IndexError: - logger.warning( - "Tried to get configuration value for component " - "number {} which is not part of the pipeline. " - "Returning `defaults`." - "".format(index) + common_utils.raise_warning( + f"Tried to get configuration value for component " + f"number {index} which is not part of your pipeline. " + f"Returning `defaults`.", + docs=DOCS_URL_PIPELINE, ) return override_defaults(defaults, {}) -class RasaNLUModelConfig(object): - def __init__(self, configuration_values=None): +class RasaNLUModelConfig: + def __init__(self, configuration_values: Optional[Dict[Text, Any]] = None) -> None: """Create a model configuration, optionally overriding defaults with a dictionary ``configuration_values``. """ @@ -94,96 +104,70 @@ def __init__(self, configuration_values=None): # replaces None with empty list self.__dict__["pipeline"] = [] elif isinstance(self.__dict__["pipeline"], str): - from rasa.nlu import registry - - template_name = self.__dict__["pipeline"] - new_names = { - "spacy_sklearn": "pretrained_embeddings_spacy", - "tensorflow_embedding": "supervised_embeddings", - } - if template_name in new_names: - logger.warning( - "You have specified the pipeline template " - "'{}' which has been renamed to '{}'. 
" - "Please update your code as it will no " - "longer work with future versions of " - "Rasa NLU.".format(template_name, new_names[template_name]) - ) - template_name = new_names[template_name] - - pipeline = registry.pipeline_template(template_name) - - if pipeline: - # replaces the template with the actual components - self.__dict__["pipeline"] = pipeline - else: - known_templates = ", ".join( - registry.registered_pipeline_templates.keys() - ) - - raise InvalidConfigError( - "No pipeline specified and unknown " - "pipeline template '{}' passed. Known " - "pipeline templates: {}" - "".format(template_name, known_templates) - ) + # DEPRECATION EXCEPTION - remove in 2.1 + raise Exception( + f"You are using a pipeline template. All pipelines templates " + f"have been removed in 2.0. Please add " + f"the components you want to use directly to your configuration " + f"file. {DOCS_URL_MIGRATION_GUIDE}" + ) for key, value in self.items(): setattr(self, key, value) - def __getitem__(self, key): + def __getitem__(self, key: Text) -> Any: return self.__dict__[key] - def get(self, key, default=None): + def get(self, key: Text, default: Any = None) -> Any: return self.__dict__.get(key, default) - def __setitem__(self, key, value): + def __setitem__(self, key: Text, value: Any) -> None: self.__dict__[key] = value - def __delitem__(self, key): + def __delitem__(self, key: Text) -> None: del self.__dict__[key] - def __contains__(self, key): + def __contains__(self, key: Text) -> bool: return key in self.__dict__ - def __len__(self): + def __len__(self) -> int: return len(self.__dict__) - def __getstate__(self): + def __getstate__(self) -> Dict[Text, Any]: return self.as_dict() - def __setstate__(self, state): + def __setstate__(self, state: Dict[Text, Any]) -> None: self.override(state) - def items(self): + def items(self) -> List[Any]: return list(self.__dict__.items()) - def as_dict(self): + def as_dict(self) -> Dict[Text, Any]: return dict(list(self.items())) - def view(self): + def view(self) -> Text: return json_to_string(self.__dict__, indent=4) - def for_component(self, index, defaults=None): + def for_component(self, index, defaults=None) -> Dict[Text, Any]: return component_config_from_pipeline(index, self.pipeline, defaults) @property - def component_names(self): + def component_names(self) -> List[Text]: if self.pipeline: return [c.get("name") for c in self.pipeline] else: return [] - def set_component_attr(self, index, **kwargs): + def set_component_attr(self, index, **kwargs) -> None: try: self.pipeline[index].update(kwargs) except IndexError: - logger.warning( - "Tried to set configuration value for component " - "number {} which is not part of the pipeline." 
- "".format(index) + common_utils.raise_warning( + f"Tried to set configuration value for component " + f"number {index} which is not part of the pipeline.", + docs=DOCS_URL_PIPELINE, ) - def override(self, config): + def override(self, config) -> None: if config: self.__dict__.update(config) diff --git a/rasa/nlu/constants.py b/rasa/nlu/constants.py index 549ff15b6daa..94741ee2207d 100644 --- a/rasa/nlu/constants.py +++ b/rasa/nlu/constants.py @@ -1,40 +1,73 @@ -MESSAGE_TEXT_ATTRIBUTE = "text" +TEXT = "text" +INTENT = "intent" +RESPONSE = "response" -MESSAGE_RESPONSE_KEY_ATTRIBUTE = "response_key" +RESPONSE_KEY_ATTRIBUTE = "response_key" -MESSAGE_INTENT_ATTRIBUTE = "intent" +ENTITIES = "entities" +BILOU_ENTITIES = "bilou_entities" +BILOU_ENTITIES_ROLE = "bilou_entities_role" +BILOU_ENTITIES_GROUP = "bilou_entities_group" +NO_ENTITY_TAG = "O" -MESSAGE_RESPONSE_ATTRIBUTE = "response" +ENTITY_ATTRIBUTE_TYPE = "entity" +ENTITY_ATTRIBUTE_GROUP = "group" +ENTITY_ATTRIBUTE_ROLE = "role" +ENTITY_ATTRIBUTE_VALUE = "value" +ENTITY_ATTRIBUTE_TEXT = "text" +ENTITY_ATTRIBUTE_START = "start" +ENTITY_ATTRIBUTE_END = "end" +ENTITY_ATTRIBUTE_CONFIDENCE = "confidence" +ENTITY_ATTRIBUTE_CONFIDENCE_TYPE = ( + f"{ENTITY_ATTRIBUTE_CONFIDENCE}_{ENTITY_ATTRIBUTE_TYPE}" +) +ENTITY_ATTRIBUTE_CONFIDENCE_GROUP = ( + f"{ENTITY_ATTRIBUTE_CONFIDENCE}_{ENTITY_ATTRIBUTE_GROUP}" +) +ENTITY_ATTRIBUTE_CONFIDENCE_ROLE = ( + f"{ENTITY_ATTRIBUTE_CONFIDENCE}_{ENTITY_ATTRIBUTE_ROLE}" +) -MESSAGE_ENTITIES_ATTRIBUTE = "entities" +EXTRACTOR = "extractor" -MESSAGE_ATTRIBUTES = [ - MESSAGE_TEXT_ATTRIBUTE, - MESSAGE_INTENT_ATTRIBUTE, - MESSAGE_RESPONSE_ATTRIBUTE, -] +PRETRAINED_EXTRACTORS = {"DucklingHTTPExtractor", "SpacyEntityExtractor"} +TRAINABLE_EXTRACTORS = {"MitieEntityExtractor", "CRFEntityExtractor", "DIETClassifier"} -MESSAGE_TOKENS_NAMES = { - MESSAGE_TEXT_ATTRIBUTE: "tokens", - MESSAGE_INTENT_ATTRIBUTE: "intent_tokens", - MESSAGE_RESPONSE_ATTRIBUTE: "response_tokens", -} +NUMBER_OF_SUB_TOKENS = "number_of_sub_tokens" + +MESSAGE_ATTRIBUTES = [TEXT, INTENT, RESPONSE] +DENSE_FEATURIZABLE_ATTRIBUTES = [TEXT, RESPONSE] -MESSAGE_VECTOR_FEATURE_NAMES = { - MESSAGE_TEXT_ATTRIBUTE: "text_features", - MESSAGE_INTENT_ATTRIBUTE: "intent_features", - MESSAGE_RESPONSE_ATTRIBUTE: "response_features", +LANGUAGE_MODEL_DOCS = { + TEXT: "text_language_model_doc", + RESPONSE: "response_language_model_doc", } +SPACY_DOCS = {TEXT: "text_spacy_doc", RESPONSE: "response_spacy_doc"} -MESSAGE_SPACY_FEATURES_NAMES = { - MESSAGE_TEXT_ATTRIBUTE: "spacy_doc", - MESSAGE_RESPONSE_ATTRIBUTE: "response_spacy_doc", +TOKENS_NAMES = { + TEXT: "text_tokens", + INTENT: "intent_tokens", + RESPONSE: "response_tokens", } -SPACY_FEATURIZABLE_ATTRIBUTES = [MESSAGE_TEXT_ATTRIBUTE, MESSAGE_RESPONSE_ATTRIBUTE] +TOKENS = "tokens" +TOKEN_IDS = "token_ids" + +SEQUENCE_FEATURES = "sequence_features" +SENTENCE_FEATURES = "sentence_features" -MESSAGE_SELECTOR_PROPERTY_NAME = "response_selector" +RESPONSE_SELECTOR_PROPERTY_NAME = "response_selector" DEFAULT_OPEN_UTTERANCE_TYPE = "default" OPEN_UTTERANCE_PREDICTION_KEY = "response" OPEN_UTTERANCE_RANKING_KEY = "ranking" RESPONSE_IDENTIFIER_DELIMITER = "/" + +INTENT_RANKING_KEY = "intent_ranking" +INTENT_CONFIDENCE_KEY = "confidence" +INTENT_NAME_KEY = "name" + +FEATURE_TYPE_SENTENCE = "sentence" +FEATURE_TYPE_SEQUENCE = "sequence" +VALID_FEATURE_TYPES = [FEATURE_TYPE_SEQUENCE, FEATURE_TYPE_SENTENCE] + +FEATURIZER_CLASS_ALIAS = "alias" diff --git a/rasa/nlu/emulators/dialogflow.py b/rasa/nlu/emulators/dialogflow.py index 
6a998b515365..bfe9a8a44685 100644 --- a/rasa/nlu/emulators/dialogflow.py +++ b/rasa/nlu/emulators/dialogflow.py @@ -2,13 +2,14 @@ from datetime import datetime from typing import Any, Dict, Text +from rasa.nlu.constants import INTENT_NAME_KEY from rasa.nlu.emulators.no_emulator import NoEmulator class DialogflowEmulator(NoEmulator): def __init__(self) -> None: - super(DialogflowEmulator, self).__init__() + super().__init__() self.name = "api" def normalise_response_json(self, data: Dict[Text, Any]) -> Dict[Text, Any]: @@ -16,8 +17,7 @@ def normalise_response_json(self, data: Dict[Text, Any]) -> Dict[Text, Any]: # populate entities dict entities = { - entity_type: [] - for entity_type in set([x["entity"] for x in data["entities"]]) + entity_type: [] for entity_type in {x["entity"] for x in data["entities"]} } for entity in data["entities"]: @@ -29,7 +29,7 @@ def normalise_response_json(self, data: Dict[Text, Any]) -> Dict[Text, Any]: "result": { "source": "agent", "resolvedQuery": data["text"], - "action": data["intent"]["name"], + "action": data["intent"][INTENT_NAME_KEY], "actionIncomplete": False, "parameters": entities, "contexts": [], diff --git a/rasa/nlu/emulators/luis.py b/rasa/nlu/emulators/luis.py index 529695458a80..30c01497ff08 100644 --- a/rasa/nlu/emulators/luis.py +++ b/rasa/nlu/emulators/luis.py @@ -1,15 +1,16 @@ from typing import Any, Dict, Text from rasa.nlu.emulators.no_emulator import NoEmulator +from typing import List, Optional class LUISEmulator(NoEmulator): def __init__(self) -> None: - super(LUISEmulator, self).__init__() + super().__init__() self.name = "luis" - def _top_intent(self, data): + def _top_intent(self, data) -> Optional[Dict[Text, Any]]: if data.get("intent"): return { "intent": data["intent"]["name"], @@ -18,7 +19,7 @@ def _top_intent(self, data): else: return None - def _ranking(self, data): + def _ranking(self, data) -> List[Dict[Text, Any]]: if data.get("intent_ranking"): return [ {"intent": el["name"], "score": el["confidence"]} diff --git a/rasa/nlu/emulators/no_emulator.py b/rasa/nlu/emulators/no_emulator.py index b0249bdac4f0..8e2b68b6078f 100644 --- a/rasa/nlu/emulators/no_emulator.py +++ b/rasa/nlu/emulators/no_emulator.py @@ -1,7 +1,7 @@ from typing import Any, Dict, Text -class NoEmulator(object): +class NoEmulator: def __init__(self) -> None: self.name = None diff --git a/rasa/nlu/emulators/wit.py b/rasa/nlu/emulators/wit.py index ba217216f715..6ccdf25622b8 100644 --- a/rasa/nlu/emulators/wit.py +++ b/rasa/nlu/emulators/wit.py @@ -6,7 +6,7 @@ class WitEmulator(NoEmulator): def __init__(self) -> None: - super(WitEmulator, self).__init__() + super().__init__() self.name = "wit" def normalise_response_json(self, data: Dict[Text, Any]) -> List[Dict[Text, Any]]: diff --git a/rasa/nlu/extractors/__init__.py b/rasa/nlu/extractors/__init__.py index 75f833b04615..e69de29bb2d1 100644 --- a/rasa/nlu/extractors/__init__.py +++ b/rasa/nlu/extractors/__init__.py @@ -1,89 +0,0 @@ -from typing import Any, Dict, List, Text - -from rasa.nlu.components import Component -from rasa.nlu.training_data import Message - - -class EntityExtractor(Component): - def add_extractor_name( - self, entities: List[Dict[Text, Any]] - ) -> List[Dict[Text, Any]]: - for entity in entities: - entity["extractor"] = self.name - return entities - - def add_processor_name(self, entity: Dict[Text, Any]) -> Dict[Text, Any]: - if "processors" in entity: - entity["processors"].append(self.name) - else: - entity["processors"] = [self.name] - - return entity - - @staticmethod - def 
filter_irrelevant_entities(extracted, requested_dimensions): - """Only return dimensions the user configured""" - - if requested_dimensions: - return [ - entity - for entity in extracted - if entity["entity"] in requested_dimensions - ] - else: - return extracted - - @staticmethod - def find_entity(ent, text, tokens): - offsets = [token.offset for token in tokens] - ends = [token.end for token in tokens] - - if ent["start"] not in offsets: - message = ( - "Invalid entity {} in example '{}': " - "entities must span whole tokens. " - "Wrong entity start.".format(ent, text) - ) - raise ValueError(message) - - if ent["end"] not in ends: - message = ( - "Invalid entity {} in example '{}': " - "entities must span whole tokens. " - "Wrong entity end.".format(ent, text) - ) - raise ValueError(message) - - start = offsets.index(ent["start"]) - end = ends.index(ent["end"]) + 1 - return start, end - - def filter_trainable_entities( - self, entity_examples: List[Message] - ) -> List[Message]: - """Filters out untrainable entity annotations. - - Creates a copy of entity_examples in which entities that have - `extractor` set to something other than - self.name (e.g. 'CRFEntityExtractor') are removed. - """ - - filtered = [] - for message in entity_examples: - entities = [] - for ent in message.get("entities", []): - extractor = ent.get("extractor") - if not extractor or extractor == self.name: - entities.append(ent) - data = message.data.copy() - data["entities"] = entities - filtered.append( - Message( - text=message.text, - data=data, - output_properties=message.output_properties, - time=message.time, - ) - ) - - return filtered diff --git a/rasa/nlu/extractors/crf_entity_extractor.py b/rasa/nlu/extractors/crf_entity_extractor.py index d1597ff64b59..f11e184defd2 100644 --- a/rasa/nlu/extractors/crf_entity_extractor.py +++ b/rasa/nlu/extractors/crf_entity_extractor.py @@ -1,49 +1,80 @@ import logging import os import typing -from typing import Any, Dict, List, Optional, Text, Tuple, Union -from rasa.nlu.config import InvalidConfigError, RasaNLUModelConfig -from rasa.nlu.extractors import EntityExtractor +import numpy as np +from typing import Any, Dict, List, Optional, Text, Tuple, Type, Callable + +import rasa.nlu.utils.bilou_utils as bilou_utils +import rasa.utils.common as common_utils +from rasa.nlu.test import determine_token_labels +from rasa.nlu.tokenizers.spacy_tokenizer import POS_TAG_KEY +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.tokenizers.tokenizer import Tokenizer +from rasa.nlu.components import Component +from rasa.nlu.extractors.extractor import EntityExtractor from rasa.nlu.model import Metadata -from rasa.nlu.tokenizers import Token +from rasa.nlu.tokenizers.tokenizer import Token from rasa.nlu.training_data import Message, TrainingData -from rasa.constants import DOCS_BASE_URL - -try: - import spacy -except ImportError: - spacy = None +from rasa.nlu.constants import ( + TOKENS_NAMES, + TEXT, + ENTITIES, + NO_ENTITY_TAG, + ENTITY_ATTRIBUTE_TYPE, + ENTITY_ATTRIBUTE_GROUP, + ENTITY_ATTRIBUTE_ROLE, +) +from rasa.constants import DOCS_URL_COMPONENTS +from rasa.utils.tensorflow.constants import BILOU_FLAG logger = logging.getLogger(__name__) if typing.TYPE_CHECKING: from sklearn_crfsuite import CRF - from spacy.tokens import Doc -class CRFEntityExtractor(EntityExtractor): +class CRFToken: + def __init__( + self, + text: Text, + pos_tag: Text, + pattern: Dict[Text, Any], + dense_features: np.ndarray, + entity_tag: Text, + entity_role_tag: Text, + entity_group_tag: Text, 
+ ): + self.text = text + self.pos_tag = pos_tag + self.pattern = pattern + self.dense_features = dense_features + self.entity_tag = entity_tag + self.entity_role_tag = entity_role_tag + self.entity_group_tag = entity_group_tag - provides = ["entities"] - requires = ["tokens"] +class CRFEntityExtractor(EntityExtractor): + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [Tokenizer] defaults = { # BILOU_flag determines whether to use BILOU tagging or not. # More rigorous however requires more examples per entity # rule of thumb: use only if more than 100 egs. per entity - "BILOU_flag": True, - # crf_features is [before, word, after] array with before, word, - # after holding keys about which - # features to use for each word, for example, 'title' in - # array before will have the feature - # "is the preceding word in title case?" - # POS features require spaCy to be installed + BILOU_FLAG: True, + # crf_features is [before, token, after] array with before, token, + # after holding keys about which features to use for each token, + # for example, 'title' in array before will have the feature + # "is the preceding token in title case?" + # POS features require SpacyTokenizer + # pattern feature require RegexFeaturizer "features": [ ["low", "title", "upper"], [ - "bias", "low", + "bias", "prefix5", "prefix2", "suffix5", @@ -58,323 +89,208 @@ class CRFEntityExtractor(EntityExtractor): ], # The maximum number of iterations for optimization algorithms. "max_iterations": 50, - # weight of theL1 regularization + # weight of the L1 regularization "L1_c": 0.1, # weight of the L2 regularization "L2_c": 0.1, + # Name of dense featurizers to use. + # If list is empty all available dense features are used. + "featurizers": [], } - function_dict = { - "low": lambda doc: doc[0].lower(), # pytype: disable=attribute-error - "title": lambda doc: doc[0].istitle(), # pytype: disable=attribute-error - "prefix5": lambda doc: doc[0][:5], - "prefix2": lambda doc: doc[0][:2], - "suffix5": lambda doc: doc[0][-5:], - "suffix3": lambda doc: doc[0][-3:], - "suffix2": lambda doc: doc[0][-2:], - "suffix1": lambda doc: doc[0][-1:], - "pos": lambda doc: doc[1], - "pos2": lambda doc: doc[1][:2], - "bias": lambda doc: "bias", - "upper": lambda doc: doc[0].isupper(), # pytype: disable=attribute-error - "digit": lambda doc: doc[0].isdigit(), # pytype: disable=attribute-error - "pattern": lambda doc: doc[3], + function_dict: Dict[Text, Callable[[CRFToken], Any]] = { + "low": lambda crf_token: crf_token.text.lower(), + "title": lambda crf_token: crf_token.text.istitle(), + "prefix5": lambda crf_token: crf_token.text[:5], + "prefix2": lambda crf_token: crf_token.text[:2], + "suffix5": lambda crf_token: crf_token.text[-5:], + "suffix3": lambda crf_token: crf_token.text[-3:], + "suffix2": lambda crf_token: crf_token.text[-2:], + "suffix1": lambda crf_token: crf_token.text[-1:], + "bias": lambda crf_token: "bias", + "pos": lambda crf_token: crf_token.pos_tag, + "pos2": lambda crf_token: crf_token.pos_tag[:2] + if crf_token.pos_tag is not None + else None, + "upper": lambda crf_token: crf_token.text.isupper(), + "digit": lambda crf_token: crf_token.text.isdigit(), + "pattern": lambda crf_token: crf_token.pattern, + "text_dense_features": lambda crf_token: crf_token.dense_features, + "entity": lambda crf_token: crf_token.entity_tag, } def __init__( self, component_config: Optional[Dict[Text, Any]] = None, - ent_tagger: Optional["CRF"] = None, + entity_taggers: Optional[Dict[Text, "CRF"]] = None, ) -> None: 
- super(CRFEntityExtractor, self).__init__(component_config) - - self.ent_tagger = ent_tagger - - self._validate_configuration() - - self._check_pos_features_and_spacy() + super().__init__(component_config) - def _check_pos_features_and_spacy(self): - import itertools + self.entity_taggers = entity_taggers - features = self.component_config.get("features", []) - fts = set(itertools.chain.from_iterable(features)) - self.pos_features = "pos" in fts or "pos2" in fts - if self.pos_features: - self._check_spacy() + self.crf_order = [ + ENTITY_ATTRIBUTE_TYPE, + ENTITY_ATTRIBUTE_ROLE, + ENTITY_ATTRIBUTE_GROUP, + ] - @staticmethod - def _check_spacy(): - if spacy is None: - raise ImportError( - "Failed to import `spaCy`. " - "`spaCy` is required for POS features " - "See https://spacy.io/usage/ for installation" - "instructions." - ) + self._validate_configuration() - def _validate_configuration(self): + def _validate_configuration(self) -> None: if len(self.component_config.get("features", [])) % 2 != 1: raise ValueError( "Need an odd number of crf feature lists to have a center word." ) @classmethod - def required_packages(cls): + def required_packages(cls) -> List[Text]: return ["sklearn_crfsuite", "sklearn"] def train( - self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, ) -> None: - # checks whether there is at least one # example with an entity annotation - if training_data.entity_examples: - self._check_spacy_doc(training_data.training_examples[0]) - - # filter out pre-trained entity examples - filtered_entity_examples = self.filter_trainable_entities( - training_data.training_examples + if not training_data.entity_examples: + logger.debug( + "No training examples with entities present. Skip training" + "of 'CRFEntityExtractor'." ) + return - # convert the dataset into features - # this will train on ALL examples, even the ones - # without annotations - dataset = self._create_dataset(filtered_entity_examples) - - self._train_model(dataset) - - def _create_dataset( - self, examples: List[Message] - ) -> List[List[Tuple[Optional[Text], Optional[Text], Text, Dict[Text, Any]]]]: - dataset = [] - for example in examples: - entity_offsets = self._convert_example(example) - dataset.append(self._from_json_to_crf(example, entity_offsets)) - return dataset - - def _check_spacy_doc(self, message): - if self.pos_features and message.get("spacy_doc") is None: - raise InvalidConfigError( - "Could not find `spacy_doc` attribute for " - "message {}\n" - "POS features require a pipeline component " - "that provides `spacy_doc` attributes, i.e. `SpacyNLP`. 
" - "See {}/nlu/choosing-a-pipeline/#pretrained-embeddings-spacy " - "for details".format(message.text, DOCS_BASE_URL) - ) + self.check_correct_entity_annotations(training_data) - def process(self, message: Message, **kwargs: Any) -> None: + if self.component_config[BILOU_FLAG]: + bilou_utils.apply_bilou_schema(training_data) - self._check_spacy_doc(message) + # only keep the CRFs for tags we actually have training data for + self._update_crf_order(training_data) - extracted = self.add_extractor_name(self.extract_entities(message)) - message.set( - "entities", message.get("entities", []) + extracted, add_to_output=True + # filter out pre-trained entity examples + entity_examples = self.filter_trainable_entities( + training_data.training_examples ) - @staticmethod - def _convert_example(example: Message) -> List[Tuple[int, int, Text]]: - def convert_entity(entity): - return entity["start"], entity["end"], entity["entity"] - - return [convert_entity(ent) for ent in example.get("entities", [])] + dataset = [self._convert_to_crf_tokens(example) for example in entity_examples] - def extract_entities(self, message: Message) -> List[Dict[Text, Any]]: - """Take a sentence and return entities in json format""" - - if self.ent_tagger is not None: - text_data = self._from_text_to_crf(message) - features = self._sentence_to_features(text_data) - ents = self.ent_tagger.predict_marginals_single(features) - return self._from_crf_to_json(message, ents) - else: - return [] + self._train_model(dataset) - def most_likely_entity(self, idx, entities): - if len(entities) > idx: - entity_probs = entities[idx] - else: - entity_probs = None - if entity_probs: - label = max(entity_probs, key=lambda key: entity_probs[key]) - if self.component_config["BILOU_flag"]: - # if we are using bilou flags, we will combine the prob - # of the B, I, L and U tags for an entity (so if we have a - # score of 60% for `B-address` and 40% and 30% - # for `I-address`, we will return 70%) - return ( - label, - sum([v for k, v in entity_probs.items() if k[2:] == label[2:]]), - ) - else: - return label, entity_probs[label] - else: - return "", 0.0 + def _update_crf_order(self, training_data: TrainingData): + """Train only CRFs we actually have training data for.""" + _crf_order = [] - def _create_entity_dict( - self, - message: Message, - tokens: Union["Doc", List[Token]], - start: int, - end: int, - entity: str, - confidence: float, - ) -> Dict[Text, Any]: - if isinstance(tokens, list): # tokens is a list of Token - _start = tokens[start].offset - _end = tokens[end].end - value = tokens[start].text - value += "".join( - [ - message.text[tokens[i - 1].end : tokens[i].offset] + tokens[i].text - for i in range(start + 1, end + 1) - ] - ) - else: # tokens is a Doc - _start = tokens[start].idx - _end = tokens[start : end + 1].end_char - value = tokens[start : end + 1].text - - return { - "start": _start, - "end": _end, - "value": value, - "entity": entity, - "confidence": confidence, - } + for tag_name in self.crf_order: + if tag_name == ENTITY_ATTRIBUTE_TYPE and training_data.entities: + _crf_order.append(ENTITY_ATTRIBUTE_TYPE) + elif tag_name == ENTITY_ATTRIBUTE_ROLE and training_data.entity_roles: + _crf_order.append(ENTITY_ATTRIBUTE_ROLE) + elif tag_name == ENTITY_ATTRIBUTE_GROUP and training_data.entity_groups: + _crf_order.append(ENTITY_ATTRIBUTE_GROUP) - @staticmethod - def _entity_from_label(label): - return label[2:] + self.crf_order = _crf_order - @staticmethod - def _bilou_from_label(label): - if len(label) >= 2 and label[1] == 
"-": - return label[0].upper() - return None + def process(self, message: Message, **kwargs: Any) -> None: + entities = self.extract_entities(message) + entities = self.add_extractor_name(entities) + message.set(ENTITIES, message.get(ENTITIES, []) + entities, add_to_output=True) - def _find_bilou_end(self, word_idx, entities): - ent_word_idx = word_idx + 1 - finished = False + def extract_entities(self, message: Message) -> List[Dict[Text, Any]]: + """Extract entities from the given message using the trained model(s).""" - # get information about the first word, tagged with `B-...` - label, confidence = self.most_likely_entity(word_idx, entities) - entity_label = self._entity_from_label(label) + if self.entity_taggers is None: + return [] - while not finished: - label, label_confidence = self.most_likely_entity(ent_word_idx, entities) + tokens = message.get(TOKENS_NAMES[TEXT]) + crf_tokens = self._convert_to_crf_tokens(message) - confidence = min(confidence, label_confidence) + predictions = {} + for tag_name, entity_tagger in self.entity_taggers.items(): + # use predicted entity tags as features for second level CRFs + include_tag_features = tag_name != ENTITY_ATTRIBUTE_TYPE + if include_tag_features: + self._add_tag_to_crf_token(crf_tokens, predictions) - if label[2:] != entity_label: - # words are not tagged the same entity class - logger.debug( - "Inconsistent BILOU tagging found, B- tag, L- " - "tag pair encloses multiple entity classes.i.e. " - "[B-a, I-b, L-a] instead of [B-a, I-a, L-a].\n" - "Assuming B- class is correct." - ) + features = self._crf_tokens_to_features(crf_tokens, include_tag_features) + predictions[tag_name] = entity_tagger.predict_marginals_single(features) - if label.startswith("L-"): - # end of the entity - finished = True - elif label.startswith("I-"): - # middle part of the entity - ent_word_idx += 1 - else: - # entity not closed by an L- tag - finished = True - ent_word_idx -= 1 - logger.debug( - "Inconsistent BILOU tagging found, B- tag not " - "closed by L- tag, i.e [B-a, I-a, O] instead of " - "[B-a, L-a, O].\nAssuming last tag is L-" - ) - return ent_word_idx, confidence + # convert predictions into a list of tags and a list of confidences + tags, confidences = self._tag_confidences(tokens, predictions) - def _handle_bilou_label(self, word_idx, entities): - label, confidence = self.most_likely_entity(word_idx, entities) - entity_label = self._entity_from_label(label) + return self.convert_predictions_into_entities( + message.text, tokens, tags, confidences + ) - if self._bilou_from_label(label) == "U": - return word_idx, confidence, entity_label + def _add_tag_to_crf_token( + self, + crf_tokens: List[CRFToken], + predictions: Dict[Text, List[Dict[Text, float]]], + ): + """Add predicted entity tags to CRF tokens.""" + if ENTITY_ATTRIBUTE_TYPE in predictions: + _tags, _ = self._most_likely_tag(predictions[ENTITY_ATTRIBUTE_TYPE]) + for tag, token in zip(_tags, crf_tokens): + token.entity_tag = tag + + def _most_likely_tag( + self, predictions: List[Dict[Text, float]] + ) -> Tuple[List[Text], List[float]]: + """Get the entity tags with the highest confidence. + + Args: + predictions: list of mappings from entity tag to confidence value + + Returns: + List of entity tags and list of confidence values. 
+ """ + _tags = [] + _confidences = [] + + for token_predictions in predictions: + tag = max(token_predictions, key=lambda key: token_predictions[key]) + _tags.append(tag) + + if self.component_config[BILOU_FLAG]: + # if we are using BILOU flags, we will sum up the prob + # of the B, I, L and U tags for an entity + _confidences.append( + sum( + _confidence + for _tag, _confidence in token_predictions.items() + if bilou_utils.tag_without_prefix(tag) + == bilou_utils.tag_without_prefix(_tag) + ) + ) + else: + _confidences.append(token_predictions[tag]) - elif self._bilou_from_label(label) == "B": - # start of multi word-entity need to represent whole extent - ent_word_idx, confidence = self._find_bilou_end(word_idx, entities) - return ent_word_idx, confidence, entity_label + return _tags, _confidences - else: - return None, None, None + def _tag_confidences( + self, tokens: List[Token], predictions: Dict[Text, List[Dict[Text, float]]] + ) -> Tuple[Dict[Text, List[Text]], Dict[Text, List[float]]]: + """Get most likely tag predictions with confidence values for tokens.""" + tags = {} + confidences = {} - def _from_crf_to_json( - self, message: Message, entities: List[Any] - ) -> List[Dict[Text, Any]]: + for tag_name, predicted_tags in predictions.items(): + if len(tokens) != len(predicted_tags): + raise Exception( + "Inconsistency in amount of tokens between crfsuite and message" + ) - if self.pos_features: - tokens = message.get("spacy_doc") - else: - tokens = message.get("tokens") + _tags, _confidences = self._most_likely_tag(predicted_tags) - if len(tokens) != len(entities): - raise Exception( - "Inconsistency in amount of tokens between crfsuite and message" - ) + if self.component_config[BILOU_FLAG]: + _tags = bilou_utils.ensure_consistent_bilou_tagging(_tags) - if self.component_config["BILOU_flag"]: - return self._convert_bilou_tagging_to_entity_result( - message, tokens, entities - ) - else: - # not using BILOU tagging scheme, multi-word entities are split. 
- return self._convert_simple_tagging_to_entity_result(tokens, entities) + confidences[tag_name] = _confidences + tags[tag_name] = _tags - def _convert_bilou_tagging_to_entity_result( - self, message: Message, tokens: List[Token], entities: List[Dict[Text, float]] - ): - # using the BILOU tagging scheme - json_ents = [] - word_idx = 0 - while word_idx < len(tokens): - end_idx, confidence, entity_label = self._handle_bilou_label( - word_idx, entities - ) - - if end_idx is not None: - ent = self._create_entity_dict( - message, tokens, word_idx, end_idx, entity_label, confidence - ) - json_ents.append(ent) - word_idx = end_idx + 1 - else: - word_idx += 1 - return json_ents - - def _convert_simple_tagging_to_entity_result(self, tokens, entities): - json_ents = [] - - for word_idx in range(len(tokens)): - entity_label, confidence = self.most_likely_entity(word_idx, entities) - word = tokens[word_idx] - if entity_label != "O": - if self.pos_features: - start = word.idx - end = word.idx + len(word) - else: - start = word.offset - end = word.end - ent = { - "start": start, - "end": end, - "value": word.text, - "entity": entity_label, - "confidence": confidence, - } - json_ents.append(ent) - - return json_ents + return tags, confidences @classmethod def load( @@ -383,210 +299,287 @@ def load( model_dir: Text = None, model_metadata: Metadata = None, cached_component: Optional["CRFEntityExtractor"] = None, - **kwargs: Any + **kwargs: Any, ) -> "CRFEntityExtractor": - from sklearn.externals import joblib + import joblib - file_name = meta.get("file") - model_file = os.path.join(model_dir, file_name) + file_names = meta.get("files") + entity_taggers = {} - if os.path.exists(model_file): - ent_tagger = joblib.load(model_file) - return cls(meta, ent_tagger) - else: - return cls(meta) + if not file_names: + logger.debug( + f"Failed to load model for 'CRFEntityExtractor'. " + f"Maybe you did not provide enough training data and no model was " + f"trained or the path '{os.path.abspath(model_dir)}' doesn't exist?" + ) + return cls(component_config=meta) + + for name, file_name in file_names.items(): + model_file = os.path.join(model_dir, file_name) + if os.path.exists(model_file): + entity_taggers[name] = joblib.load(model_file) + else: + logger.debug( + f"Failed to load model for tag '{name}' for 'CRFEntityExtractor'. " + f"Maybe you did not provide enough training data and no model was " + f"trained or the path '{os.path.abspath(model_file)}' doesn't " + f"exist?" + ) + + return cls(meta, entity_taggers) def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]: """Persist this model into the passed directory. 
Returns the metadata necessary to load the model again.""" - from sklearn.externals import joblib + import joblib - file_name = file_name + ".pkl" - if self.ent_tagger: - model_file_name = os.path.join(model_dir, file_name) - joblib.dump(self.ent_tagger, model_file_name) + file_names = {} - return {"file": file_name} + if self.entity_taggers: + for name, entity_tagger in self.entity_taggers.items(): + file_name = f"{file_name}.{name}.pkl" + model_file_name = os.path.join(model_dir, file_name) + joblib.dump(entity_tagger, model_file_name) + file_names[name] = file_name - def _sentence_to_features( - self, - sentence: List[Tuple[Optional[Text], Optional[Text], Text, Dict[Text, Any]]], + return {"files": file_names} + + def _crf_tokens_to_features( + self, crf_tokens: List[CRFToken], include_tag_features: bool = False ) -> List[Dict[Text, Any]]: - """Convert a word into discrete features in self.crf_features, - including word before and word after.""" + """Convert the list of tokens into discrete features.""" configured_features = self.component_config["features"] sentence_features = [] - for word_idx in range(len(sentence)): - # word before(-1), current word(0), next word(+1) - feature_span = len(configured_features) - half_span = feature_span // 2 - feature_range = range(-half_span, half_span + 1) - prefixes = [str(i) for i in feature_range] - word_features = {} - for f_i in feature_range: - if word_idx + f_i >= len(sentence): - word_features["EOS"] = True - # End Of Sentence - elif word_idx + f_i < 0: - word_features["BOS"] = True - # Beginning Of Sentence - else: - word = sentence[word_idx + f_i] - f_i_from_zero = f_i + half_span - prefix = prefixes[f_i_from_zero] - features = configured_features[f_i_from_zero] - for feature in features: - if feature == "pattern": - # add all regexes as a feature - regex_patterns = self.function_dict[feature](word) - # pytype: disable=attribute-error - for p_name, matched in regex_patterns.items(): - feature_name = prefix + ":" + feature + ":" + p_name - word_features[feature_name] = matched - # pytype: enable=attribute-error - else: - # append each feature to a feature vector - value = self.function_dict[feature](word) - word_features[prefix + ":" + feature] = value - sentence_features.append(word_features) + for token_idx in range(len(crf_tokens)): + # the features for the current token include features of the token + # before and after the current features (if defined in the config) + # token before (-1), current token (0), token after (+1) + window_size = len(configured_features) + half_window_size = window_size // 2 + window_range = range(-half_window_size, half_window_size + 1) + + token_features = self._create_features_for_token( + crf_tokens, + token_idx, + half_window_size, + window_range, + include_tag_features, + ) + + sentence_features.append(token_features) + return sentence_features - @staticmethod - def _sentence_to_labels( - sentence: List[Tuple[Optional[Text], Optional[Text], Text, Dict[Text, Any]]], - ) -> List[Text]: - - return [label for _, _, label, _ in sentence] - - def _from_json_to_crf( - self, message: Message, entity_offsets: List[Tuple[int, int, Text]] - ) -> List[Tuple[Optional[Text], Optional[Text], Text, Dict[Text, Any]]]: - """Convert json examples to format of underlying crfsuite.""" - - if self.pos_features: - from spacy.gold import GoldParse # pytype: disable=import-error - - doc_or_tokens = message.get("spacy_doc") - gold = GoldParse(doc_or_tokens, entities=entity_offsets) - ents = [l[5] for l in gold.orig_annot] - 
else: - doc_or_tokens = message.get("tokens") - ents = self._bilou_tags_from_offsets(doc_or_tokens, entity_offsets) - - # collect badly annotated examples - collected = [] - for t, e in zip(doc_or_tokens, ents): - if e == "-": - collected.append(t) - elif collected: - collected_text = " ".join([t.text for t in collected]) - logger.warning( - "Misaligned entity annotation for '{}' " - "in sentence '{}' with intent '{}'. " - "Make sure the start and end values of the " - "annotated training examples end at token " - "boundaries (e.g. don't include trailing " - "whitespaces or punctuation)." - "".format(collected_text, message.text, message.get("intent")) - ) - collected = [] + def _create_features_for_token( + self, + crf_tokens: List[CRFToken], + token_idx: int, + half_window_size: int, + window_range: range, + include_tag_features: bool, + ): + """Convert a token into discrete features including word before and word + after.""" - if not self.component_config["BILOU_flag"]: - for i, label in enumerate(ents): - if self._bilou_from_label(label) in {"B", "I", "U", "L"}: - # removes BILOU prefix from label - ents[i] = self._entity_from_label(label) + configured_features = self.component_config["features"] + prefixes = [str(i) for i in window_range] - return self._from_text_to_crf(message, ents) + token_features = {} - @staticmethod - def _bilou_tags_from_offsets(tokens, entities, missing="O"): - # From spacy.spacy.GoldParse, under MIT License - starts = {token.offset: i for i, token in enumerate(tokens)} - ends = {token.end: i for i, token in enumerate(tokens)} - bilou = ["-" for _ in tokens] - # Handle entity cases - for start_char, end_char, label in entities: - start_token = starts.get(start_char) - end_token = ends.get(end_char) - # Only interested if the tokenization is correct - if start_token is not None and end_token is not None: - if start_token == end_token: - bilou[start_token] = "U-%s" % label - else: - bilou[start_token] = "B-%s" % label - for i in range(start_token + 1, end_token): - bilou[i] = "I-%s" % label - bilou[end_token] = "L-%s" % label - # Now distinguish the O cases from ones where we miss the tokenization - entity_chars = set() - for start_char, end_char, label in entities: - for i in range(start_char, end_char): - entity_chars.add(i) - for n, token in enumerate(tokens): - for i in range(token.offset, token.end): - if i in entity_chars: - break - else: - bilou[n] = missing + # iterate over the tokens in the window range (-1, 0, +1) to collect the + # features for the token at token_idx + for pointer_position in window_range: + current_token_idx = token_idx + pointer_position - return bilou + if current_token_idx >= len(crf_tokens): + # token is at the end of the sentence + token_features["EOS"] = True + elif current_token_idx < 0: + # token is at the beginning of the sentence + token_features["BOS"] = True + else: + token = crf_tokens[current_token_idx] + + # get the features to extract for the token we are currently looking at + current_feature_idx = pointer_position + half_window_size + features = configured_features[current_feature_idx] + # we add the 'entity' feature to include the entity type as features + # for the role and group CRFs + if include_tag_features: + features.append("entity") + + prefix = prefixes[current_feature_idx] + + for feature in features: + if feature == "pattern": + # add all regexes extracted from the 'RegexFeaturizer' as a + # feature: 'pattern_name' is the name of the pattern the user + # set in the training data, 'matched' is either 
'True' or + # 'False' depending on whether the token actually matches the + # pattern or not + regex_patterns = self.function_dict[feature](token) + for pattern_name, matched in regex_patterns.items(): + token_features[ + f"{prefix}:{feature}:{pattern_name}" + ] = matched + else: + value = self.function_dict[feature](token) + token_features[f"{prefix}:{feature}"] = value + + return token_features @staticmethod - def __pattern_of_token(message, i): - if message.get("tokens") is not None: - return message.get("tokens")[i].get("pattern", {}) - else: - return {} + def _crf_tokens_to_tags(crf_tokens: List[CRFToken], tag_name: Text) -> List[Text]: + """Return the list of tags for the given tag name.""" + if tag_name == ENTITY_ATTRIBUTE_ROLE: + return [crf_token.entity_role_tag for crf_token in crf_tokens] + if tag_name == ENTITY_ATTRIBUTE_GROUP: + return [crf_token.entity_group_tag for crf_token in crf_tokens] + + return [crf_token.entity_tag for crf_token in crf_tokens] @staticmethod - def __tag_of_token(token): - if spacy.about.__version__ > "2" and token._.has("tag"): - return token._.get("tag") - else: - return token.tag_ + def _pattern_of_token(message: Message, idx: int) -> Dict[Text, bool]: + """Get the patterns of the token at the given index extracted by the + 'RegexFeaturizer'. + + The 'RegexFeaturizer' adds all patterns listed in the training data to the + token. The pattern name is mapped to either 'True' (pattern applies to token) or + 'False' (pattern does not apply to token). + + Args: + message: The message. + idx: The token index. + + Returns: + The pattern dict. + """ + if message.get(TOKENS_NAMES[TEXT]) is not None: + return message.get(TOKENS_NAMES[TEXT])[idx].get("pattern", {}) + return {} + + def _get_dense_features(self, message: Message) -> Optional[List]: + """Convert dense features to python-crfsuite feature format.""" + features, _ = message.get_dense_features( + TEXT, self.component_config["featurizers"] + ) - def _from_text_to_crf( - self, message: Message, entities: List[Text] = None - ) -> List[Tuple[Optional[Text], Optional[Text], Text, Dict[Text, Any]]]: - """Takes a sentence and switches it to crfsuite format.""" + if features is None: + return None + + tokens = message.get(TOKENS_NAMES[TEXT]) + if len(tokens) != len(features): + common_utils.raise_warning( + f"Number of dense features ({len(features)}) for attribute " + f"'TEXT' does not match number of tokens ({len(tokens)}).", + docs=DOCS_URL_COMPONENTS + "#crfentityextractor", + ) + return None + + # convert to python-crfsuite feature format + features_out = [] + for feature in features: + feature_dict = { + str(index): token_features + for index, token_features in enumerate(feature) + } + converted = {"text_dense_features": feature_dict} + features_out.append(converted) + + return features_out + + def _convert_to_crf_tokens(self, message: Message) -> List[CRFToken]: + """Take a message and convert it to crfsuite format.""" crf_format = [] - if self.pos_features: - tokens = message.get("spacy_doc") - else: - tokens = message.get("tokens") + tokens = message.get(TOKENS_NAMES[TEXT]) + + text_dense_features = self._get_dense_features(message) + tags = self._get_tags(message) + for i, token in enumerate(tokens): - pattern = self.__pattern_of_token(message, i) - entity = entities[i] if entities else "N/A" - tag = self.__tag_of_token(token) if self.pos_features else None - crf_format.append((token.text, tag, entity, pattern)) + pattern = self._pattern_of_token(message, i) + entity = self.get_tag_for(tags, 
ENTITY_ATTRIBUTE_TYPE, i) + group = self.get_tag_for(tags, ENTITY_ATTRIBUTE_GROUP, i) + role = self.get_tag_for(tags, ENTITY_ATTRIBUTE_ROLE, i) + pos_tag = token.get(POS_TAG_KEY) + dense_features = ( + text_dense_features[i] if text_dense_features is not None else [] + ) + + crf_format.append( + CRFToken( + text=token.text, + pos_tag=pos_tag, + entity_tag=entity, + entity_group_tag=group, + entity_role_tag=role, + pattern=pattern, + dense_features=dense_features, + ) + ) + return crf_format - def _train_model( - self, - df_train: List[ - List[Tuple[Optional[Text], Optional[Text], Text, Dict[Text, Any]]] - ], - ) -> None: + def _get_tags(self, message: Message) -> Dict[Text, List[Text]]: + """Get assigned entity tags of message.""" + tokens = message.get(TOKENS_NAMES[TEXT]) + tags = {} + + for tag_name in self.crf_order: + if self.component_config[BILOU_FLAG]: + bilou_key = bilou_utils.get_bilou_key_for_tag(tag_name) + if message.get(bilou_key): + _tags = message.get(bilou_key) + else: + _tags = [NO_ENTITY_TAG for _ in tokens] + else: + _tags = [ + determine_token_labels( + token, message.get(ENTITIES), attribute_key=tag_name + ) + for token in tokens + ] + tags[tag_name] = _tags + + return tags + + def _train_model(self, df_train: List[List[CRFToken]]) -> None: """Train the crf tagger based on the training data.""" import sklearn_crfsuite - X_train = [self._sentence_to_features(sent) for sent in df_train] - y_train = [self._sentence_to_labels(sent) for sent in df_train] - self.ent_tagger = sklearn_crfsuite.CRF( - algorithm="lbfgs", - # coefficient for L1 penalty - c1=self.component_config["L1_c"], - # coefficient for L2 penalty - c2=self.component_config["L2_c"], - # stop earlier - max_iterations=self.component_config["max_iterations"], - # include transitions that are possible, but not observed - all_possible_transitions=True, - ) - self.ent_tagger.fit(X_train, y_train) + self.entity_taggers = {} + + for tag_name in self.crf_order: + logger.debug(f"Training CRF for '{tag_name}'.") + + # add entity tag features for second level CRFs + include_tag_features = tag_name != ENTITY_ATTRIBUTE_TYPE + X_train = [ + self._crf_tokens_to_features(sentence, include_tag_features) + for sentence in df_train + ] + y_train = [ + self._crf_tokens_to_tags(sentence, tag_name) for sentence in df_train + ] + + entity_tagger = sklearn_crfsuite.CRF( + algorithm="lbfgs", + # coefficient for L1 penalty + c1=self.component_config["L1_c"], + # coefficient for L2 penalty + c2=self.component_config["L2_c"], + # stop earlier + max_iterations=self.component_config["max_iterations"], + # include transitions that are possible, but not observed + all_possible_transitions=True, + ) + entity_tagger.fit(X_train, y_train) + + self.entity_taggers[tag_name] = entity_tagger + + logger.debug("Training finished.") diff --git a/rasa/nlu/extractors/duckling_http_extractor.py b/rasa/nlu/extractors/duckling_http_extractor.py index 682f98dba4dc..5af33a0d678f 100644 --- a/rasa/nlu/extractors/duckling_http_extractor.py +++ b/rasa/nlu/extractors/duckling_http_extractor.py @@ -1,19 +1,23 @@ import time - +import json import logging import os import requests from typing import Any, List, Optional, Text, Dict +import rasa.utils.endpoints as endpoints_utils +from rasa.constants import DOCS_URL_COMPONENTS +from rasa.nlu.constants import ENTITIES from rasa.nlu.config import RasaNLUModelConfig -from rasa.nlu.extractors import EntityExtractor +from rasa.nlu.extractors.extractor import EntityExtractor from rasa.nlu.model import Metadata from 
rasa.nlu.training_data import Message +from rasa.utils.common import raise_warning logger = logging.getLogger(__name__) -def extract_value(match): +def extract_value(match: Dict[Text, Any]) -> Dict[Text, Any]: if match["value"].get("type") == "interval": value = { "to": match["value"].get("to", {}).get("value"), @@ -25,7 +29,9 @@ def extract_value(match): return value -def convert_duckling_format_to_rasa(matches): +def convert_duckling_format_to_rasa( + matches: List[Dict[Text, Any]] +) -> List[Dict[Text, Any]]: extracted = [] for match in matches: @@ -48,8 +54,6 @@ def convert_duckling_format_to_rasa(matches): class DucklingHTTPExtractor(EntityExtractor): """Searches for structured entites, e.g. dates, using a duckling server.""" - provides = ["entities"] - defaults = { # by default all dimensions recognized by duckling are returned # dimensions can be configured to contain an array of strings @@ -62,6 +66,9 @@ class DucklingHTTPExtractor(EntityExtractor): # timezone like Europe/Berlin # if not set the default timezone of Duckling is going to be used "timezone": None, + # Timeout for receiving response from http url of the running duckling server + # if not set the default timeout of duckling http url is set to 3 seconds. + "timeout": 3, } def __init__( @@ -70,7 +77,7 @@ def __init__( language: Optional[Text] = None, ) -> None: - super(DucklingHTTPExtractor, self).__init__(component_config) + super().__init__(component_config) self.language = language @classmethod @@ -80,7 +87,7 @@ def create( return cls(component_config, config.language) - def _locale(self): + def _locale(self) -> Optional[Text]: if not self.component_config.get("locale"): # this is king of a quick fix to generate a proper locale # works most of the time @@ -89,45 +96,60 @@ def _locale(self): self.component_config["locale"] = locale_fix return self.component_config.get("locale") - def _url(self): + def _url(self) -> Optional[Text]: """Return url of the duckling service. Environment var will override.""" if os.environ.get("RASA_DUCKLING_HTTP_URL"): return os.environ["RASA_DUCKLING_HTTP_URL"] return self.component_config.get("url") - def _payload(self, text, reference_time): + def _payload(self, text: Text, reference_time: int) -> Dict[Text, Any]: + dimensions = self.component_config["dimensions"] return { "text": text, "locale": self._locale(), "tz": self.component_config.get("timezone"), + "dims": json.dumps(dimensions), "reftime": reference_time, } - def _duckling_parse(self, text, reference_time): - """Sends the request to the duckling server and parses the result.""" + def _duckling_parse(self, text: Text, reference_time: int) -> List[Dict[Text, Any]]: + """Sends the request to the duckling server and parses the result. + + Args: + text: Text for duckling server to parse. + reference_time: Reference time in milliseconds. + Returns: + JSON response from duckling server with parse data. + """ + parse_url = endpoints_utils.concat_url(self._url(), "/parse") try: payload = self._payload(text, reference_time) headers = { "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8" } response = requests.post( - self._url() + "/parse", data=payload, headers=headers + parse_url, + data=payload, + headers=headers, + timeout=self.component_config.get("timeout"), ) if response.status_code == 200: return response.json() else: logger.error( - "Failed to get a proper response from remote " - "duckling. Status Code: {}. 
Response: {}" - "".format(response.status_code, response.text) + f"Failed to get a proper response from remote " + f"duckling at '{parse_url}. Status Code: {response.status_code}. Response: {response.text}" ) return [] - except requests.exceptions.ConnectionError as e: + except ( + requests.exceptions.ConnectionError, + requests.exceptions.ReadTimeout, + ) as e: logger.error( "Failed to connect to duckling http server. Make sure " - "the duckling server is running and the proper host " + "the duckling server is running/healthy/not stale and the proper host " "and port are set in the configuration. More " "information on how to run the server can be found on " "github: " @@ -137,7 +159,7 @@ def _duckling_parse(self, text, reference_time): return [] @staticmethod - def _reference_time_from_message(message): + def _reference_time_from_message(message: Message) -> int: if message.time is not None: try: return int(message.time) * 1000 @@ -163,17 +185,16 @@ def process(self, message: Message, **kwargs: Any) -> None: ) else: extracted = [] - logger.warning( + raise_warning( "Duckling HTTP component in pipeline, but no " "`url` configuration in the config " "file nor is `RASA_DUCKLING_HTTP_URL` " - "set as an environment variable." + "set as an environment variable. No entities will be extracted!", + docs=DOCS_URL_COMPONENTS + "#ducklinghttpextractor", ) extracted = self.add_extractor_name(extracted) - message.set( - "entities", message.get("entities", []) + extracted, add_to_output=True - ) + message.set(ENTITIES, message.get(ENTITIES, []) + extracted, add_to_output=True) @classmethod def load( @@ -182,7 +203,7 @@ def load( model_dir: Text = None, model_metadata: Optional[Metadata] = None, cached_component: Optional["DucklingHTTPExtractor"] = None, - **kwargs: Any + **kwargs: Any, ) -> "DucklingHTTPExtractor": language = model_metadata.get("language") if model_metadata else None diff --git a/rasa/nlu/extractors/entity_synonyms.py b/rasa/nlu/extractors/entity_synonyms.py index 15584af6f17a..94116671b98f 100644 --- a/rasa/nlu/extractors/entity_synonyms.py +++ b/rasa/nlu/extractors/entity_synonyms.py @@ -1,47 +1,53 @@ import os -import warnings -from typing import Any, Dict, Optional, Text +from typing import Any, Dict, List, Optional, Text, Type -from rasa.nlu import utils +from rasa.nlu.components import Component +from rasa.constants import DOCS_URL_TRAINING_DATA_NLU +from rasa.nlu.constants import ENTITIES from rasa.nlu.config import RasaNLUModelConfig -from rasa.nlu.extractors import EntityExtractor +from rasa.nlu.extractors.extractor import EntityExtractor from rasa.nlu.model import Metadata from rasa.nlu.training_data import Message, TrainingData from rasa.nlu.utils import write_json_to_file import rasa.utils.io +from rasa.utils.common import raise_warning class EntitySynonymMapper(EntityExtractor): - - provides = ["entities"] + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [EntityExtractor] def __init__( self, - component_config: Optional[Dict[Text, Text]] = None, + component_config: Optional[Dict[Text, Any]] = None, synonyms: Optional[Dict[Text, Any]] = None, ) -> None: - super(EntitySynonymMapper, self).__init__(component_config) + super().__init__(component_config) self.synonyms = synonyms if synonyms else {} def train( - self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, ) -> None: for key, value in 
list(training_data.entity_synonyms.items()): self.add_entities_if_synonyms(key, value) for example in training_data.entity_examples: - for entity in example.get("entities", []): + for entity in example.get(ENTITIES, []): entity_val = example.text[entity["start"] : entity["end"]] self.add_entities_if_synonyms(entity_val, str(entity.get("value"))) def process(self, message: Message, **kwargs: Any) -> None: - updated_entities = message.get("entities", [])[:] + updated_entities = message.get(ENTITIES, [])[:] self.replace_synonyms(updated_entities) - message.set("entities", updated_entities, add_to_output=True) + message.set(ENTITIES, updated_entities, add_to_output=True) def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]: @@ -62,7 +68,7 @@ def load( model_dir: Optional[Text] = None, model_metadata: Optional[Metadata] = None, cached_component: Optional["EntitySynonymMapper"] = None, - **kwargs: Any + **kwargs: Any, ) -> "EntitySynonymMapper": file_name = meta.get("file") @@ -75,12 +81,13 @@ def load( synonyms = rasa.utils.io.read_json_file(entity_synonyms_file) else: synonyms = None - warnings.warn( - "Failed to load synonyms file from '{}'".format(entity_synonyms_file) + raise_warning( + f"Failed to load synonyms file from '{entity_synonyms_file}'.", + docs=DOCS_URL_TRAINING_DATA_NLU + "#entity-synonyms", ) return cls(meta, synonyms) - def replace_synonyms(self, entities): + def replace_synonyms(self, entities) -> None: for entity in entities: # need to wrap in `str` to handle e.g. entity values of type int entity_value = str(entity["value"]) @@ -88,7 +95,7 @@ def replace_synonyms(self, entities): entity["value"] = self.synonyms[entity_value.lower()] self.add_processor_name(entity) - def add_entities_if_synonyms(self, entity_a, entity_b): + def add_entities_if_synonyms(self, entity_a, entity_b) -> None: if entity_b is not None: original = str(entity_a) replacement = str(entity_b) @@ -96,17 +103,15 @@ def add_entities_if_synonyms(self, entity_a, entity_b): if original != replacement: original = original.lower() if original in self.synonyms and self.synonyms[original] != replacement: - warnings.warn( - "Found conflicting synonym definitions " - "for {}. Overwriting target {} with {}. " - "Check your training data and remove " - "conflicting synonym definitions to " - "prevent this from happening." - "".format( - repr(original), - repr(self.synonyms[original]), - repr(replacement), - ) + raise_warning( + f"Found conflicting synonym definitions " + f"for {repr(original)}. Overwriting target " + f"{repr(self.synonyms[original])} with " + f"{repr(replacement)}. 
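As a side note on the synonym handling above, a toy illustration of the lower-cased lookup that replace_synonyms performs; the synonym table and entity below are made up.

# Hypothetical synonym table as EntitySynonymMapper would learn it from training data.
synonyms = {"nyc": "New York City", "big apple": "New York City"}

entities = [{"entity": "city", "value": "NYC", "start": 10, "end": 13}]

for entity in entities:
    # values are wrapped in str() because entity values may be e.g. integers
    value = str(entity["value"])
    if value.lower() in synonyms:
        entity["value"] = synonyms[value.lower()]

print(entities[0]["value"])  # -> "New York City"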
" + f"Check your training data and remove " + f"conflicting synonym definitions to " + f"prevent this from happening.", + docs=DOCS_URL_TRAINING_DATA_NLU + "#entity-synonyms", ) self.synonyms[original] = replacement diff --git a/rasa/nlu/extractors/extractor.py b/rasa/nlu/extractors/extractor.py new file mode 100644 index 000000000000..fe109fe59f98 --- /dev/null +++ b/rasa/nlu/extractors/extractor.py @@ -0,0 +1,348 @@ +from typing import Any, Dict, List, Text, Tuple, Optional + +from rasa.constants import DOCS_URL_TRAINING_DATA_NLU +from rasa.nlu.training_data import TrainingData +from rasa.nlu.tokenizers.tokenizer import Token +from rasa.nlu.components import Component +from rasa.nlu.constants import ( + EXTRACTOR, + ENTITIES, + TOKENS_NAMES, + TEXT, + NO_ENTITY_TAG, + ENTITY_ATTRIBUTE_TYPE, + ENTITY_ATTRIBUTE_GROUP, + ENTITY_ATTRIBUTE_ROLE, + ENTITY_ATTRIBUTE_VALUE, + ENTITY_ATTRIBUTE_CONFIDENCE_TYPE, + ENTITY_ATTRIBUTE_CONFIDENCE_ROLE, + ENTITY_ATTRIBUTE_CONFIDENCE_GROUP, + ENTITY_ATTRIBUTE_START, + ENTITY_ATTRIBUTE_END, + INTENT, +) +from rasa.nlu.training_data import Message +import rasa.utils.common as common_utils +import rasa.nlu.utils.bilou_utils as bilou_utils + + +class EntityExtractor(Component): + def add_extractor_name( + self, entities: List[Dict[Text, Any]] + ) -> List[Dict[Text, Any]]: + for entity in entities: + entity[EXTRACTOR] = self.name + return entities + + def add_processor_name(self, entity: Dict[Text, Any]) -> Dict[Text, Any]: + if "processors" in entity: + entity["processors"].append(self.name) + else: + entity["processors"] = [self.name] + + return entity + + @staticmethod + def filter_irrelevant_entities(extracted: list, requested_dimensions: set) -> list: + """Only return dimensions the user configured""" + + if requested_dimensions: + return [ + entity + for entity in extracted + if entity[ENTITY_ATTRIBUTE_TYPE] in requested_dimensions + ] + return extracted + + @staticmethod + def find_entity( + entity: Dict[Text, Any], text: Text, tokens: List[Token] + ) -> Tuple[int, int]: + offsets = [token.start for token in tokens] + ends = [token.end for token in tokens] + + if entity[ENTITY_ATTRIBUTE_START] not in offsets: + message = ( + "Invalid entity {} in example '{}': " + "entities must span whole tokens. " + "Wrong entity start.".format(entity, text) + ) + raise ValueError(message) + + if entity[ENTITY_ATTRIBUTE_END] not in ends: + message = ( + "Invalid entity {} in example '{}': " + "entities must span whole tokens. " + "Wrong entity end.".format(entity, text) + ) + raise ValueError(message) + + start = offsets.index(entity[ENTITY_ATTRIBUTE_START]) + end = ends.index(entity[ENTITY_ATTRIBUTE_END]) + 1 + return start, end + + def filter_trainable_entities( + self, entity_examples: List[Message] + ) -> List[Message]: + """Filters out untrainable entity annotations. + + Creates a copy of entity_examples in which entities that have + `extractor` set to something other than + self.name (e.g. 'CRFEntityExtractor') are removed. 
+ """ + + filtered = [] + for message in entity_examples: + entities = [] + for ent in message.get(ENTITIES, []): + extractor = ent.get(EXTRACTOR) + if not extractor or extractor == self.name: + entities.append(ent) + data = message.data.copy() + data[ENTITIES] = entities + filtered.append( + Message( + text=message.text, + data=data, + output_properties=message.output_properties, + time=message.time, + features=message.features, + ) + ) + + return filtered + + def convert_predictions_into_entities( + self, + text: Text, + tokens: List[Token], + tags: Dict[Text, List[Text]], + confidences: Optional[Dict[Text, List[float]]] = None, + ) -> List[Dict[Text, Any]]: + """ + Convert predictions into entities. + + Args: + text: The text message. + tokens: Message tokens without CLS token. + tags: Predicted tags. + confidences: Confidences of predicted tags. + + Returns: + Entities. + """ + entities = [] + + last_entity_tag = NO_ENTITY_TAG + last_role_tag = NO_ENTITY_TAG + last_group_tag = NO_ENTITY_TAG + last_token_end = -1 + + for idx, token in enumerate(tokens): + current_entity_tag = self.get_tag_for(tags, ENTITY_ATTRIBUTE_TYPE, idx) + + if current_entity_tag == NO_ENTITY_TAG: + last_entity_tag = NO_ENTITY_TAG + last_token_end = token.end + continue + + current_group_tag = self.get_tag_for(tags, ENTITY_ATTRIBUTE_GROUP, idx) + current_role_tag = self.get_tag_for(tags, ENTITY_ATTRIBUTE_ROLE, idx) + + group_or_role_changed = ( + last_group_tag != current_group_tag or last_role_tag != current_role_tag + ) + + if bilou_utils.bilou_prefix_from_tag(current_entity_tag): + # checks for new bilou tag + # new bilou tag begins are not with I- , L- tags + new_bilou_tag_starts = last_entity_tag != current_entity_tag and ( + bilou_utils.LAST + != bilou_utils.bilou_prefix_from_tag(current_entity_tag) + and bilou_utils.INSIDE + != bilou_utils.bilou_prefix_from_tag(current_entity_tag) + ) + + # to handle bilou tags such as only I-, L- tags without B-tag + # and handle multiple U-tags consecutively + new_unigram_bilou_tag_starts = ( + last_entity_tag == NO_ENTITY_TAG + or bilou_utils.UNIT + == bilou_utils.bilou_prefix_from_tag(current_entity_tag) + ) + + new_tag_found = ( + new_bilou_tag_starts + or new_unigram_bilou_tag_starts + or group_or_role_changed + ) + last_entity_tag = current_entity_tag + current_entity_tag = bilou_utils.tag_without_prefix(current_entity_tag) + else: + new_tag_found = ( + last_entity_tag != current_entity_tag or group_or_role_changed + ) + last_entity_tag = current_entity_tag + + if new_tag_found: + # new entity found + entity = self._create_new_entity( + list(tags.keys()), + current_entity_tag, + current_group_tag, + current_role_tag, + token, + idx, + confidences, + ) + entities.append(entity) + elif token.start - last_token_end <= 1: + # current token has the same entity tag as the token before and + # the two tokens are only separated by at most one symbol (e.g. space, + # dash, etc.) + entities[-1][ENTITY_ATTRIBUTE_END] = token.end + if confidences is not None: + self._update_confidence_values(entities, confidences, idx) + else: + # the token has the same entity tag as the token before but the two + # tokens are separated by at least 2 symbols (e.g. multiple spaces, + # a comma and a space, etc.) 
+ entity = self._create_new_entity( + list(tags.keys()), + current_entity_tag, + current_group_tag, + current_role_tag, + token, + idx, + confidences, + ) + entities.append(entity) + + last_group_tag = current_group_tag + last_role_tag = current_role_tag + last_token_end = token.end + + for entity in entities: + entity[ENTITY_ATTRIBUTE_VALUE] = text[ + entity[ENTITY_ATTRIBUTE_START] : entity[ENTITY_ATTRIBUTE_END] + ] + + return entities + + @staticmethod + def _update_confidence_values( + entities: List[Dict[Text, Any]], confidences: Dict[Text, List[float]], idx: int + ): + # use the lower confidence value + entities[-1][ENTITY_ATTRIBUTE_CONFIDENCE_TYPE] = min( + entities[-1][ENTITY_ATTRIBUTE_CONFIDENCE_TYPE], + confidences[ENTITY_ATTRIBUTE_TYPE][idx], + ) + if ENTITY_ATTRIBUTE_ROLE in entities[-1]: + entities[-1][ENTITY_ATTRIBUTE_CONFIDENCE_ROLE] = min( + entities[-1][ENTITY_ATTRIBUTE_CONFIDENCE_ROLE], + confidences[ENTITY_ATTRIBUTE_ROLE][idx], + ) + if ENTITY_ATTRIBUTE_GROUP in entities[-1]: + entities[-1][ENTITY_ATTRIBUTE_CONFIDENCE_GROUP] = min( + entities[-1][ENTITY_ATTRIBUTE_CONFIDENCE_GROUP], + confidences[ENTITY_ATTRIBUTE_GROUP][idx], + ) + + @staticmethod + def get_tag_for(tags: Dict[Text, List[Text]], tag_name: Text, idx: int) -> Text: + """Get the value of the given tag name from the list of tags. + + Args: + tags: Mapping of tag name to list of tags; + tag_name: The tag name of interest. + idx: The index position of the tag. + + Returns: + The tag value. + """ + if tag_name in tags: + return tags[tag_name][idx] + return NO_ENTITY_TAG + + @staticmethod + def _create_new_entity( + tag_names: List[Text], + entity_tag: Text, + group_tag: Text, + role_tag: Text, + token: Token, + idx: int, + confidences: Optional[Dict[Text, List[float]]] = None, + ) -> Dict[Text, Any]: + """Create a new entity. + + Args: + tag_names: The tag names to include in the entity. + entity_tag: The entity type value. + group_tag: The entity group value. + role_tag: The entity role value. + token: The token. + confidence: The confidence value. + + Returns: + Created entity. + """ + entity = { + ENTITY_ATTRIBUTE_TYPE: entity_tag, + ENTITY_ATTRIBUTE_START: token.start, + ENTITY_ATTRIBUTE_END: token.end, + } + + if confidences is not None: + entity[ENTITY_ATTRIBUTE_CONFIDENCE_TYPE] = confidences[ + ENTITY_ATTRIBUTE_TYPE + ][idx] + + if ENTITY_ATTRIBUTE_ROLE in tag_names and role_tag != NO_ENTITY_TAG: + entity[ENTITY_ATTRIBUTE_ROLE] = role_tag + if confidences is not None: + entity[ENTITY_ATTRIBUTE_CONFIDENCE_ROLE] = confidences[ + ENTITY_ATTRIBUTE_ROLE + ][idx] + if ENTITY_ATTRIBUTE_GROUP in tag_names and group_tag != NO_ENTITY_TAG: + entity[ENTITY_ATTRIBUTE_GROUP] = group_tag + if confidences is not None: + entity[ENTITY_ATTRIBUTE_CONFIDENCE_GROUP] = confidences[ + ENTITY_ATTRIBUTE_GROUP + ][idx] + + return entity + + @staticmethod + def check_correct_entity_annotations(training_data: TrainingData) -> None: + """Check if entities are correctly annotated in the training data. + + If the start and end values of an entity do not match any start and end values + of the respected token, we define an entity as misaligned and log a warning. + + Args: + training_data: The training data. 
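The span-merging performed by convert_predictions_into_entities can be hard to follow once roles, groups, confidences and BILOU prefixes are involved. The following deliberately simplified sketch shows only the core idea of merging adjacent, identically tagged tokens into one entity; the tokens, tags and offsets are invented and none of the BILOU or role/group handling is reproduced.

# Simplified illustration (not the Rasa implementation): merge per-token entity
# tags into character spans. Tokens are (text, start, end) triples; "O" means
# no entity, anything else is the entity type.
tokens = [("flight", 0, 6), ("to", 7, 9), ("New", 10, 13), ("York", 14, 18)]
tags = ["O", "O", "city", "city"]

entities = []
for (text, start, end), tag in zip(tokens, tags):
    if tag == "O":
        continue
    if entities and entities[-1]["entity"] == tag and start - entities[-1]["end"] <= 1:
        # same tag and at most one separator character: extend the last entity
        entities[-1]["end"] = end
    else:
        entities.append({"entity": tag, "start": start, "end": end})

message = "flight to New York"
for entity in entities:
    entity["value"] = message[entity["start"]:entity["end"]]

print(entities)  # [{'entity': 'city', 'start': 10, 'end': 18, 'value': 'New York'}]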
+ """ + for example in training_data.entity_examples: + entity_boundaries = [ + (entity[ENTITY_ATTRIBUTE_START], entity[ENTITY_ATTRIBUTE_END]) + for entity in example.get(ENTITIES) + ] + token_start_positions = [t.start for t in example.get(TOKENS_NAMES[TEXT])] + token_end_positions = [t.end for t in example.get(TOKENS_NAMES[TEXT])] + + for entity_start, entity_end in entity_boundaries: + if ( + entity_start not in token_start_positions + or entity_end not in token_end_positions + ): + common_utils.raise_warning( + f"Misaligned entity annotation in message '{example.text}' " + f"with intent '{example.get(INTENT)}'. Make sure the start and " + f"end values of entities in the training data match the token " + f"boundaries (e.g. entities don't include trailing whitespaces " + f"or punctuation).", + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + break diff --git a/rasa/nlu/extractors/mitie_entity_extractor.py b/rasa/nlu/extractors/mitie_entity_extractor.py index e91c6e8cc3c7..90fa35f3d47b 100644 --- a/rasa/nlu/extractors/mitie_entity_extractor.py +++ b/rasa/nlu/extractors/mitie_entity_extractor.py @@ -1,12 +1,17 @@ import logging import os import typing -from typing import Any, Dict, List, Optional, Text +from typing import Any, Dict, List, Optional, Text, Type +from rasa.nlu.constants import ENTITIES, TOKENS_NAMES, TEXT from rasa.nlu.config import RasaNLUModelConfig -from rasa.nlu.extractors import EntityExtractor +from rasa.nlu.utils.mitie_utils import MitieNLP +from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer +from rasa.nlu.components import Component +from rasa.nlu.extractors.extractor import EntityExtractor from rasa.nlu.model import Metadata from rasa.nlu.training_data import Message, TrainingData +from rasa.utils.common import raise_warning logger = logging.getLogger(__name__) @@ -15,29 +20,30 @@ class MitieEntityExtractor(EntityExtractor): + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [MitieNLP, Tokenizer] - provides = ["entities"] - - requires = ["tokens", "mitie_feature_extractor", "mitie_file"] - - def __init__(self, component_config: Dict[Text, Any] = None, ner=None): + def __init__(self, component_config: Optional[Dict[Text, Any]] = None, ner=None): """Construct a new intent classifier using the sklearn framework.""" - super(MitieEntityExtractor, self).__init__(component_config) + super().__init__(component_config) self.ner = ner @classmethod def required_packages(cls) -> List[Text]: return ["mitie"] - def extract_entities(self, text, tokens, feature_extractor): + def extract_entities( + self, text: Text, tokens: List[Token], feature_extractor + ) -> List[Dict[Text, Any]]: ents = [] tokens_strs = [token.text for token in tokens] if self.ner: entities = self.ner.extract_entities(tokens_strs, feature_extractor) for e in entities: if len(e[0]): - start = tokens[e[0][0]].offset + start = tokens[e[0][0]].start end = tokens[e[0][-1]].end ents.append( @@ -53,7 +59,10 @@ def extract_entities(self, text, tokens, feature_extractor): return ents def train( - self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, ) -> None: import mitie @@ -84,28 +93,34 @@ def train( if found_one_entity: self.ner = trainer.train() - def _prepare_mitie_sample(self, training_example): + @staticmethod + def _prepare_mitie_sample(training_example: Message) -> Any: import mitie text = training_example.text - tokens = training_example.get("tokens") + 
tokens = training_example.get(TOKENS_NAMES[TEXT]) sample = mitie.ner_training_instance([t.text for t in tokens]) - for ent in training_example.get("entities", []): + for ent in training_example.get(ENTITIES, []): try: # if the token is not aligned an exception will be raised start, end = MitieEntityExtractor.find_entity(ent, text, tokens) except ValueError as e: - logger.warning("Example skipped: {}".format(str(e))) + raise_warning( + f"Failed to use example '{text}' to train MITIE " + f"entity extractor. Example will be skipped." + f"Error: {e}" + ) continue try: # mitie will raise an exception on malicious # input - e.g. on overlapping entities sample.add_entity(list(range(start, end)), ent["entity"]) except Exception as e: - logger.warning( - "Failed to add entity example " - "'{}' of sentence '{}'. Reason: " - "{}".format(str(e), str(text), e) + raise_warning( + f"Failed to add entity example " + f"'{str(e)}' of sentence '{str(text)}'. " + f"Example will be ignored. Reason: " + f"{e}" ) continue return sample @@ -120,12 +135,10 @@ def process(self, message: Message, **kwargs: Any) -> None: ) ents = self.extract_entities( - message.text, message.get("tokens"), mitie_feature_extractor + message.text, message.get(TOKENS_NAMES[TEXT]), mitie_feature_extractor ) extracted = self.add_extractor_name(ents) - message.set( - "entities", message.get("entities", []) + extracted, add_to_output=True - ) + message.set(ENTITIES, message.get(ENTITIES, []) + extracted, add_to_output=True) @classmethod def load( @@ -134,7 +147,7 @@ def load( model_dir: Text = None, model_metadata: Metadata = None, cached_component: Optional["MitieEntityExtractor"] = None, - **kwargs: Any + **kwargs: Any, ) -> "MitieEntityExtractor": import mitie diff --git a/rasa/nlu/extractors/regex_entity_extractor.py b/rasa/nlu/extractors/regex_entity_extractor.py new file mode 100644 index 000000000000..73bf83c50c23 --- /dev/null +++ b/rasa/nlu/extractors/regex_entity_extractor.py @@ -0,0 +1,132 @@ +import logging +import os +import re +from typing import Any, Dict, List, Optional, Text + +import rasa.utils.io as io_utils +import rasa.utils.common as common_utils +import rasa.nlu.utils.pattern_utils as pattern_utils +from rasa.nlu.model import Metadata +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.training_data import TrainingData +from rasa.nlu.constants import ( + ENTITIES, + ENTITY_ATTRIBUTE_TYPE, + ENTITY_ATTRIBUTE_START, + ENTITY_ATTRIBUTE_VALUE, + ENTITY_ATTRIBUTE_END, +) +from rasa.nlu.training_data import Message +from rasa.nlu.extractors.extractor import EntityExtractor + +logger = logging.getLogger(__name__) + + +class RegexEntityExtractor(EntityExtractor): + """Searches for entities in the user's message using the lookup tables and regexes + defined in the training data.""" + + defaults = { + # text will be processed with case insensitive as default + "case_sensitive": False, + # use lookup tables to extract entities + "use_lookup_tables": True, + # use regexes to extract entities + "use_regexes": True, + } + + def __init__( + self, + component_config: Optional[Dict[Text, Any]] = None, + patterns: Optional[List[Dict[Text, Text]]] = None, + ): + super(RegexEntityExtractor, self).__init__(component_config) + + self.case_sensitive = self.component_config["case_sensitive"] + self.patterns = patterns or [] + + def train( + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, + ) -> None: + self.patterns = pattern_utils.extract_patterns( + training_data, + 
use_lookup_tables=self.component_config["use_lookup_tables"], + use_regexes=self.component_config["use_regexes"], + use_only_entities=True, + ) + + if not self.patterns: + common_utils.raise_warning( + "No lookup tables or regexes defined in the training data that have " + "a name equal to any entity in the training data. In order for this " + "component to work you need to define valid lookup tables or regexes " + "in the training data." + ) + + def process(self, message: Message, **kwargs: Any) -> None: + if not self.patterns: + return + + extracted_entities = self._extract_entities(message) + extracted_entities = self.add_extractor_name(extracted_entities) + + message.set( + ENTITIES, message.get(ENTITIES, []) + extracted_entities, add_to_output=True + ) + + def _extract_entities(self, message: Message) -> List[Dict[Text, Any]]: + """Extract entities of the given type from the given user message.""" + entities = [] + + flags = 0 # default flag + if not self.case_sensitive: + flags = re.IGNORECASE + + for pattern in self.patterns: + matches = re.finditer(pattern["pattern"], message.text, flags=flags) + matches = list(matches) + + for match in matches: + start_index = match.start() + end_index = match.end() + entities.append( + { + ENTITY_ATTRIBUTE_TYPE: pattern["name"], + ENTITY_ATTRIBUTE_START: start_index, + ENTITY_ATTRIBUTE_END: end_index, + ENTITY_ATTRIBUTE_VALUE: message.text[start_index:end_index], + } + ) + + return entities + + @classmethod + def load( + cls, + meta: Dict[Text, Any], + model_dir: Optional[Text] = None, + model_metadata: Optional[Metadata] = None, + cached_component: Optional["RegexEntityExtractor"] = None, + **kwargs: Any, + ) -> "RegexEntityExtractor": + + file_name = meta.get("file") + regex_file = os.path.join(model_dir, file_name) + + if os.path.exists(regex_file): + patterns = io_utils.read_json_file(regex_file) + return RegexEntityExtractor(meta, patterns=patterns) + + return RegexEntityExtractor(meta) + + def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]: + """Persist this model into the passed directory. 
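A condensed, standalone version of the matching loop in _extract_entities above, using plain dictionary keys in place of the ENTITY_ATTRIBUTE_* constants; the entity name, pattern and text are invented for illustration.

import re

# Illustrative pattern as RegexEntityExtractor would derive from a lookup table
# or regex that is named after an entity in the training data.
pattern = {"name": "priority", "pattern": r"\b(high|medium|low)\b"}
text = "Please treat this as HIGH priority"

flags = re.IGNORECASE  # case_sensitive defaults to False
entities = [
    {
        "entity": pattern["name"],
        "start": match.start(),
        "end": match.end(),
        "value": text[match.start():match.end()],
    }
    for match in re.finditer(pattern["pattern"], text, flags=flags)
]

print(entities)  # [{'entity': 'priority', 'start': 21, 'end': 25, 'value': 'HIGH'}]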
+ Return the metadata necessary to load the model again.""" + file_name = f"{file_name}.json" + regex_file = os.path.join(model_dir, file_name) + io_utils.dump_obj_as_json_to_file(regex_file, self.patterns) + + return {"file": file_name} diff --git a/rasa/nlu/extractors/spacy_entity_extractor.py b/rasa/nlu/extractors/spacy_entity_extractor.py index 04b508cc3182..d0546e031b8f 100644 --- a/rasa/nlu/extractors/spacy_entity_extractor.py +++ b/rasa/nlu/extractors/spacy_entity_extractor.py @@ -1,7 +1,10 @@ import typing -from typing import Any, Dict, List, Text +from typing import Any, Dict, List, Text, Optional, Type -from rasa.nlu.extractors import EntityExtractor +from rasa.nlu.constants import ENTITIES +from rasa.nlu.utils.spacy_utils import SpacyNLP +from rasa.nlu.components import Component +from rasa.nlu.extractors.extractor import EntityExtractor from rasa.nlu.training_data import Message if typing.TYPE_CHECKING: @@ -9,10 +12,9 @@ class SpacyEntityExtractor(EntityExtractor): - - provides = ["entities"] - - requires = ["spacy_nlp"] + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [SpacyNLP] defaults = { # by default all dimensions recognized by spacy are returned @@ -21,8 +23,8 @@ class SpacyEntityExtractor(EntityExtractor): "dimensions": None } - def __init__(self, component_config: Text = None) -> None: - super(SpacyEntityExtractor, self).__init__(component_config) + def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None: + super().__init__(component_config) def process(self, message: Message, **kwargs: Any) -> None: # can't use the existing doc here (spacy_doc on the message) @@ -34,9 +36,7 @@ def process(self, message: Message, **kwargs: Any) -> None: extracted = SpacyEntityExtractor.filter_irrelevant_entities( all_extracted, dimensions ) - message.set( - "entities", message.get("entities", []) + extracted, add_to_output=True - ) + message.set(ENTITIES, message.get(ENTITIES, []) + extracted, add_to_output=True) @staticmethod def extract_entities(doc: "Doc") -> List[Dict[Text, Any]]: diff --git a/rasa/nlu/featurizers/__init__.py b/rasa/nlu/featurizers/__init__.py index e419ab5e03a0..e69de29bb2d1 100644 --- a/rasa/nlu/featurizers/__init__.py +++ b/rasa/nlu/featurizers/__init__.py @@ -1,17 +0,0 @@ -import numpy as np - -from rasa.nlu.components import Component -from rasa.nlu.constants import MESSAGE_VECTOR_FEATURE_NAMES, MESSAGE_TEXT_ATTRIBUTE - - -class Featurizer(Component): - @staticmethod - def _combine_with_existing_features( - message, - additional_features, - feature_name=MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE], - ): - if message.get(feature_name) is not None: - return np.hstack((message.get(feature_name), additional_features)) - else: - return additional_features diff --git a/rasa/nlu/featurizers/count_vectors_featurizer.py b/rasa/nlu/featurizers/count_vectors_featurizer.py deleted file mode 100644 index 934a2b5c8dc3..000000000000 --- a/rasa/nlu/featurizers/count_vectors_featurizer.py +++ /dev/null @@ -1,628 +0,0 @@ -import logging -import os -import re -from typing import Any, Dict, List, Optional, Text, Union -import numpy as np - -from sklearn.feature_extraction.text import CountVectorizer -from rasa.nlu import utils -from rasa.nlu.config import RasaNLUModelConfig -from rasa.nlu.featurizers import Featurizer -from rasa.nlu.model import Metadata -from rasa.nlu.training_data import Message, TrainingData - -logger = logging.getLogger(__name__) - -from rasa.nlu.constants import ( - MESSAGE_RESPONSE_ATTRIBUTE, - 
MESSAGE_INTENT_ATTRIBUTE, - MESSAGE_TEXT_ATTRIBUTE, - MESSAGE_TOKENS_NAMES, - MESSAGE_ATTRIBUTES, - MESSAGE_SPACY_FEATURES_NAMES, - MESSAGE_VECTOR_FEATURE_NAMES, - SPACY_FEATURIZABLE_ATTRIBUTES, -) - - -class CountVectorsFeaturizer(Featurizer): - """Bag of words featurizer - - Creates bag-of-words representation of intent features - using sklearn's `CountVectorizer`. - All tokens which consist only of digits (e.g. 123 and 99 - but not ab12d) will be represented by a single feature. - - Set `analyzer` to 'char_wb' - to use the idea of Subword Semantic Hashing - from https://arxiv.org/abs/1810.07150. - """ - - provides = [ - MESSAGE_VECTOR_FEATURE_NAMES[attribute] for attribute in MESSAGE_ATTRIBUTES - ] - - requires = [] - - defaults = { - # whether to use a shared vocab - "use_shared_vocab": False, - # the parameters are taken from - # sklearn's CountVectorizer - # whether to use word or character n-grams - # 'char_wb' creates character n-grams inside word boundaries - # n-grams at the edges of words are padded with space. - "analyzer": "word", # use 'char' or 'char_wb' for character - # regular expression for tokens - # only used if analyzer == 'word' - "token_pattern": r"(?u)\b\w\w+\b", - # remove accents during the preprocessing step - "strip_accents": None, # {'ascii', 'unicode', None} - # list of stop words - "stop_words": None, # string {'english'}, list, or None (default) - # min document frequency of a word to add to vocabulary - # float - the parameter represents a proportion of documents - # integer - absolute counts - "min_df": 1, # float in range [0.0, 1.0] or int - # max document frequency of a word to add to vocabulary - # float - the parameter represents a proportion of documents - # integer - absolute counts - "max_df": 1.0, # float in range [0.0, 1.0] or int - # set range of ngrams to be extracted - "min_ngram": 1, # int - "max_ngram": 1, # int - # limit vocabulary size - "max_features": None, # int or None - # if convert all characters to lowercase - "lowercase": True, # bool - # handling Out-Of-Vacabulary (OOV) words - # will be converted to lowercase if lowercase is True - "OOV_token": None, # string or None - "OOV_words": [], # string or list of strings - } - - @classmethod - def required_packages(cls) -> List[Text]: - return ["sklearn"] - - def _load_count_vect_params(self): - - # Use shared vocabulary between text and all other attributes of Message - self.use_shared_vocab = self.component_config["use_shared_vocab"] - - # set analyzer - self.analyzer = self.component_config["analyzer"] - - # regular expression for tokens - self.token_pattern = self.component_config["token_pattern"] - - # remove accents during the preprocessing step - self.strip_accents = self.component_config["strip_accents"] - - # list of stop words - self.stop_words = self.component_config["stop_words"] - - # min number of word occurancies in the document to add to vocabulary - self.min_df = self.component_config["min_df"] - - # max number (fraction if float) of word occurancies - # in the document to add to vocabulary - self.max_df = self.component_config["max_df"] - - # set ngram range - self.min_ngram = self.component_config["min_ngram"] - self.max_ngram = self.component_config["max_ngram"] - - # limit vocabulary size - self.max_features = self.component_config["max_features"] - - # if convert all characters to lowercase - self.lowercase = self.component_config["lowercase"] - - # noinspection PyPep8Naming - def _load_OOV_params(self): - self.OOV_token = self.component_config["OOV_token"] - - 
self.OOV_words = self.component_config["OOV_words"] - if self.OOV_words and not self.OOV_token: - logger.error( - "The list OOV_words={} was given, but " - "OOV_token was not. OOV words are ignored." - "".format(self.OOV_words) - ) - self.OOV_words = [] - - if self.lowercase and self.OOV_token: - # convert to lowercase - self.OOV_token = self.OOV_token.lower() - if self.OOV_words: - self.OOV_words = [w.lower() for w in self.OOV_words] - - def _check_attribute_vocabulary(self, attribute: Text) -> bool: - """Check if trained vocabulary exists in attribute's count vectorizer""" - try: - return hasattr(self.vectorizers[attribute], "vocabulary_") - except (AttributeError, TypeError): - return False - - def _get_attribute_vocabulary(self, attribute: Text) -> Optional[Dict[Text, int]]: - """Get trained vocabulary from attribute's count vectorizer""" - - try: - return self.vectorizers[attribute].vocabulary_ - except (AttributeError, TypeError): - return None - - def _collect_vectorizer_vocabularies(self): - """Get vocabulary for all attributes""" - - attribute_vocabularies = {} - for attribute in MESSAGE_ATTRIBUTES: - attribute_vocabularies[attribute] = self._get_attribute_vocabulary( - attribute - ) - return attribute_vocabularies - - def _get_attribute_vocabulary_tokens(self, attribute: Text) -> Optional[List[Text]]: - """Get all keys of vocabulary of an attribute""" - - attribute_vocabulary = self._get_attribute_vocabulary(attribute) - try: - return list(attribute_vocabulary.keys()) - except TypeError: - return None - - def _check_analyzer(self): - if self.analyzer != "word": - if self.OOV_token is not None: - logger.warning( - "Analyzer is set to character, " - "provided OOV word token will be ignored." - ) - if self.stop_words is not None: - logger.warning( - "Analyzer is set to character, " - "provided stop words will be ignored." - ) - if self.max_ngram == 1: - logger.warning( - "Analyzer is set to character, " - "but max n-gram is set to 1. " - "It means that the vocabulary will " - "contain single letters only." 
- ) - - def __init__( - self, - component_config: Dict[Text, Any] = None, - vectorizers: Optional[Dict[Text, "CountVectorizer"]] = None, - ) -> None: - """Construct a new count vectorizer using the sklearn framework.""" - - super(CountVectorsFeaturizer, self).__init__(component_config) - - # parameters for sklearn's CountVectorizer - self._load_count_vect_params() - - # handling Out-Of-Vocabulary (OOV) words - self._load_OOV_params() - - # warn that some of config parameters might be ignored - self._check_analyzer() - - # declare class instance for CountVectorizer - self.vectorizers = vectorizers - - def _get_message_text_by_attribute( - self, message: "Message", attribute: Text = MESSAGE_TEXT_ATTRIBUTE - ) -> Text: - """Get processed text of attribute of a message""" - - if message.get(attribute) is None: - # return empty string since sklearn countvectorizer does not like None object while training and predicting - return "" - - tokens = self._get_message_tokens_by_attribute(message, attribute) - - text = self.process_text(tokens) - - text = self._replace_with_oov_token(text, attribute) - - return text - - def process_text(self, tokens: List[Text]) -> Text: - """Apply an processing, cleaning steps to text""" - - # replace all digits with NUMBER token - text = re.sub(r"\b[0-9]+\b", "__NUMBER__", " ".join(tokens)) - - # convert to lowercase if necessary - if self.lowercase: - text = text.lower() - return text - - def _replace_with_oov_token(self, text: Text, attribute: Text) -> Text: - """Replace OOV words with OOV token""" - - if self.OOV_token and self.analyzer == "word": - text_tokens = text.split() - if self._check_attribute_vocabulary( - attribute - ) and self.OOV_token in self._get_attribute_vocabulary(attribute): - # CountVectorizer is trained, process for prediction - text_tokens = [ - t - if t in self._get_attribute_vocabulary_tokens(attribute) - else self.OOV_token - for t in text_tokens - ] - elif self.OOV_words: - # CountVectorizer is not trained, process for train - text_tokens = [ - self.OOV_token if t in self.OOV_words else t for t in text_tokens - ] - text = " ".join(text_tokens) - return text - - @staticmethod - def _get_message_tokens_by_attribute( - message: "Message", attribute: Text - ) -> List[Text]: - """Get text tokens of an attribute of a message""" - - if attribute in SPACY_FEATURIZABLE_ATTRIBUTES and message.get( - MESSAGE_SPACY_FEATURES_NAMES[attribute] - ): # if lemmatize is possible - tokens = [ - t.lemma_ for t in message.get(MESSAGE_SPACY_FEATURES_NAMES[attribute]) - ] - elif message.get( - MESSAGE_TOKENS_NAMES[attribute] - ): # if directly tokens is provided - tokens = [t.text for t in message.get(MESSAGE_TOKENS_NAMES[attribute])] - else: - tokens = message.get(attribute).split() - return tokens - - # noinspection PyPep8Naming - def _check_OOV_present(self, examples): - """Check if an OOV word is present""" - if self.OOV_token and not self.OOV_words: - for t in examples: - if ( - t is None - or self.OOV_token in t - or (self.lowercase and self.OOV_token in t.lower()) - ): - return - - logger.warning( - "OOV_token='{}' was given, but it is not present " - "in the training data. All unseen words " - "will be ignored during prediction." 
- "".format(self.OOV_token) - ) - - def _set_attribute_features( - self, - attribute: Text, - attribute_features: np.ndarray, - training_data: "TrainingData", - ): - """Set computed features of the attribute to corresponding message objects""" - for i, example in enumerate(training_data.intent_examples): - # create bag for each example - example.set( - MESSAGE_VECTOR_FEATURE_NAMES[attribute], - self._combine_with_existing_features( - example, - attribute_features[i], - MESSAGE_VECTOR_FEATURE_NAMES[attribute], - ), - ) - - def _get_all_attributes_processed_texts( - self, training_data: "TrainingData" - ) -> Dict[Text, List[Text]]: - """Get processed text for all attributes of examples in training data""" - - processed_attribute_texts = {} - for attribute in MESSAGE_ATTRIBUTES: - attribute_texts = [ - self._get_message_text_by_attribute(example, attribute) - for example in training_data.intent_examples - ] - self._check_OOV_present(attribute_texts) - processed_attribute_texts[attribute] = attribute_texts - return processed_attribute_texts - - @staticmethod - def create_shared_vocab_vectorizers( - token_pattern, - strip_accents, - lowercase, - stop_words, - ngram_range, - max_df, - min_df, - max_features, - analyzer, - vocabulary=None, - ) -> Dict[Text, "CountVectorizer"]: - """Create vectorizers for all attributes with shared vocabulary""" - - shared_vectorizer = CountVectorizer( - token_pattern=token_pattern, - strip_accents=strip_accents, - lowercase=lowercase, - stop_words=stop_words, - ngram_range=ngram_range, - max_df=max_df, - min_df=min_df, - max_features=max_features, - analyzer=analyzer, - vocabulary=vocabulary, - ) - - attribute_vectorizers = {} - - for attribute in MESSAGE_ATTRIBUTES: - attribute_vectorizers[attribute] = shared_vectorizer - - return attribute_vectorizers - - @staticmethod - def create_independent_vocab_vectorizers( - token_pattern, - strip_accents, - lowercase, - stop_words, - ngram_range, - max_df, - min_df, - max_features, - analyzer, - vocabulary=None, - ) -> Dict[Text, "CountVectorizer"]: - """Create vectorizers for all attributes with independent vocabulary""" - - attribute_vectorizers = {} - - for attribute in MESSAGE_ATTRIBUTES: - - attribute_vocabulary = vocabulary[attribute] if vocabulary else None - - attribute_vectorizer = CountVectorizer( - token_pattern=token_pattern, - strip_accents=strip_accents, - lowercase=lowercase, - stop_words=stop_words, - ngram_range=ngram_range, - max_df=max_df, - min_df=min_df, - max_features=max_features, - analyzer=analyzer, - vocabulary=attribute_vocabulary, - ) - attribute_vectorizers[attribute] = attribute_vectorizer - - return attribute_vectorizers - - def _train_with_shared_vocab(self, attribute_texts: Dict[Text, List[Text]]): - """Construct the vectorizers and train them with a shared vocab""" - - self.vectorizers = self.create_shared_vocab_vectorizers( - self.token_pattern, - self.strip_accents, - self.lowercase, - self.stop_words, - (self.min_ngram, self.max_ngram), - self.max_df, - self.min_df, - self.max_features, - self.analyzer, - ) - - combined_cleaned_texts = [] - for attribute in MESSAGE_ATTRIBUTES: - combined_cleaned_texts += attribute_texts[attribute] - - try: - self.vectorizers[MESSAGE_TEXT_ATTRIBUTE].fit(combined_cleaned_texts) - except ValueError: - logger.warning( - "Unable to train a shared CountVectorizer. 
Leaving an untrained CountVectorizer" - ) - - @staticmethod - def _attribute_texts_is_non_empty(attribute_texts): - return any(attribute_texts) - - def _train_with_independent_vocab(self, attribute_texts: Dict[Text, List[Text]]): - """Construct the vectorizers and train them with an independent vocab""" - - self.vectorizers = self.create_independent_vocab_vectorizers( - self.token_pattern, - self.strip_accents, - self.lowercase, - self.stop_words, - (self.min_ngram, self.max_ngram), - self.max_df, - self.min_df, - self.max_features, - self.analyzer, - ) - - for attribute in MESSAGE_ATTRIBUTES: - if self._attribute_texts_is_non_empty(attribute_texts[attribute]): - try: - self.vectorizers[attribute].fit(attribute_texts[attribute]) - except ValueError: - logger.warning( - "Unable to train CountVectorizer for message attribute {}. " - "Leaving an untrained CountVectorizer for it".format(attribute) - ) - else: - logger.debug( - "No text provided for {} attribute in any messages of training data. Skipping " - "training a CountVectorizer for it.".format(attribute) - ) - - def _get_featurized_attribute( - self, attribute: Text, attribute_texts: List[Text] - ) -> Optional[np.ndarray]: - """Return features of a particular attribute for complete data""" - - if self._check_attribute_vocabulary(attribute): - # count vectorizer was trained - featurized_attributes = ( - self.vectorizers[attribute].transform(attribute_texts).toarray() - ) - return featurized_attributes - else: - return None - - def train( - self, training_data: TrainingData, cfg: RasaNLUModelConfig = None, **kwargs: Any - ) -> None: - """Train the featurizer. - - Take parameters from config and - construct a new count vectorizer using the sklearn framework. - """ - - spacy_nlp = kwargs.get("spacy_nlp") - if spacy_nlp is not None: - # create spacy lemma_ for OOV_words - self.OOV_words = [t.lemma_ for w in self.OOV_words for t in spacy_nlp(w)] - - # process sentences and collect data for all attributes - processed_attribute_texts = self._get_all_attributes_processed_texts( - training_data - ) - - # train for all attributes - if self.use_shared_vocab: - self._train_with_shared_vocab(processed_attribute_texts) - else: - self._train_with_independent_vocab(processed_attribute_texts) - - # transform for all attributes - for attribute in MESSAGE_ATTRIBUTES: - - attribute_features = self._get_featurized_attribute( - attribute, processed_attribute_texts[attribute] - ) - - if attribute_features is not None: - self._set_attribute_features( - attribute, attribute_features, training_data - ) - - def process(self, message: Message, **kwargs: Any) -> None: - """Process incoming message and compute and set features""" - - if self.vectorizers is None: - logger.error( - "There is no trained CountVectorizer: " - "component is either not trained or " - "didn't receive enough training data" - ) - else: - message_text = self._get_message_text_by_attribute( - message, attribute=MESSAGE_TEXT_ATTRIBUTE - ) - - bag = ( - self.vectorizers[MESSAGE_TEXT_ATTRIBUTE] - .transform([message_text]) - .toarray() - .squeeze() - ) - message.set( - MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE], - self._combine_with_existing_features( - message, - bag, - feature_name=MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE], - ), - ) - - @staticmethod - def _is_any_model_trained(attribute_vocabularies) -> bool: - """Check if any model got trained""" - - return any(value is not None for value in attribute_vocabularies.values()) - - def persist(self, file_name: Text, model_dir: 
Text) -> Optional[Dict[Text, Any]]: - """Persist this model into the passed directory. - - Returns the metadata necessary to load the model again. - """ - - file_name = file_name + ".pkl" - - if self.vectorizers: - # vectorizer instance was not None, some models could have been trained - attribute_vocabularies = self._collect_vectorizer_vocabularies() - if self._is_any_model_trained(attribute_vocabularies): - # Definitely need to persist some vocabularies - featurizer_file = os.path.join(model_dir, file_name) - - if self.use_shared_vocab: - # Only persist vocabulary from one attribute. Can be loaded and distributed to all attributes. - utils.json_pickle( - featurizer_file, attribute_vocabularies[MESSAGE_TEXT_ATTRIBUTE] - ) - else: - utils.json_pickle(featurizer_file, attribute_vocabularies) - return {"file": file_name} - - @classmethod - def load( - cls, - meta: Dict[Text, Any], - model_dir: Text = None, - model_metadata: Metadata = None, - cached_component: Optional["CountVectorsFeaturizer"] = None, - **kwargs: Any - ) -> "CountVectorsFeaturizer": - - file_name = meta.get("file") - featurizer_file = os.path.join(model_dir, file_name) - - if os.path.exists(featurizer_file): - vocabulary = utils.json_unpickle(featurizer_file) - - share_vocabulary = meta["use_shared_vocab"] - - if share_vocabulary: - vectorizers = cls.create_shared_vocab_vectorizers( - token_pattern=meta["token_pattern"], - strip_accents=meta["strip_accents"], - lowercase=meta["lowercase"], - stop_words=meta["stop_words"], - ngram_range=(meta["min_ngram"], meta["max_ngram"]), - max_df=meta["max_df"], - min_df=meta["min_df"], - max_features=meta["max_features"], - analyzer=meta["analyzer"], - vocabulary=vocabulary, - ) - else: - vectorizers = cls.create_independent_vocab_vectorizers( - token_pattern=meta["token_pattern"], - strip_accents=meta["strip_accents"], - lowercase=meta["lowercase"], - stop_words=meta["stop_words"], - ngram_range=(meta["min_ngram"], meta["max_ngram"]), - max_df=meta["max_df"], - min_df=meta["min_df"], - max_features=meta["max_features"], - analyzer=meta["analyzer"], - vocabulary=vocabulary, - ) - - return cls(meta, vectorizers) - else: - return cls(meta) diff --git a/rasa/nlu/featurizers/dense_featurizer/__init__.py b/rasa/nlu/featurizers/dense_featurizer/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/rasa/nlu/featurizers/dense_featurizer/convert_featurizer.py b/rasa/nlu/featurizers/dense_featurizer/convert_featurizer.py new file mode 100644 index 000000000000..f0ca09c95e84 --- /dev/null +++ b/rasa/nlu/featurizers/dense_featurizer/convert_featurizer.py @@ -0,0 +1,245 @@ +import logging + +from typing import Any, Dict, List, NoReturn, Optional, Text, Tuple, Type +from tqdm import tqdm + +from rasa.nlu.tokenizers.convert_tokenizer import ConveRTTokenizer +from rasa.constants import DOCS_URL_COMPONENTS +from rasa.nlu.tokenizers.tokenizer import Token +from rasa.nlu.components import Component +from rasa.nlu.featurizers.featurizer import DenseFeaturizer, Features +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.training_data import Message, TrainingData +from rasa.nlu.constants import ( + TEXT, + DENSE_FEATURIZABLE_ATTRIBUTES, + FEATURIZER_CLASS_ALIAS, + FEATURE_TYPE_SEQUENCE, + FEATURE_TYPE_SENTENCE, + TOKENS_NAMES, +) +import numpy as np +import tensorflow as tf + +import rasa.utils.train_utils as train_utils +import rasa.utils.common as common_utils + +logger = logging.getLogger(__name__) + + +class ConveRTFeaturizer(DenseFeaturizer): + """Featurizer using 
ConveRT model. + + Loads the ConveRT(https://github.com/PolyAI-LDN/polyai-models#convert) + model from TFHub and computes sentence and sequence level feature representations + for dense featurizable attributes of each message object. + """ + + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [ConveRTTokenizer] + + @classmethod + def required_packages(cls) -> List[Text]: + return ["tensorflow_text", "tensorflow_hub"] + + def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None: + + super(ConveRTFeaturizer, self).__init__(component_config) + + def __get_signature(self, signature: Text, module: Any) -> NoReturn: + """Retrieve a signature from a (hopefully loaded) TF model.""" + + if not module: + raise Exception( + "ConveRTFeaturizer needs a proper loaded tensorflow module when used. " + "Make sure to pass a module when training and using the component." + ) + + return module.signatures[signature] + + def _compute_features( + self, batch_examples: List[Message], module: Any, attribute: Text = TEXT + ) -> Tuple[np.ndarray, np.ndarray]: + + sentence_encodings = self._compute_sentence_encodings( + batch_examples, module, attribute + ) + + ( + sequence_encodings, + number_of_tokens_in_sentence, + ) = self._compute_sequence_encodings(batch_examples, module, attribute) + + return self._get_features( + sentence_encodings, sequence_encodings, number_of_tokens_in_sentence + ) + + def _compute_sentence_encodings( + self, batch_examples: List[Message], module: Any, attribute: Text = TEXT + ) -> np.ndarray: + # Get text for attribute of each example + batch_attribute_text = [ex.get(attribute) for ex in batch_examples] + sentence_encodings = self._sentence_encoding_of_text( + batch_attribute_text, module + ) + + # convert them to a sequence of 1 + return np.reshape(sentence_encodings, (len(batch_examples), 1, -1)) + + def _compute_sequence_encodings( + self, batch_examples: List[Message], module: Any, attribute: Text = TEXT + ) -> Tuple[np.ndarray, List[int]]: + list_of_tokens = [ + example.get(TOKENS_NAMES[attribute]) for example in batch_examples + ] + + number_of_tokens_in_sentence = [ + len(sent_tokens) for sent_tokens in list_of_tokens + ] + + # join the tokens to get a clean text to ensure the sequence length of + # the returned embeddings from ConveRT matches the length of the tokens + # (including sub-tokens) + tokenized_texts = self._tokens_to_text(list_of_tokens) + token_features = self._sequence_encoding_of_text(tokenized_texts, module) + + # ConveRT might split up tokens into sub-tokens + # take the mean of the sub-token vectors and use that as the token vector + token_features = train_utils.align_token_features( + list_of_tokens, token_features + ) + + return token_features, number_of_tokens_in_sentence + + @staticmethod + def _get_features( + sentence_encodings: np.ndarray, + sequence_encodings: np.ndarray, + number_of_tokens_in_sentence: List[int], + ) -> Tuple[np.ndarray, np.ndarray]: + """Get the sequence and sentence features.""" + + sentence_embeddings = [] + sequence_embeddings = [] + + for index in range(len(number_of_tokens_in_sentence)): + sequence_length = number_of_tokens_in_sentence[index] + sequence_encoding = sequence_encodings[index][:sequence_length] + sentence_encoding = sentence_encodings[index] + + sequence_embeddings.append(sequence_encoding) + sentence_embeddings.append(sentence_encoding) + + return np.array(sequence_embeddings), np.array(sentence_embeddings) + + @staticmethod + def _tokens_to_text(list_of_tokens: 
List[List[Token]]) -> List[Text]: + """Convert list of tokens to text. + + Add a whitespace between two tokens if the end value of the first tokens is + not the same as the end value of the second token.""" + texts = [] + for tokens in list_of_tokens: + text = "" + offset = 0 + for token in tokens: + if offset != token.start: + text += " " + text += token.text + + offset = token.end + texts.append(text) + + return texts + + def _sentence_encoding_of_text(self, batch: List[Text], module: Any) -> np.ndarray: + signature = self.__get_signature("default", module) + return signature(tf.convert_to_tensor(batch))["default"].numpy() + + def _sequence_encoding_of_text(self, batch: List[Text], module: Any) -> np.ndarray: + signature = self.__get_signature("encode_sequence", module) + + return signature(tf.convert_to_tensor(batch))["sequence_encoding"].numpy() + + def train( + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + *, + tf_hub_module: Any = None, + **kwargs: Any, + ) -> None: + if config is not None and config.language != "en": + common_utils.raise_warning( + f"Since ``ConveRT`` model is trained only on an english " + f"corpus of conversations, this featurizer should only be " + f"used if your training data is in english language. " + f"However, you are training in '{config.language}'. ", + docs=DOCS_URL_COMPONENTS + "#convertfeaturizer", + ) + + batch_size = 64 + + for attribute in DENSE_FEATURIZABLE_ATTRIBUTES: + + non_empty_examples = list( + filter(lambda x: x.get(attribute), training_data.training_examples) + ) + + progress_bar = tqdm( + range(0, len(non_empty_examples), batch_size), + desc=attribute.capitalize() + " batches", + ) + for batch_start_index in progress_bar: + batch_end_index = min( + batch_start_index + batch_size, len(non_empty_examples) + ) + + # Collect batch examples + batch_examples = non_empty_examples[batch_start_index:batch_end_index] + + ( + batch_sequence_features, + batch_sentence_features, + ) = self._compute_features(batch_examples, tf_hub_module, attribute) + + self._set_features( + batch_examples, + batch_sequence_features, + batch_sentence_features, + attribute, + ) + + def process( + self, message: Message, *, tf_hub_module: Any = None, **kwargs: Any + ) -> None: + sequence_features, sentence_features = self._compute_features( + [message], tf_hub_module + ) + + self._set_features([message], sequence_features, sentence_features, TEXT) + + def _set_features( + self, + examples: List[Message], + sequence_features: np.ndarray, + sentence_features: np.ndarray, + attribute: Text, + ) -> None: + for index, example in enumerate(examples): + _sequence_features = Features( + sequence_features[index], + FEATURE_TYPE_SEQUENCE, + attribute, + self.component_config[FEATURIZER_CLASS_ALIAS], + ) + example.add_features(_sequence_features) + + _sentence_features = Features( + sentence_features[index], + FEATURE_TYPE_SENTENCE, + attribute, + self.component_config[FEATURIZER_CLASS_ALIAS], + ) + example.add_features(_sentence_features) diff --git a/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py b/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py new file mode 100644 index 000000000000..a22b8acfdc21 --- /dev/null +++ b/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py @@ -0,0 +1,79 @@ +from typing import Any, Optional, Text, List, Type + +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.components import Component +from rasa.nlu.featurizers.featurizer import DenseFeaturizer, Features +from 
rasa.nlu.utils.hugging_face.hf_transformers import HFTransformersNLP +from rasa.nlu.tokenizers.lm_tokenizer import LanguageModelTokenizer +from rasa.nlu.training_data import Message, TrainingData +from rasa.nlu.constants import ( + TEXT, + LANGUAGE_MODEL_DOCS, + DENSE_FEATURIZABLE_ATTRIBUTES, + SEQUENCE_FEATURES, + SENTENCE_FEATURES, + FEATURE_TYPE_SENTENCE, + FEATURE_TYPE_SEQUENCE, + FEATURIZER_CLASS_ALIAS, +) + + +class LanguageModelFeaturizer(DenseFeaturizer): + """Featurizer using transformer based language models. + + Uses the output of HFTransformersNLP component to set the sequence and sentence + level representations for dense featurizable attributes of each message object. + """ + + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [HFTransformersNLP, LanguageModelTokenizer] + + def train( + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, + ) -> None: + + for example in training_data.training_examples: + for attribute in DENSE_FEATURIZABLE_ATTRIBUTES: + self._set_lm_features(example, attribute) + + def _get_doc(self, message: Message, attribute: Text) -> Any: + """ + Get the language model doc. A doc consists of + {'token_ids': ..., 'tokens': ..., + 'sequence_features': ..., 'sentence_features': ...} + """ + return message.get(LANGUAGE_MODEL_DOCS[attribute]) + + def process(self, message: Message, **kwargs: Any) -> None: + """Sets the dense features from the language model doc to the incoming + message.""" + self._set_lm_features(message) + + def _set_lm_features(self, message: Message, attribute: Text = TEXT) -> None: + """Adds the precomputed word vectors to the messages features.""" + doc = self._get_doc(message, attribute) + + if doc is None: + return + + sequence_features = doc[SEQUENCE_FEATURES] + sentence_features = doc[SENTENCE_FEATURES] + + final_sequence_features = Features( + sequence_features, + FEATURE_TYPE_SEQUENCE, + attribute, + self.component_config[FEATURIZER_CLASS_ALIAS], + ) + message.add_features(final_sequence_features) + final_sentence_features = Features( + sentence_features, + FEATURE_TYPE_SENTENCE, + attribute, + self.component_config[FEATURIZER_CLASS_ALIAS], + ) + message.add_features(final_sentence_features) diff --git a/rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py b/rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py new file mode 100644 index 000000000000..ec9e3879d4ae --- /dev/null +++ b/rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py @@ -0,0 +1,133 @@ +import numpy as np +import typing +from typing import Any, List, Text, Optional, Dict, Type, Tuple + +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.components import Component +from rasa.nlu.featurizers.featurizer import DenseFeaturizer, Features +from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer +from rasa.nlu.utils.mitie_utils import MitieNLP +from rasa.nlu.training_data import Message, TrainingData +from rasa.nlu.constants import ( + TEXT, + DENSE_FEATURIZABLE_ATTRIBUTES, + FEATURE_TYPE_SENTENCE, + FEATURE_TYPE_SEQUENCE, + FEATURIZER_CLASS_ALIAS, + TOKENS_NAMES, +) +from rasa.utils.tensorflow.constants import MEAN_POOLING, POOLING + +if typing.TYPE_CHECKING: + import mitie + + +class MitieFeaturizer(DenseFeaturizer): + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [MitieNLP, Tokenizer] + + defaults = { + # Specify what pooling operation should be used to calculate the vector of + # the complete utterance. 
Available options: 'mean' and 'max' + POOLING: MEAN_POOLING + } + + def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None: + super().__init__(component_config) + + self.pooling_operation = self.component_config["pooling"] + + @classmethod + def required_packages(cls) -> List[Text]: + return ["mitie", "numpy"] + + def ndim(self, feature_extractor: "mitie.total_word_feature_extractor") -> int: + return feature_extractor.num_dimensions + + def train( + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, + ) -> None: + + mitie_feature_extractor = self._mitie_feature_extractor(**kwargs) + for example in training_data.intent_examples: + for attribute in DENSE_FEATURIZABLE_ATTRIBUTES: + self.process_training_example( + example, attribute, mitie_feature_extractor + ) + + def process_training_example( + self, example: Message, attribute: Text, mitie_feature_extractor: Any + ): + tokens = example.get(TOKENS_NAMES[attribute]) + + if tokens is not None: + sequence_features, sentence_features = self.features_for_tokens( + tokens, mitie_feature_extractor + ) + + self._set_features(example, sequence_features, sentence_features, attribute) + + def process(self, message: Message, **kwargs: Any) -> None: + mitie_feature_extractor = self._mitie_feature_extractor(**kwargs) + tokens = message.get(TOKENS_NAMES[TEXT]) + sequence_features, sentence_features = self.features_for_tokens( + tokens, mitie_feature_extractor + ) + + self._set_features(message, sequence_features, sentence_features, TEXT) + + def _set_features( + self, + message: Message, + sequence_features: np.ndarray, + sentence_features: np.ndarray, + attribute: Text, + ): + final_sequence_features = Features( + sequence_features, + FEATURE_TYPE_SEQUENCE, + attribute, + self.component_config[FEATURIZER_CLASS_ALIAS], + ) + message.add_features(final_sequence_features) + + final_sentence_features = Features( + sentence_features, + FEATURE_TYPE_SENTENCE, + attribute, + self.component_config[FEATURIZER_CLASS_ALIAS], + ) + message.add_features(final_sentence_features) + + def _mitie_feature_extractor(self, **kwargs) -> Any: + mitie_feature_extractor = kwargs.get("mitie_feature_extractor") + if not mitie_feature_extractor: + raise Exception( + "Failed to train 'MitieFeaturizer'. " + "Missing a proper MITIE feature extractor. " + "Make sure this component is preceded by " + "the 'MitieNLP' component in the pipeline " + "configuration." 
+ ) + return mitie_feature_extractor + + def features_for_tokens( + self, + tokens: List[Token], + feature_extractor: "mitie.total_word_feature_extractor", + ) -> Tuple[np.ndarray, np.ndarray]: + # calculate features + sequence_features = [] + for token in tokens: + sequence_features.append(feature_extractor.get_feature_vector(token.text)) + sequence_features = np.array(sequence_features) + + sentence_features = self._calculate_sentence_features( + sequence_features, self.pooling_operation + ) + + return sequence_features, sentence_features diff --git a/rasa/nlu/featurizers/dense_featurizer/spacy_featurizer.py b/rasa/nlu/featurizers/dense_featurizer/spacy_featurizer.py new file mode 100644 index 000000000000..d5c1df68846c --- /dev/null +++ b/rasa/nlu/featurizers/dense_featurizer/spacy_featurizer.py @@ -0,0 +1,96 @@ +import numpy as np +import typing +import logging +from typing import Any, Optional, Text, Dict, List, Type + +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.components import Component +from rasa.nlu.featurizers.featurizer import DenseFeaturizer, Features +from rasa.nlu.utils.spacy_utils import SpacyNLP +from rasa.nlu.tokenizers.spacy_tokenizer import SpacyTokenizer +from rasa.nlu.training_data import Message, TrainingData +from rasa.nlu.constants import ( + TEXT, + SPACY_DOCS, + DENSE_FEATURIZABLE_ATTRIBUTES, + FEATURE_TYPE_SENTENCE, + FEATURE_TYPE_SEQUENCE, + FEATURIZER_CLASS_ALIAS, +) +from rasa.utils.tensorflow.constants import POOLING, MEAN_POOLING + +if typing.TYPE_CHECKING: + from spacy.tokens import Doc + + +logger = logging.getLogger(__name__) + + +class SpacyFeaturizer(DenseFeaturizer): + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [SpacyNLP, SpacyTokenizer] + + defaults = { + # Specify what pooling operation should be used to calculate the vector of + # the complete utterance. Available options: 'mean' and 'max' + POOLING: MEAN_POOLING + } + + def __init__(self, component_config: Optional[Dict[Text, Any]] = None): + super().__init__(component_config) + + self.pooling_operation = self.component_config[POOLING] + + def _features_for_doc(self, doc: "Doc") -> np.ndarray: + """Feature vector for a single document / sentence / tokens.""" + return np.array([t.vector for t in doc if t.text and t.text.strip()]) + + def train( + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, + ) -> None: + + for example in training_data.intent_examples: + for attribute in DENSE_FEATURIZABLE_ATTRIBUTES: + self._set_spacy_features(example, attribute) + + def get_doc(self, message: Message, attribute: Text) -> Any: + return message.get(SPACY_DOCS[attribute]) + + def process(self, message: Message, **kwargs: Any) -> None: + self._set_spacy_features(message) + + def _set_spacy_features(self, message: Message, attribute: Text = TEXT) -> None: + """Adds the spacy word vectors to the message's features.""" + doc = self.get_doc(message, attribute) + + if doc is None: + return + + # in case an empty spaCy model was used, no vectors are present + if doc.vocab.vectors_length == 0: + logger.debug("No features present. 
You are using an empty spaCy model.") + return + + sequence_features = self._features_for_doc(doc) + sentence_features = self._calculate_sentence_features( + sequence_features, self.pooling_operation + ) + + final_sequence_features = Features( + sequence_features, + FEATURE_TYPE_SEQUENCE, + attribute, + self.component_config[FEATURIZER_CLASS_ALIAS], + ) + message.add_features(final_sequence_features) + final_sentence_features = Features( + sentence_features, + FEATURE_TYPE_SENTENCE, + attribute, + self.component_config[FEATURIZER_CLASS_ALIAS], + ) + message.add_features(final_sentence_features) diff --git a/rasa/nlu/featurizers/featurizer.py b/rasa/nlu/featurizers/featurizer.py new file mode 100644 index 000000000000..70b3f1183c43 --- /dev/null +++ b/rasa/nlu/featurizers/featurizer.py @@ -0,0 +1,138 @@ +import numpy as np +import scipy.sparse +from typing import Text, Union, Optional, Dict, Any + +from rasa.nlu.constants import FEATURIZER_CLASS_ALIAS +from rasa.nlu.constants import VALID_FEATURE_TYPES +from rasa.nlu.components import Component +from rasa.utils.tensorflow.constants import MEAN_POOLING, MAX_POOLING + + +class Features: + """Stores the features produced by any featurizer.""" + + def __init__( + self, + features: Union[np.ndarray, scipy.sparse.spmatrix], + feature_type: Text, + message_attribute: Text, + origin: Text, + ) -> None: + self._validate_feature_type(feature_type) + + self.features = features + self.type = feature_type + self.origin = origin + self.message_attribute = message_attribute + + @staticmethod + def _validate_feature_type(feature_type: Text) -> None: + if feature_type not in VALID_FEATURE_TYPES: + raise ValueError( + f"Invalid feature type '{feature_type}' used. Valid feature types are: " + f"{VALID_FEATURE_TYPES}." + ) + + def is_sparse(self) -> bool: + """Checks if features are sparse or not. + + Returns: + True, if features are sparse, false otherwise. + """ + return isinstance(self.features, scipy.sparse.spmatrix) + + def is_dense(self) -> bool: + """Checks if features are dense or not. + + Returns: + True, if features are dense, false otherwise. + """ + return not self.is_sparse() + + def combine_with_features( + self, additional_features: Optional[Union[np.ndarray, scipy.sparse.spmatrix]] + ) -> Optional[Union[np.ndarray, scipy.sparse.spmatrix]]: + """Combine the incoming features with this instance's features. + + Args: + additional_features: additional features to add + + Returns: + Combined features. + """ + if additional_features is None: + return self.features + + if self.is_dense() and isinstance(additional_features, np.ndarray): + return self._combine_dense_features(self.features, additional_features) + + if self.is_sparse() and isinstance(additional_features, scipy.sparse.spmatrix): + return self._combine_sparse_features(self.features, additional_features) + + raise ValueError("Cannot combine sparse and dense features.") + + @staticmethod + def _combine_dense_features( + features: np.ndarray, additional_features: np.ndarray + ) -> np.ndarray: + if features.ndim != additional_features.ndim: + raise ValueError( + f"Cannot combine dense features as sequence dimensions do not " + f"match: {features.ndim} != {additional_features.ndim}." 
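As a rough standalone illustration of what combining featurizer outputs amounts to (toy shapes, plain numpy and scipy; not the component code): dense features are concatenated along the last axis, sparse features are stacked column-wise.

import numpy as np
import scipy.sparse

# two dense featurizers produce [seq_len, dim] matrices for the same tokens
dense_a = np.ones((3, 2))
dense_b = np.zeros((3, 4))
combined_dense = np.concatenate((dense_a, dense_b), axis=-1)  # shape (3, 6)

# two sparse featurizers produce count matrices with the same number of rows
sparse_a = scipy.sparse.coo_matrix(np.eye(3))
sparse_b = scipy.sparse.coo_matrix(np.ones((3, 2)))
combined_sparse = scipy.sparse.hstack([sparse_a, sparse_b])  # shape (3, 5)

assert combined_dense.shape == (3, 6)
assert combined_sparse.shape == (3, 5)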
+ ) + + return np.concatenate((features, additional_features), axis=-1) + + @staticmethod + def _combine_sparse_features( + features: scipy.sparse.spmatrix, additional_features: scipy.sparse.spmatrix + ) -> scipy.sparse.spmatrix: + from scipy.sparse import hstack + + if features.shape[0] != additional_features.shape[0]: + raise ValueError( + f"Cannot combine sparse features as sequence dimensions do not " + f"match: {features.shape[0]} != {additional_features.shape[0]}." + ) + + return hstack([features, additional_features]) + + +class Featurizer(Component): + def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None: + if not component_config: + component_config = {} + + # makes sure the alias name is set + component_config.setdefault(FEATURIZER_CLASS_ALIAS, self.name) + + super().__init__(component_config) + + +class DenseFeaturizer(Featurizer): + @staticmethod + def _calculate_sentence_features( + features: np.ndarray, pooling_operation: Text + ) -> np.ndarray: + # take only non zeros feature vectors into account + non_zero_features = np.array([f for f in features if f.any()]) + + # if features are all zero just return a vector with all zeros + if non_zero_features.size == 0: + return np.zeros([1, features.shape[-1]]) + + if pooling_operation == MEAN_POOLING: + return np.mean(non_zero_features, axis=0, keepdims=True) + + if pooling_operation == MAX_POOLING: + return np.max(non_zero_features, axis=0, keepdims=True) + + raise ValueError( + f"Invalid pooling operation specified. Available operations are " + f"'{MEAN_POOLING}' or '{MAX_POOLING}', but provided value is " + f"'{pooling_operation}'." + ) + + +class SparseFeaturizer(Featurizer): + pass diff --git a/rasa/nlu/featurizers/mitie_featurizer.py b/rasa/nlu/featurizers/mitie_featurizer.py deleted file mode 100644 index 9d0dbb8f5a7c..000000000000 --- a/rasa/nlu/featurizers/mitie_featurizer.py +++ /dev/null @@ -1,106 +0,0 @@ -import numpy as np -import typing -from typing import Any, List, Text - -from rasa.nlu.config import RasaNLUModelConfig -from rasa.nlu.featurizers import Featurizer -from rasa.nlu.tokenizers import Token -from rasa.nlu.training_data import Message, TrainingData - -if typing.TYPE_CHECKING: - import mitie - -from rasa.nlu.constants import ( - MESSAGE_RESPONSE_ATTRIBUTE, - MESSAGE_INTENT_ATTRIBUTE, - MESSAGE_TEXT_ATTRIBUTE, - MESSAGE_TOKENS_NAMES, - MESSAGE_ATTRIBUTES, - MESSAGE_SPACY_FEATURES_NAMES, - MESSAGE_VECTOR_FEATURE_NAMES, -) - - -class MitieFeaturizer(Featurizer): - - provides = [ - MESSAGE_VECTOR_FEATURE_NAMES[attribute] for attribute in MESSAGE_ATTRIBUTES - ] - - requires = [MESSAGE_TOKENS_NAMES[attribute] for attribute in MESSAGE_ATTRIBUTES] + [ - "mitie_feature_extractor" - ] - - @classmethod - def required_packages(cls) -> List[Text]: - return ["mitie", "numpy"] - - def ndim(self, feature_extractor: "mitie.total_word_feature_extractor"): - - return feature_extractor.num_dimensions - - def get_tokens_by_attribute(self, example, attribute): - - return example.get(MESSAGE_TOKENS_NAMES[attribute]) - - def train( - self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any - ) -> None: - - mitie_feature_extractor = self._mitie_feature_extractor(**kwargs) - for example in training_data.intent_examples: - - for attribute in MESSAGE_ATTRIBUTES: - - attribute_tokens = self.get_tokens_by_attribute(example, attribute) - if attribute_tokens is not None: - - features = self.features_for_tokens( - attribute_tokens, mitie_feature_extractor - ) - example.set( - 
MESSAGE_VECTOR_FEATURE_NAMES[attribute], - self._combine_with_existing_features( - example, features, MESSAGE_VECTOR_FEATURE_NAMES[attribute] - ), - ) - - def process(self, message: Message, **kwargs: Any) -> None: - - mitie_feature_extractor = self._mitie_feature_extractor(**kwargs) - features = self.features_for_tokens( - message.get(MESSAGE_TOKENS_NAMES[MESSAGE_TEXT_ATTRIBUTE]), - mitie_feature_extractor, - ) - message.set( - MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE], - self._combine_with_existing_features( - message, features, MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE] - ), - ) - - def _mitie_feature_extractor(self, **kwargs): - mitie_feature_extractor = kwargs.get("mitie_feature_extractor") - if not mitie_feature_extractor: - raise Exception( - "Failed to train 'MitieFeaturizer'. " - "Missing a proper MITIE feature extractor. " - "Make sure this component is preceded by " - "the 'MitieNLP' component in the pipeline " - "configuration." - ) - return mitie_feature_extractor - - def features_for_tokens( - self, - tokens: List[Token], - feature_extractor: "mitie.total_word_feature_extractor", - ) -> np.ndarray: - - vec = np.zeros(self.ndim(feature_extractor)) - for token in tokens: - vec += feature_extractor.get_feature_vector(token.text) - if tokens: - return vec / len(tokens) - else: - return vec diff --git a/rasa/nlu/featurizers/ngram_featurizer.py b/rasa/nlu/featurizers/ngram_featurizer.py deleted file mode 100644 index 8ed675bb09cb..000000000000 --- a/rasa/nlu/featurizers/ngram_featurizer.py +++ /dev/null @@ -1,433 +0,0 @@ -import time -from collections import Counter - -import logging -import numpy as np -import os -import typing -import warnings -from string import punctuation -from typing import Any, Dict, List, Optional, Text - -from rasa.nlu import utils -from rasa.nlu.config import RasaNLUModelConfig -from rasa.nlu.featurizers import Featurizer -from rasa.nlu.training_data import Message, TrainingData -from rasa.nlu.utils import write_json_to_file -import rasa.utils.io -from rasa.nlu.constants import ( - MESSAGE_SPACY_FEATURES_NAMES, - MESSAGE_TEXT_ATTRIBUTE, - MESSAGE_VECTOR_FEATURE_NAMES, -) - -logger = logging.getLogger(__name__) - -if typing.TYPE_CHECKING: - from rasa.nlu.model import Metadata - - -class NGramFeaturizer(Featurizer): - - provides = [MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE]] - - requires = [MESSAGE_SPACY_FEATURES_NAMES[MESSAGE_TEXT_ATTRIBUTE]] - - defaults = { - # defines the maximum number of ngrams to collect and add - # to the featurization of a sentence - "max_number_of_ngrams": 10, - # the minimal length in characters of an ngram to be eligible - "ngram_min_length": 3, - # the maximal length in characters of an ngram to be eligible - "ngram_max_length": 17, - # the minimal number of times an ngram needs to occur in the - # training data to be considered as a feature - "ngram_min_occurrences": 5, - # during cross validation (used to detect which ngrams are most - # valuable) every intent with fever examples than this config - # value will be excluded - "min_intent_examples": 4, - } - - def __init__( - self, - component_config: Optional[Dict[Text, Any]] = None, - all_ngrams: Optional[List[Text]] = None, - best_num_ngrams: Optional[int] = None, - ): - super(NGramFeaturizer, self).__init__(component_config) - - self.best_num_ngrams = best_num_ngrams - self.all_ngrams = all_ngrams - - @classmethod - def required_packages(cls) -> List[Text]: - return ["spacy", "sklearn"] - - def train( - self, training_data: TrainingData, cfg: 
RasaNLUModelConfig, **kwargs: Any - ): - - start = time.time() - self.train_on_sentences(training_data.intent_examples) - logger.debug("Ngram collection took {} seconds".format(time.time() - start)) - - for example in training_data.training_examples: - updated = self._text_features_with_ngrams(example, self.best_num_ngrams) - example.set(MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE], updated) - - def process(self, message: Message, **kwargs: Any): - - updated = self._text_features_with_ngrams(message, self.best_num_ngrams) - message.set(MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE], updated) - - def _text_features_with_ngrams(self, message, max_ngrams): - - ngrams_to_use = self._ngrams_to_use(max_ngrams) - - if ngrams_to_use is not None: - extras = np.array(self._ngrams_in_sentence(message, ngrams_to_use)) - return self._combine_with_existing_features(message, extras) - else: - return message.get(MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE]) - - @classmethod - def load( - cls, - meta: Dict[Text, Any], - model_dir: Optional[Text] = None, - model_metadata: Optional["Metadata"] = None, - cached_component: Optional["NGramFeaturizer"] = None, - **kwargs: Any - ) -> "NGramFeaturizer": - - file_name = meta.get("file") - featurizer_file = os.path.join(model_dir, file_name) - - if os.path.exists(featurizer_file): - data = rasa.utils.io.read_json_file(featurizer_file) - return NGramFeaturizer(meta, data["all_ngrams"], data["best_num_ngrams"]) - else: - return NGramFeaturizer(meta) - - def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]: - """Persist this model into the passed directory.""" - - file_name = file_name + ".json" - featurizer_file = os.path.join(model_dir, file_name) - data = {"all_ngrams": self.all_ngrams, "best_num_ngrams": self.best_num_ngrams} - - write_json_to_file(featurizer_file, data, separators=(",", ": ")) - - return {"file": file_name} - - def train_on_sentences(self, examples): - labels = [e.get("intent") for e in examples] - self.all_ngrams = self._get_best_ngrams(examples, labels) - self.best_num_ngrams = self._cross_validation(examples, labels) - - def _ngrams_to_use(self, num_ngrams): - if num_ngrams == 0 or self.all_ngrams is None: - return [] - elif num_ngrams is not None: - return self.all_ngrams[:num_ngrams] - else: - return self.all_ngrams - - def _get_best_ngrams(self, examples, labels): - """Return an ordered list of the best character ngrams.""" - - oov_strings = self._remove_in_vocab_words(examples) - ngrams = self._generate_all_ngrams( - oov_strings, self.component_config["ngram_min_length"] - ) - return self._sort_applicable_ngrams(ngrams, examples, labels) - - def _remove_in_vocab_words(self, examples): - """Automatically removes words with digits in them, that may be a - hyperlink or that _are_ in vocabulary for the nlp.""" - - new_sents = [] - for example in examples: - new_sents.append(self._remove_in_vocab_words_from_sentence(example)) - return new_sents - - @staticmethod - def _is_ngram_worthy(token): - """Decide if we should use this token for ngram counting. 
- - Excludes every word with digits in them, hyperlinks or - an assigned word vector.""" - return ( - not token.has_vector - and not token.like_url - and not token.like_num - and not token.like_email - and not token.is_punct - ) - - def _remove_in_vocab_words_from_sentence(self, example): - """Filter for words that do not have a word vector.""" - - cleaned_tokens = [ - token - for token in example.get( - MESSAGE_SPACY_FEATURES_NAMES[MESSAGE_TEXT_ATTRIBUTE] - ) - if self._is_ngram_worthy(token) - ] - - # keep only out-of-vocab 'non_word' words - non_words = " ".join([t.text for t in cleaned_tokens]) - - # remove digits and extra spaces - non_words = "".join([letter for letter in non_words if not letter.isdigit()]) - non_words = " ".join([word for word in non_words.split(" ") if word != ""]) - - # add cleaned sentence to list of these sentences - return non_words - - def _intents_with_enough_examples(self, labels, examples): - """Filter examples where we do not have a min number of examples.""" - - min_intent_examples = self.component_config["min_intent_examples"] - usable_labels = [] - - for label in np.unique(labels): - lab_sents = np.array(examples)[np.array(labels) == label] - if len(lab_sents) < min_intent_examples: - continue - usable_labels.append(label) - - return usable_labels - - def _rank_ngrams_using_cv(self, examples, labels, list_of_ngrams): - from sklearn import linear_model - - X = np.array(self._ngrams_in_sentences(examples, list_of_ngrams)) - y = self.encode_labels(labels) - - clf = linear_model.RandomizedLogisticRegression(C=1) - clf.fit(X, y) - - # sort the ngrams according to the classification score - scores = clf.scores_ - sorted_idxs = sorted(enumerate(scores), key=lambda x: -1 * x[1]) - sorted_ngrams = [list_of_ngrams[i[0]] for i in sorted_idxs] - - return sorted_ngrams - - def _sort_applicable_ngrams(self, ngrams_list, examples, labels): - """Given an intent classification problem and a list of ngrams, - - creates ordered list of most useful ngrams.""" - - if not ngrams_list: - return [] - - # make sure we have enough labeled instances for cv - usable_labels = self._intents_with_enough_examples(labels, examples) - - mask = [label in usable_labels for label in labels] - if any(mask) and len(usable_labels) >= 2: - try: - examples = np.array(examples)[mask] - labels = np.array(labels)[mask] - - return self._rank_ngrams_using_cv(examples, labels, ngrams_list) - except ValueError as e: - if "needs samples of at least 2 classes" in str(e): - # we got unlucky during the random - # sampling :( and selected a slice that - # only contains one class - return [] - else: - raise e - else: - # there is no example we can use for the cross validation - return [] - - def _ngrams_in_sentences(self, examples, ngrams): - """Given a set of sentences, returns a feature vector for each sentence. - - The first $k$ elements are from the `intent_features`, - the rest are {1,0} elements denoting whether an ngram is in sentence.""" - - all_vectors = [] - for example in examples: - presence_vector = self._ngrams_in_sentence(example, ngrams) - all_vectors.append(presence_vector) - return all_vectors - - def _ngrams_in_sentence(self, example, ngrams): - """Given a set of sentences, return a vector indicating ngram presence. 
- - The vector will return 1 entries if the corresponding ngram is - present in the sentence and 0 if it is not.""" - - cleaned_sentence = self._remove_in_vocab_words_from_sentence(example) - presence_vector = np.zeros(len(ngrams)) - idx_array = [ - idx for idx in range(len(ngrams)) if ngrams[idx] in cleaned_sentence - ] - presence_vector[idx_array] = 1 - return presence_vector - - def _generate_all_ngrams(self, list_of_strings, ngram_min_length): - """Takes a list of strings and generates all character ngrams. - - Generated ngrams are at least 3 characters (and at most 17), - occur at least 5 times and occur independently of longer - superset ngrams at least once.""" - - features = {} - counters = {ngram_min_length - 1: Counter()} - max_length = self.component_config["ngram_max_length"] - - for n in range(ngram_min_length, max_length): - candidates = [] - features[n] = [] - counters[n] = Counter() - - # generate all possible n length ngrams - for text in list_of_strings: - text = text.replace(punctuation, " ") - for word in text.lower().split(" "): - cands = [word[i : i + n] for i in range(len(word) - n)] - for cand in cands: - counters[n][cand] += 1 - if cand not in candidates: - candidates.append(cand) - - min_count = self.component_config["ngram_min_occurrences"] - # iterate over these candidates picking only the applicable ones - for can in candidates: - if counters[n][can] >= min_count: - features[n].append(can) - begin = can[:-1] - end = can[1:] - if n >= ngram_min_length: - if ( - counters[n - 1][begin] == counters[n][can] - and begin in features[n - 1] - ): - features[n - 1].remove(begin) - if ( - counters[n - 1][end] == counters[n][can] - and end in features[n - 1] - ): - features[n - 1].remove(end) - - return [item for sublist in list(features.values()) for item in sublist] - - @staticmethod - def _collect_features(examples): - if examples: - collected_features = [ - e.get(MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE]) - for e in examples - if e.get(MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE]) - is not None - ] - else: - collected_features = [] - - if collected_features: - return np.stack(collected_features) - else: - return None - - def _append_ngram_features(self, examples, existing_features, max_ngrams): - ngrams_to_use = self._ngrams_to_use(max_ngrams) - extras = np.array(self._ngrams_in_sentences(examples, ngrams_to_use)) - if existing_features is not None: - return np.hstack((existing_features, extras)) - else: - return extras - - @staticmethod - def _num_cv_splits(y): - return min(10, np.min(np.bincount(y))) if y.size > 0 else 0 - - @staticmethod - def encode_labels(labels): - from sklearn import preprocessing - - intent_encoder = preprocessing.LabelEncoder() - intent_encoder.fit(labels) - return intent_encoder.transform(labels) - - def _score_ngram_selection( - self, examples, y, existing_text_features, cv_splits, max_ngrams - ): - from sklearn.model_selection import cross_val_score - from sklearn.linear_model import LogisticRegression - - if existing_text_features is None: - return 0.0 - - clf = LogisticRegression(class_weight="balanced") - - no_ngrams_X = self._append_ngram_features( - examples, existing_text_features, max_ngrams - ) - return np.mean(cross_val_score(clf, no_ngrams_X, y, cv=cv_splits)) - - @staticmethod - def _generate_test_points(max_ngrams): - """Generate a list of increasing numbers. - - They are used to take the best n ngrams and evaluate them. This n - is varied to find the best number of ngrams to use. 
This function - defines the number of ngrams that get tested.""" - - possible_ngrams = np.linspace(0, max_ngrams, 8) - return np.unique(list(map(int, np.floor(possible_ngrams)))) - - def _cross_validation(self, examples, labels) -> int: - """Choose the best number of ngrams to include in bow. - - Given an intent classification problem and a set of ordered ngrams - (ordered in terms of importance by pick_applicable_ngrams) we - choose the best number of ngrams to include in our bow vecs - by cross validation.""" - - max_ngrams = self.component_config["max_number_of_ngrams"] - - if not self.all_ngrams: - logger.debug("Found no ngrams. Using existing features.") - return 0 - - existing_text_features = self._collect_features(examples) - - y = self.encode_labels(labels) - cv_splits = self._num_cv_splits(y) - - if cv_splits >= 3: - logger.debug( - "Started ngram cross-validation to find b" - "est number of ngrams to use..." - ) - - scores = [] - num_ngrams = self._generate_test_points(max_ngrams) - for n in num_ngrams: - score = self._score_ngram_selection( - examples, y, existing_text_features, cv_splits, max_ngrams=n - ) - scores.append(score) - logger.debug( - "Evaluating usage of {} ngrams. Score: {}".format(n, score) - ) - - n_top = num_ngrams[np.argmax(scores)] - logger.info("Best score with {} ngrams: {}".format(n_top, np.max(scores))) - return n_top.item() - else: - warnings.warn( - "Can't cross-validate ngram featurizer. " - "There aren't enough examples per intent " - "(at least 3)" - ) - return max_ngrams diff --git a/rasa/nlu/featurizers/regex_featurizer.py b/rasa/nlu/featurizers/regex_featurizer.py deleted file mode 100644 index efdf7516a703..000000000000 --- a/rasa/nlu/featurizers/regex_featurizer.py +++ /dev/null @@ -1,159 +0,0 @@ -import io -import logging -import numpy as np -import os -import re -import typing -from typing import Any, Dict, Optional, Text - -from rasa.nlu import utils -from rasa.nlu.config import RasaNLUModelConfig -from rasa.nlu.featurizers import Featurizer -from rasa.nlu.training_data import Message, TrainingData -import rasa.utils.io -from rasa.nlu.constants import ( - MESSAGE_TOKENS_NAMES, - MESSAGE_TEXT_ATTRIBUTE, - MESSAGE_VECTOR_FEATURE_NAMES, -) - -logger = logging.getLogger(__name__) - -if typing.TYPE_CHECKING: - from rasa.nlu.model import Metadata - - -class RegexFeaturizer(Featurizer): - - provides = [MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE]] - - requires = [MESSAGE_TOKENS_NAMES[MESSAGE_TEXT_ATTRIBUTE]] - - def __init__(self, component_config=None, known_patterns=None, lookup_tables=None): - - super(RegexFeaturizer, self).__init__(component_config) - - self.known_patterns = known_patterns if known_patterns else [] - lookup_tables = lookup_tables or [] - self._add_lookup_table_regexes(lookup_tables) - - def train( - self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any - ) -> None: - - self.known_patterns = training_data.regex_features - self._add_lookup_table_regexes(training_data.lookup_tables) - - for example in training_data.training_examples: - updated = self._text_features_with_regex(example) - example.set(MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE], updated) - - def process(self, message: Message, **kwargs: Any) -> None: - - updated = self._text_features_with_regex(message) - message.set(MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE], updated) - - def _text_features_with_regex(self, message): - if self.known_patterns: - extras = self.features_for_patterns(message) - return 
self._combine_with_existing_features(message, extras) - else: - return message.get(MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE]) - - def _add_lookup_table_regexes(self, lookup_tables): - # appends the regex features from the lookup tables to - # self.known_patterns - for table in lookup_tables: - regex_pattern = self._generate_lookup_regex(table) - lookup_regex = {"name": table["name"], "pattern": regex_pattern} - self.known_patterns.append(lookup_regex) - - def features_for_patterns(self, message): - """Checks which known patterns match the message. - - Given a sentence, returns a vector of {1,0} values indicating which - regexes did match. Furthermore, if the - message is tokenized, the function will mark all tokens with a dict - relating the name of the regex to whether it was matched.""" - - found_patterns = [] - for exp in self.known_patterns: - matches = re.finditer(exp["pattern"], message.text) - matches = list(matches) - found_patterns.append(False) - for token_index, t in enumerate( - message.get(MESSAGE_TOKENS_NAMES[MESSAGE_TEXT_ATTRIBUTE], []) - ): - patterns = t.get("pattern", default={}) - patterns[exp["name"]] = False - - for match in matches: - if t.offset < match.end() and t.end > match.start(): - patterns[exp["name"]] = True - found_patterns[-1] = True - - t.set("pattern", patterns) - - return np.array(found_patterns).astype(float) - - def _generate_lookup_regex(self, lookup_table): - """creates a regex out of the contents of a lookup table file""" - lookup_elements = lookup_table["elements"] - elements_to_regex = [] - - # if it's a list, it should be the elements directly - if isinstance(lookup_elements, list): - elements_to_regex = lookup_elements - - # otherwise it's a file path. - else: - - try: - f = io.open(lookup_elements, "r", encoding="utf-8") - except IOError: - raise ValueError( - "Could not load lookup table {}" - "Make sure you've provided the correct path".format(lookup_elements) - ) - - with f: - for line in f: - new_element = line.strip() - if new_element: - elements_to_regex.append(new_element) - - # sanitize the regex, escape special characters - elements_sanitized = [re.escape(e) for e in elements_to_regex] - - # regex matching elements with word boundaries on either side - regex_string = "(?i)(\\b" + "\\b|\\b".join(elements_sanitized) + "\\b)" - return regex_string - - @classmethod - def load( - cls, - meta: Dict[Text, Any], - model_dir: Optional[Text] = None, - model_metadata: Optional["Metadata"] = None, - cached_component: Optional["RegexFeaturizer"] = None, - **kwargs: Any - ) -> "RegexFeaturizer": - - file_name = meta.get("file") - regex_file = os.path.join(model_dir, file_name) - - if os.path.exists(regex_file): - known_patterns = rasa.utils.io.read_json_file(regex_file) - return RegexFeaturizer(meta, known_patterns=known_patterns) - else: - return RegexFeaturizer(meta) - - def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]: - """Persist this model into the passed directory. 
- - Return the metadata necessary to load the model again.""" - file_name = file_name + ".pkl" - regex_file = os.path.join(model_dir, file_name) - utils.write_json_to_file(regex_file, self.known_patterns, indent=4) - - return {"file": file_name} diff --git a/rasa/nlu/featurizers/spacy_featurizer.py b/rasa/nlu/featurizers/spacy_featurizer.py deleted file mode 100644 index 709eafe13069..000000000000 --- a/rasa/nlu/featurizers/spacy_featurizer.py +++ /dev/null @@ -1,72 +0,0 @@ -import numpy as np -import typing -from typing import Any - -from rasa.nlu.config import RasaNLUModelConfig -from rasa.nlu.featurizers import Featurizer -from rasa.nlu.training_data import Message, TrainingData - -if typing.TYPE_CHECKING: - from spacy.language import Language - from spacy.tokens import Doc - -from rasa.nlu.constants import ( - MESSAGE_RESPONSE_ATTRIBUTE, - MESSAGE_INTENT_ATTRIBUTE, - MESSAGE_TEXT_ATTRIBUTE, - MESSAGE_TOKENS_NAMES, - MESSAGE_ATTRIBUTES, - MESSAGE_SPACY_FEATURES_NAMES, - MESSAGE_VECTOR_FEATURE_NAMES, - SPACY_FEATURIZABLE_ATTRIBUTES, -) - - -def ndim(spacy_nlp: "Language") -> int: - """Number of features used to represent a document / sentence.""" - return spacy_nlp.vocab.vectors_length - - -def features_for_doc(doc: "Doc") -> np.ndarray: - """Feature vector for a single document / sentence.""" - return doc.vector - - -class SpacyFeaturizer(Featurizer): - - provides = [ - MESSAGE_VECTOR_FEATURE_NAMES[attribute] - for attribute in SPACY_FEATURIZABLE_ATTRIBUTES - ] - - requires = [ - MESSAGE_SPACY_FEATURES_NAMES[attribute] - for attribute in SPACY_FEATURIZABLE_ATTRIBUTES - ] - - def train( - self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any - ) -> None: - - for example in training_data.intent_examples: - for attribute in SPACY_FEATURIZABLE_ATTRIBUTES: - self._set_spacy_features(example, attribute) - - def get_doc(self, message, attribute): - - return message.get(MESSAGE_SPACY_FEATURES_NAMES[attribute]) - - def process(self, message: Message, **kwargs: Any) -> None: - - self._set_spacy_features(message) - - def _set_spacy_features(self, message, attribute=MESSAGE_TEXT_ATTRIBUTE): - """Adds the spacy word vectors to the messages features.""" - - message_attribute_doc = self.get_doc(message, attribute) - if message_attribute_doc is not None: - fs = features_for_doc(message_attribute_doc) - features = self._combine_with_existing_features( - message, fs, MESSAGE_VECTOR_FEATURE_NAMES[attribute] - ) - message.set(MESSAGE_VECTOR_FEATURE_NAMES[attribute], features) diff --git a/rasa/nlu/featurizers/sparse_featurizer/__init__.py b/rasa/nlu/featurizers/sparse_featurizer/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py b/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py new file mode 100644 index 000000000000..1d1e08cc2c34 --- /dev/null +++ b/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py @@ -0,0 +1,680 @@ +import logging +import os +import re +import scipy.sparse +from typing import Any, Dict, List, Optional, Text, Type, Tuple + +from rasa.constants import DOCS_URL_COMPONENTS +import rasa.utils.common as common_utils +import rasa.utils.io as io_utils +from sklearn.feature_extraction.text import CountVectorizer +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.tokenizers.tokenizer import Tokenizer +from rasa.nlu.components import Component +from rasa.nlu.featurizers.featurizer import SparseFeaturizer, Features +from rasa.nlu.model 
import Metadata +from rasa.nlu.training_data import Message, TrainingData +from rasa.nlu.constants import ( + TEXT, + TOKENS_NAMES, + MESSAGE_ATTRIBUTES, + INTENT, + DENSE_FEATURIZABLE_ATTRIBUTES, + RESPONSE, + FEATURE_TYPE_SEQUENCE, + FEATURE_TYPE_SENTENCE, + FEATURIZER_CLASS_ALIAS, +) + +logger = logging.getLogger(__name__) + + +class CountVectorsFeaturizer(SparseFeaturizer): + """Creates a sequence of token count features based on sklearn's `CountVectorizer`. + + All tokens which consist only of digits (e.g. 123 and 99 + but not ab12d) will be represented by a single feature. + + Set `analyzer` to 'char_wb' + to use the idea of Subword Semantic Hashing + from https://arxiv.org/abs/1810.07150. + """ + + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [Tokenizer] + + defaults = { + # whether to use a shared vocab + "use_shared_vocab": False, + # the parameters are taken from + # sklearn's CountVectorizer + # whether to use word or character n-grams + # 'char_wb' creates character n-grams inside word boundaries + # n-grams at the edges of words are padded with space. + "analyzer": "word", # use 'char' or 'char_wb' for character + # remove accents during the preprocessing step + "strip_accents": None, # {'ascii', 'unicode', None} + # list of stop words + "stop_words": None, # string {'english'}, list, or None (default) + # min document frequency of a word to add to vocabulary + # float - the parameter represents a proportion of documents + # integer - absolute counts + "min_df": 1, # float in range [0.0, 1.0] or int + # max document frequency of a word to add to vocabulary + # float - the parameter represents a proportion of documents + # integer - absolute counts + "max_df": 1.0, # float in range [0.0, 1.0] or int + # set range of ngrams to be extracted + "min_ngram": 1, # int + "max_ngram": 1, # int + # limit vocabulary size + "max_features": None, # int or None + # whether to convert all characters to lowercase + "lowercase": True, # bool + # handling Out-Of-Vocabulary (OOV) words + # will be converted to lowercase if lowercase is True + "OOV_token": None, # string or None + "OOV_words": [], # string or list of strings + } + + @classmethod + def required_packages(cls) -> List[Text]: + return ["sklearn"] + + def _load_count_vect_params(self) -> None: + + # Use shared vocabulary between text and all other attributes of Message + self.use_shared_vocab = self.component_config["use_shared_vocab"] + + # set analyzer + self.analyzer = self.component_config["analyzer"] + + # remove accents during the preprocessing step + self.strip_accents = self.component_config["strip_accents"] + + # list of stop words + self.stop_words = self.component_config["stop_words"] + + # min number of word occurrences in the document to add to vocabulary + self.min_df = self.component_config["min_df"] + + # max number (fraction if float) of word occurrences + # in the document to add to vocabulary + self.max_df = self.component_config["max_df"] + + # set ngram range + self.min_ngram = self.component_config["min_ngram"] + self.max_ngram = self.component_config["max_ngram"] + + # limit vocabulary size + self.max_features = self.component_config["max_features"] + + # whether to convert all characters to lowercase + self.lowercase = self.component_config["lowercase"] + + # noinspection PyPep8Naming + def _load_OOV_params(self) -> None: + self.OOV_token = self.component_config["OOV_token"] + + self.OOV_words = self.component_config["OOV_words"] + if self.OOV_words and not self.OOV_token: + logger.error( 
"The list OOV_words={} was given, but " + "OOV_token was not. OOV words are ignored." + "".format(self.OOV_words) + ) + self.OOV_words = [] + + if self.lowercase and self.OOV_token: + # convert to lowercase + self.OOV_token = self.OOV_token.lower() + if self.OOV_words: + self.OOV_words = [w.lower() for w in self.OOV_words] + + def _check_attribute_vocabulary(self, attribute: Text) -> bool: + """Check if trained vocabulary exists in attribute's count vectorizer""" + try: + return hasattr(self.vectorizers[attribute], "vocabulary_") + except (AttributeError, TypeError): + return False + + def _get_attribute_vocabulary(self, attribute: Text) -> Optional[Dict[Text, int]]: + """Get trained vocabulary from attribute's count vectorizer""" + + try: + return self.vectorizers[attribute].vocabulary_ + except (AttributeError, TypeError): + return None + + def _get_attribute_vocabulary_tokens(self, attribute: Text) -> Optional[List[Text]]: + """Get all keys of vocabulary of an attribute""" + + attribute_vocabulary = self._get_attribute_vocabulary(attribute) + try: + return list(attribute_vocabulary.keys()) + except TypeError: + return None + + def _check_analyzer(self) -> None: + if self.analyzer != "word": + if self.OOV_token is not None: + logger.warning( + "Analyzer is set to character, " + "provided OOV word token will be ignored." + ) + if self.stop_words is not None: + logger.warning( + "Analyzer is set to character, " + "provided stop words will be ignored." + ) + if self.max_ngram == 1: + logger.warning( + "Analyzer is set to character, " + "but max n-gram is set to 1. " + "It means that the vocabulary will " + "contain single letters only." + ) + + @staticmethod + def _attributes_for(analyzer: Text) -> List[Text]: + """Create a list of attributes that should be featurized.""" + + # intents should be featurized only by word level count vectorizer + return ( + MESSAGE_ATTRIBUTES if analyzer == "word" else DENSE_FEATURIZABLE_ATTRIBUTES + ) + + def __init__( + self, + component_config: Optional[Dict[Text, Any]] = None, + vectorizers: Optional[Dict[Text, "CountVectorizer"]] = None, + ) -> None: + """Construct a new count vectorizer using the sklearn framework.""" + + super().__init__(component_config) + + # parameters for sklearn's CountVectorizer + self._load_count_vect_params() + + # handling Out-Of-Vocabulary (OOV) words + self._load_OOV_params() + + # warn that some of config parameters might be ignored + self._check_analyzer() + + # set which attributes to featurize + self._attributes = self._attributes_for(self.analyzer) + + # declare class instance for CountVectorizer + self.vectorizers = vectorizers + + @staticmethod + def _get_message_tokens_by_attribute( + message: "Message", attribute: Text + ) -> List[Text]: + """Get text tokens of an attribute of a message""" + if message.get(TOKENS_NAMES[attribute]): + return [t.lemma for t in message.get(TOKENS_NAMES[attribute])] + + return message.get(attribute).split() + + def _process_tokens(self, tokens: List[Text], attribute: Text = TEXT) -> List[Text]: + """Apply processing and cleaning steps to text""" + + if attribute == INTENT: + # Don't do any processing for intent attribute. 
Treat them as whole labels + return tokens + + # replace all digits with NUMBER token + tokens = [re.sub(r"\b[0-9]+\b", "__NUMBER__", text) for text in tokens] + + # convert to lowercase if necessary + if self.lowercase: + tokens = [text.lower() for text in tokens] + + return tokens + + def _replace_with_oov_token( + self, tokens: List[Text], attribute: Text + ) -> List[Text]: + """Replace OOV words with OOV token""" + + if self.OOV_token and self.analyzer == "word": + vocabulary_exists = self._check_attribute_vocabulary(attribute) + if vocabulary_exists and self.OOV_token in self._get_attribute_vocabulary( + attribute + ): + # CountVectorizer is trained, process for prediction + tokens = [ + t + if t in self._get_attribute_vocabulary_tokens(attribute) + else self.OOV_token + for t in tokens + ] + elif self.OOV_words: + # CountVectorizer is not trained, process for train + tokens = [self.OOV_token if t in self.OOV_words else t for t in tokens] + + return tokens + + def _get_processed_message_tokens_by_attribute( + self, message: Message, attribute: Text = TEXT + ) -> List[Text]: + """Get processed text of attribute of a message""" + + if message.get(attribute) is None: + # return empty list since sklearn countvectorizer does not like None + # object while training and predicting + return [] + + tokens = self._get_message_tokens_by_attribute(message, attribute) + tokens = self._process_tokens(tokens, attribute) + tokens = self._replace_with_oov_token(tokens, attribute) + + return tokens + + # noinspection PyPep8Naming + def _check_OOV_present(self, all_tokens: List[List[Text]], attribute: Text) -> None: + """Check if an OOV word is present""" + if not self.OOV_token or self.OOV_words or not all_tokens: + return + + for tokens in all_tokens: + for text in tokens: + if self.OOV_token in text or ( + self.lowercase and self.OOV_token in text.lower() + ): + return + + if any(text for tokens in all_tokens for text in tokens): + training_data_type = "NLU" if attribute == TEXT else "ResponseSelector" + + # if there is some text in tokens, warn if there is no oov token + common_utils.raise_warning( + f"The out of vocabulary token '{self.OOV_token}' was configured, but " + f"could not be found in any one of the {training_data_type} " + f"training examples. 
All unseen words will be ignored during prediction.", + docs=DOCS_URL_COMPONENTS + "#countvectorsfeaturizer", + ) + + def _get_all_attributes_processed_tokens( + self, training_data: TrainingData + ) -> Dict[Text, List[List[Text]]]: + """Get processed text for all attributes of examples in training data""" + + processed_attribute_tokens = {} + for attribute in self._attributes: + all_tokens = [ + self._get_processed_message_tokens_by_attribute(example, attribute) + for example in training_data.training_examples + ] + if attribute in DENSE_FEATURIZABLE_ATTRIBUTES: + # check for oov tokens only in text based attributes + self._check_OOV_present(all_tokens, attribute) + processed_attribute_tokens[attribute] = all_tokens + + return processed_attribute_tokens + + @staticmethod + def _convert_attribute_tokens_to_texts( + attribute_tokens: Dict[Text, List[List[Text]]] + ) -> Dict[Text, List[Text]]: + attribute_texts = {} + + for attribute in attribute_tokens.keys(): + list_of_tokens = attribute_tokens[attribute] + attribute_texts[attribute] = [" ".join(tokens) for tokens in list_of_tokens] + + return attribute_texts + + def _train_with_shared_vocab(self, attribute_texts: Dict[Text, List[Text]]): + """Construct the vectorizers and train them with a shared vocab""" + + self.vectorizers = self._create_shared_vocab_vectorizers( + { + "strip_accents": self.strip_accents, + "lowercase": self.lowercase, + "stop_words": self.stop_words, + "min_ngram": self.min_ngram, + "max_ngram": self.max_ngram, + "max_df": self.max_df, + "min_df": self.min_df, + "max_features": self.max_features, + "analyzer": self.analyzer, + } + ) + + combined_cleaned_texts = [] + for attribute in self._attributes: + combined_cleaned_texts += attribute_texts[attribute] + + try: + self.vectorizers[TEXT].fit(combined_cleaned_texts) + except ValueError: + logger.warning( + "Unable to train a shared CountVectorizer. " + "Leaving an untrained CountVectorizer" + ) + + @staticmethod + def _attribute_texts_is_non_empty(attribute_texts: List[Text]) -> bool: + return any(attribute_texts) + + def _train_with_independent_vocab(self, attribute_texts: Dict[Text, List[Text]]): + """Construct the vectorizers and train them with an independent vocab""" + + self.vectorizers = self._create_independent_vocab_vectorizers( + { + "strip_accents": self.strip_accents, + "lowercase": self.lowercase, + "stop_words": self.stop_words, + "min_ngram": self.min_ngram, + "max_ngram": self.max_ngram, + "max_df": self.max_df, + "min_df": self.min_df, + "max_features": self.max_features, + "analyzer": self.analyzer, + } + ) + + for attribute in self._attributes: + if self._attribute_texts_is_non_empty(attribute_texts[attribute]): + try: + self.vectorizers[attribute].fit(attribute_texts[attribute]) + except ValueError: + logger.warning( + f"Unable to train CountVectorizer for message " + f"attribute {attribute}. Leaving an untrained " + f"CountVectorizer for it." + ) + else: + logger.debug( + f"No text provided for {attribute} attribute in any messages of " + f"training data. Skipping training a CountVectorizer for it." 
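To make the two training modes above concrete, a small sketch with plain sklearn and made-up example texts: with a shared vocabulary a single vectorizer is fitted on the texts of all attributes, otherwise each attribute gets its own vectorizer and vocabulary.

from sklearn.feature_extraction.text import CountVectorizer

text_examples = ["book a flight", "rate the restaurant"]
response_examples = ["which city are you flying to", "how many stars"]

# shared vocabulary: one vectorizer fitted on the combined texts of all attributes
shared = CountVectorizer(token_pattern=r"(?u)\b\w+\b")
shared.fit(text_examples + response_examples)

# independent vocabularies: one vectorizer per attribute
independent = {
    "text": CountVectorizer(token_pattern=r"(?u)\b\w+\b").fit(text_examples),
    "response": CountVectorizer(token_pattern=r"(?u)\b\w+\b").fit(response_examples),
}

assert "stars" in shared.vocabulary_
assert "stars" not in independent["text"].vocabulary_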
+ ) + + def _create_features( + self, attribute: Text, all_tokens: List[List[Text]] + ) -> Tuple[ + List[Optional[scipy.sparse.spmatrix]], List[Optional[scipy.sparse.spmatrix]] + ]: + sequence_features = [] + sentence_features = [] + + for i, tokens in enumerate(all_tokens): + if not tokens: + # nothing to featurize + sequence_features.append(None) + sentence_features.append(None) + continue + + # vectorizer.transform returns a sparse matrix of size + # [n_samples, n_features] + # set input to list of tokens if sequence should be returned + # otherwise join all tokens to a single string and pass that as a list + if not tokens: + # attribute is not set (e.g. response not present) + sequence_features.append(None) + sentence_features.append(None) + continue + + seq_vec = self.vectorizers[attribute].transform(tokens) + seq_vec.sort_indices() + + sequence_features.append(seq_vec.tocoo()) + + if attribute in [TEXT, RESPONSE]: + tokens_text = [" ".join(tokens)] + sentence_vec = self.vectorizers[attribute].transform(tokens_text) + sentence_vec.sort_indices() + + sentence_features.append(sentence_vec.tocoo()) + else: + sentence_features.append(None) + + return sequence_features, sentence_features + + def _get_featurized_attribute( + self, attribute: Text, all_tokens: List[List[Text]] + ) -> Tuple[ + List[Optional[scipy.sparse.spmatrix]], List[Optional[scipy.sparse.spmatrix]] + ]: + """Return features of a particular attribute for complete data""" + + if self._check_attribute_vocabulary(attribute): + # count vectorizer was trained + return self._create_features(attribute, all_tokens) + else: + return [], [] + + def _set_attribute_features( + self, + attribute: Text, + sequence_features: List[scipy.sparse.spmatrix], + sentence_features: List[scipy.sparse.spmatrix], + examples: List[Message], + ) -> None: + """Set computed features of the attribute to corresponding message objects""" + for i, message in enumerate(examples): + # create bag for each example + if sequence_features[i] is not None: + final_sequence_features = Features( + sequence_features[i], + FEATURE_TYPE_SEQUENCE, + attribute, + self.component_config[FEATURIZER_CLASS_ALIAS], + ) + message.add_features(final_sequence_features) + + if sentence_features[i] is not None: + final_sentence_features = Features( + sentence_features[i], + FEATURE_TYPE_SENTENCE, + attribute, + self.component_config[FEATURIZER_CLASS_ALIAS], + ) + message.add_features(final_sentence_features) + + def train( + self, + training_data: TrainingData, + cfg: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, + ) -> None: + """Train the featurizer. + + Take parameters from config and + construct a new count vectorizer using the sklearn framework. 
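The sequence/sentence split in `_create_features` can be pictured with a toy vectorizer (a standalone sketch with made-up texts, not the component itself): transforming the list of tokens yields one count row per token, while transforming the joined string yields a single row for the whole message.

from sklearn.feature_extraction.text import CountVectorizer

vectorizer = CountVectorizer(token_pattern=r"(?u)\b\w+\b")
vectorizer.fit(["book a flight to berlin"])

tokens = ["book", "a", "flight", "to", "berlin"]

# sequence features: one sparse row of counts per token
seq_vec = vectorizer.transform(tokens)  # shape (number of tokens, vocabulary size)

# sentence features: a single sparse row for the whole utterance
sentence_vec = vectorizer.transform([" ".join(tokens)])  # shape (1, vocabulary size)

assert seq_vec.shape[0] == len(tokens)
assert sentence_vec.shape[0] == 1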
+ """ + + spacy_nlp = kwargs.get("spacy_nlp") + if spacy_nlp is not None: + # create spacy lemma_ for OOV_words + self.OOV_words = [t.lemma_ for w in self.OOV_words for t in spacy_nlp(w)] + + # process sentences and collect data for all attributes + processed_attribute_tokens = self._get_all_attributes_processed_tokens( + training_data + ) + + # train for all attributes + attribute_texts = self._convert_attribute_tokens_to_texts( + processed_attribute_tokens + ) + if self.use_shared_vocab: + self._train_with_shared_vocab(attribute_texts) + else: + self._train_with_independent_vocab(attribute_texts) + + # transform for all attributes + for attribute in self._attributes: + sequence_features, sentence_features = self._get_featurized_attribute( + attribute, processed_attribute_tokens[attribute] + ) + + if sequence_features and sentence_features: + self._set_attribute_features( + attribute, + sequence_features, + sentence_features, + training_data.training_examples, + ) + + def process(self, message: Message, **kwargs: Any) -> None: + """Process incoming message and compute and set features""" + + if self.vectorizers is None: + logger.error( + "There is no trained CountVectorizer: " + "component is either not trained or " + "didn't receive enough training data" + ) + return + + attribute = TEXT + message_tokens = self._get_processed_message_tokens_by_attribute( + message, attribute + ) + + # features shape (1, seq, dim) + sequence_features, sentence_features = self._create_features( + attribute, [message_tokens] + ) + + self._set_attribute_features( + attribute, sequence_features, sentence_features, [message] + ) + + def _collect_vectorizer_vocabularies(self) -> Dict[Text, Optional[Dict[Text, int]]]: + """Get vocabulary for all attributes""" + + attribute_vocabularies = {} + for attribute in self._attributes: + attribute_vocabularies[attribute] = self._get_attribute_vocabulary( + attribute + ) + return attribute_vocabularies + + @staticmethod + def _is_any_model_trained(attribute_vocabularies) -> bool: + """Check if any model got trained""" + + return any(value is not None for value in attribute_vocabularies.values()) + + def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]: + """Persist this model into the passed directory. + + Returns the metadata necessary to load the model again. + """ + + file_name = file_name + ".pkl" + + if self.vectorizers: + # vectorizer instance was not None, some models could have been trained + attribute_vocabularies = self._collect_vectorizer_vocabularies() + if self._is_any_model_trained(attribute_vocabularies): + # Definitely need to persist some vocabularies + featurizer_file = os.path.join(model_dir, file_name) + + if self.use_shared_vocab: + # Only persist vocabulary from one attribute. Can be loaded and + # distributed to all attributes. 
+ vocab = attribute_vocabularies[TEXT] + else: + vocab = attribute_vocabularies + + io_utils.json_pickle(featurizer_file, vocab) + + return {"file": file_name} + + @classmethod + def _create_shared_vocab_vectorizers( + cls, parameters: Dict[Text, Any], vocabulary: Optional[Any] = None + ) -> Dict[Text, CountVectorizer]: + """Create vectorizers for all attributes with shared vocabulary""" + + shared_vectorizer = CountVectorizer( + token_pattern=r"(?u)\b\w+\b", + strip_accents=parameters["strip_accents"], + lowercase=parameters["lowercase"], + stop_words=parameters["stop_words"], + ngram_range=(parameters["min_ngram"], parameters["max_ngram"]), + max_df=parameters["max_df"], + min_df=parameters["min_df"], + max_features=parameters["max_features"], + analyzer=parameters["analyzer"], + vocabulary=vocabulary, + ) + + attribute_vectorizers = {} + + for attribute in cls._attributes_for(parameters["analyzer"]): + attribute_vectorizers[attribute] = shared_vectorizer + + return attribute_vectorizers + + @classmethod + def _create_independent_vocab_vectorizers( + cls, parameters: Dict[Text, Any], vocabulary: Optional[Any] = None + ) -> Dict[Text, CountVectorizer]: + """Create vectorizers for all attributes with independent vocabulary""" + + attribute_vectorizers = {} + + for attribute in cls._attributes_for(parameters["analyzer"]): + + attribute_vocabulary = vocabulary[attribute] if vocabulary else None + + attribute_vectorizer = CountVectorizer( + token_pattern=r"(?u)\b\w+\b", + strip_accents=parameters["strip_accents"], + lowercase=parameters["lowercase"], + stop_words=parameters["stop_words"], + ngram_range=(parameters["min_ngram"], parameters["max_ngram"]), + max_df=parameters["max_df"], + min_df=parameters["min_df"], + max_features=parameters["max_features"], + analyzer=parameters["analyzer"], + vocabulary=attribute_vocabulary, + ) + attribute_vectorizers[attribute] = attribute_vectorizer + + return attribute_vectorizers + + @classmethod + def load( + cls, + meta: Dict[Text, Any], + model_dir: Optional[Text] = None, + model_metadata: Optional[Metadata] = None, + cached_component: Optional["CountVectorsFeaturizer"] = None, + **kwargs: Any, + ) -> "CountVectorsFeaturizer": + + file_name = meta.get("file") + featurizer_file = os.path.join(model_dir, file_name) + + if not os.path.exists(featurizer_file): + return cls(meta) + + vocabulary = io_utils.json_unpickle(featurizer_file) + + share_vocabulary = meta["use_shared_vocab"] + + if share_vocabulary: + vectorizers = cls._create_shared_vocab_vectorizers( + meta, vocabulary=vocabulary + ) + else: + vectorizers = cls._create_independent_vocab_vectorizers( + meta, vocabulary=vocabulary + ) + + ftr = cls(meta, vectorizers) + + # make sure the vocabulary has been loaded correctly + for attribute in vectorizers: + ftr.vectorizers[attribute]._validate_vocabulary() + + return ftr diff --git a/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py b/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py new file mode 100644 index 000000000000..b176c3da735c --- /dev/null +++ b/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py @@ -0,0 +1,300 @@ +import logging +from collections import defaultdict, OrderedDict +from pathlib import Path + +import numpy as np +from typing import Any, Dict, Optional, Text, List, Type, Union + +from rasa.nlu.tokenizers.spacy_tokenizer import POS_TAG_KEY +from rasa.constants import DOCS_URL_COMPONENTS +from rasa.nlu.components import Component +from rasa.nlu.tokenizers.tokenizer 
import Token +from rasa.nlu.tokenizers.tokenizer import Tokenizer +from rasa.nlu.featurizers.featurizer import SparseFeaturizer, Features +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.training_data import Message, TrainingData +from rasa.nlu.constants import ( + TOKENS_NAMES, + TEXT, + FEATURE_TYPE_SEQUENCE, + FEATURIZER_CLASS_ALIAS, +) + +from rasa.nlu.model import Metadata +import rasa.utils.io as io_utils + +logger = logging.getLogger(__name__) + +END_OF_SENTENCE = "EOS" +BEGIN_OF_SENTENCE = "BOS" + + +class LexicalSyntacticFeaturizer(SparseFeaturizer): + """Creates features for entity extraction. + + Moves with a sliding window over every token in the user message and creates + features according to the configuration. + """ + + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [Tokenizer] + + defaults = { + # 'features' is [before, word, after] array with before, word, + # after holding keys about which features to use for each word, + # for example, 'title' in array before will have the feature + # "is the preceding word in title case?" + # POS features require 'SpacyTokenizer'. + "features": [ + ["low", "title", "upper"], + ["BOS", "EOS", "low", "upper", "title", "digit"], + ["low", "title", "upper"], + ] + } + + function_dict = { + "low": lambda token: token.text.islower(), + "title": lambda token: token.text.istitle(), + "prefix5": lambda token: token.text[:5], + "prefix2": lambda token: token.text[:2], + "suffix5": lambda token: token.text[-5:], + "suffix3": lambda token: token.text[-3:], + "suffix2": lambda token: token.text[-2:], + "suffix1": lambda token: token.text[-1:], + "pos": lambda token: token.data.get(POS_TAG_KEY) + if POS_TAG_KEY in token.data + else None, + "pos2": lambda token: token.data.get(POS_TAG_KEY)[:2] + if "pos" in token.data + else None, + "upper": lambda token: token.text.isupper(), + "digit": lambda token: token.text.isdigit(), + } + + def __init__( + self, + component_config: Dict[Text, Any], + feature_to_idx_dict: Optional[Dict[Text, Any]] = None, + ): + super().__init__(component_config) + + self.feature_to_idx_dict = feature_to_idx_dict or {} + self.number_of_features = self._calculate_number_of_features() + + def _calculate_number_of_features(self) -> int: + return sum( + [ + len(feature_values.values()) + for feature_values in self.feature_to_idx_dict.values() + ] + ) + + def train( + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, + ) -> None: + self.feature_to_idx_dict = self._create_feature_to_idx_dict(training_data) + self.number_of_features = self._calculate_number_of_features() + + for example in training_data.training_examples: + self._create_sparse_features(example) + + def process(self, message: Message, **kwargs: Any) -> None: + self._create_sparse_features(message) + + def _create_feature_to_idx_dict( + self, training_data: TrainingData + ) -> Dict[Text, Dict[Text, int]]: + """Create dictionary of all feature values. + + Each feature key, defined in the component configuration, points to + different feature values and their indices in the overall resulting + feature vector. 
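A toy, self-contained illustration of the sliding-window idea described above, using namedtuple tokens, a [-1, 0, 1] window and only a few of the feature functions (simplified compared to the component's configurable feature lists):

from collections import namedtuple

Tok = namedtuple("Tok", ["text"])
tokens = [Tok("Book"), Tok("a"), Tok("flight")]

functions = {
    "low": lambda t: t.text.islower(),
    "title": lambda t: t.text.istitle(),
    "upper": lambda t: t.text.isupper(),
}

def window_features(tokens, idx, window=(-1, 0, 1)):
    feats = {}
    for offset in window:
        pos = idx + offset
        if pos < 0 or pos >= len(tokens):
            continue  # no token before the first / after the last word
        for name, fn in functions.items():
            feats[f"{offset}:{name}"] = fn(tokens[pos])
    return feats

# features for the middle token "a" look at "Book", "a" and "flight"
assert window_features(tokens, 1)["-1:title"] is True
assert window_features(tokens, 1)["0:low"] is True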
+ """ + + # get all possible feature values + all_features = [] + for example in training_data.training_examples: + tokens = example.get(TOKENS_NAMES[TEXT]) + all_features.append(self._tokens_to_features(tokens)) + + # build vocabulary of features + feature_vocabulary = self._build_feature_vocabulary(all_features) + + # assign a unique index to each feature value + return self._map_features_to_indices(feature_vocabulary) + + @staticmethod + def _map_features_to_indices( + feature_vocabulary: Dict[Text, List[Text]] + ) -> Dict[Text, Dict[Text, int]]: + feature_to_idx_dict = {} + offset = 0 + + for feature_name, feature_values in feature_vocabulary.items(): + feature_to_idx_dict[feature_name] = { + str(feature_value): feature_idx + for feature_idx, feature_value in enumerate( + sorted(feature_values), start=offset + ) + } + offset += len(feature_values) + + return feature_to_idx_dict + + @staticmethod + def _build_feature_vocabulary( + features: List[List[Dict[Text, Any]]] + ) -> Dict[Text, List[Text]]: + feature_vocabulary = defaultdict(set) + + for sentence_features in features: + for token_features in sentence_features: + for feature_name, feature_value in token_features.items(): + feature_vocabulary[feature_name].add(feature_value) + + # sort items to ensure same order every time (for tests) + feature_vocabulary = OrderedDict(sorted(feature_vocabulary.items())) + + return feature_vocabulary + + def _create_sparse_features(self, message: Message) -> None: + """Convert incoming messages into sparse features using the configured + features.""" + import scipy.sparse + + tokens = message.get(TOKENS_NAMES[TEXT]) + + sentence_features = self._tokens_to_features(tokens) + one_hot_seq_feature_vector = self._features_to_one_hot(sentence_features) + + sequence_features = scipy.sparse.coo_matrix(one_hot_seq_feature_vector) + + final_sequence_features = Features( + sequence_features, + FEATURE_TYPE_SEQUENCE, + TEXT, + self.component_config[FEATURIZER_CLASS_ALIAS], + ) + message.add_features(final_sequence_features) + + def _tokens_to_features(self, tokens: List[Token]) -> List[Dict[Text, Any]]: + """Convert words into discrete features.""" + + configured_features = self.component_config["features"] + sentence_features = [] + + for token_idx in range(len(tokens)): + # get the window size (e.g. before, word, after) of the configured features + # in case of an even number we will look at one more word before, + # e.g. 
window size 4 will result in a window range of + # [-2, -1, 0, 1] (0 = current word in sentence) + window_size = len(configured_features) + half_window_size = window_size // 2 + window_range = range(-half_window_size, half_window_size + window_size % 2) + + prefixes = [str(i) for i in window_range] + + token_features = {} + + for pointer_position in window_range: + current_idx = token_idx + pointer_position + + # skip, if current_idx is pointing to a non-existing token + if current_idx < 0 or current_idx >= len(tokens): + continue + + token = tokens[token_idx + pointer_position] + + current_feature_idx = pointer_position + half_window_size + prefix = prefixes[current_feature_idx] + + for feature in configured_features[current_feature_idx]: + token_features[f"{prefix}:{feature}"] = self._get_feature_value( + feature, token, token_idx, pointer_position, len(tokens) + ) + + sentence_features.append(token_features) + + return sentence_features + + def _features_to_one_hot( + self, sentence_features: List[Dict[Text, Any]] + ) -> np.ndarray: + """Convert the word features into a one-hot presentation using the indices + in the feature-to-idx dictionary.""" + + one_hot_seq_feature_vector = np.zeros( + [len(sentence_features), self.number_of_features] + ) + + for token_idx, token_features in enumerate(sentence_features): + for feature_name, feature_value in token_features.items(): + feature_value_str = str(feature_value) + if ( + feature_name in self.feature_to_idx_dict + and feature_value_str in self.feature_to_idx_dict[feature_name] + ): + feature_idx = self.feature_to_idx_dict[feature_name][ + feature_value_str + ] + one_hot_seq_feature_vector[token_idx][feature_idx] = 1 + + return one_hot_seq_feature_vector + + def _get_feature_value( + self, + feature: Text, + token: Token, + token_idx: int, + pointer_position: int, + token_length: int, + ) -> Union[bool, int, Text]: + if feature == END_OF_SENTENCE: + return token_idx + pointer_position == token_length - 1 + + if feature == BEGIN_OF_SENTENCE: + return token_idx + pointer_position == 0 + + if feature not in self.function_dict: + raise ValueError( + f"Configured feature '{feature}' not valid. Please check " + f"'{DOCS_URL_COMPONENTS}' for valid configuration parameters." + ) + + value = self.function_dict[feature](token) + if value is None: + logger.debug( + f"Invalid value '{value}' for feature '{feature}'." + f" Feature is ignored." + ) + return value + + @classmethod + def load( + cls, + meta: Dict[Text, Any], + model_dir: Optional[Text] = None, + model_metadata: Optional[Metadata] = None, + cached_component: Optional["LexicalSyntacticFeaturizer"] = None, + **kwargs: Any, + ) -> "LexicalSyntacticFeaturizer": + + file_name = meta.get("file") + + feature_to_idx_file = Path(model_dir) / f"{file_name}.feature_to_idx_dict.pkl" + feature_to_idx_dict = io_utils.json_unpickle(feature_to_idx_file) + + return LexicalSyntacticFeaturizer(meta, feature_to_idx_dict=feature_to_idx_dict) + + def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]: + """Persist this model into the passed directory. 
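For illustration only (not part of the diff), what `_features_to_one_hot` does with the feature-to-index mapping built during training, reduced to a standalone sketch with invented values:

import numpy as np

feature_to_idx_dict = {
    "0:low": {"False": 0, "True": 1},
    "0:title": {"False": 2, "True": 3},
}
sentence_features = [{"0:low": True, "0:title": False}]  # a single token

number_of_features = sum(len(values) for values in feature_to_idx_dict.values())
one_hot = np.zeros([len(sentence_features), number_of_features])
for token_idx, token_features in enumerate(sentence_features):
    for name, value in token_features.items():
        one_hot[token_idx][feature_to_idx_dict[name][str(value)]] = 1

print(one_hot)  # [[0. 1. 1. 0.]]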
+ Return the metadata necessary to load the model again.""" + + feature_to_idx_file = Path(model_dir) / f"{file_name}.feature_to_idx_dict.pkl" + io_utils.json_pickle(feature_to_idx_file, self.feature_to_idx_dict) + + return {"file": file_name} diff --git a/rasa/nlu/featurizers/sparse_featurizer/ngram_featurizer.py b/rasa/nlu/featurizers/sparse_featurizer/ngram_featurizer.py new file mode 100644 index 000000000000..5c80f9d803a8 --- /dev/null +++ b/rasa/nlu/featurizers/sparse_featurizer/ngram_featurizer.py @@ -0,0 +1,25 @@ +import logging + +from typing import Any, Dict, Optional, Text + +from rasa.nlu.featurizers.featurizer import SparseFeaturizer + +logger = logging.getLogger(__name__) + + +class NGramFeaturizer(SparseFeaturizer): + def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None: + super(NGramFeaturizer, self).__init__(component_config) + + raise NotImplementedError( + "REMOVAL warning: You cannot use `NGramFeaturizer` anymore. " + "Please use `CountVectorsFeaturizer` instead. The following settings" + "match the previous `NGramFeaturizer`:" + "" + "- name: 'CountVectorsFeaturizer'" + " analyzer: 'char_wb'" + " min_ngram: 3" + " max_ngram: 17" + " max_features: 10" + " min_df: 5" + ) diff --git a/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py b/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py new file mode 100644 index 000000000000..4071a4627e96 --- /dev/null +++ b/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py @@ -0,0 +1,176 @@ +import logging +import os +import re +from typing import Any, Dict, List, Optional, Text, Union, Type, Tuple + +import numpy as np +import scipy.sparse + +import rasa.utils.io +import rasa.nlu.utils.pattern_utils as pattern_utils +from rasa.nlu import utils +from rasa.nlu.components import Component +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.constants import ( + RESPONSE, + TEXT, + TOKENS_NAMES, + FEATURE_TYPE_SENTENCE, + FEATURE_TYPE_SEQUENCE, + FEATURIZER_CLASS_ALIAS, +) +from rasa.nlu.featurizers.featurizer import SparseFeaturizer, Features +from rasa.nlu.model import Metadata +from rasa.nlu.tokenizers.tokenizer import Tokenizer +from rasa.nlu.training_data import Message, TrainingData + +logger = logging.getLogger(__name__) + + +class RegexFeaturizer(SparseFeaturizer): + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [Tokenizer] + + defaults = { + # text will be processed with case sensitive as default + "case_sensitive": True, + # use lookup tables to generate features + "use_lookup_tables": True, + # use regexes to generate features + "use_regexes": True, + } + + def __init__( + self, + component_config: Optional[Dict[Text, Any]] = None, + known_patterns: Optional[List[Dict[Text, Text]]] = None, + ) -> None: + + super().__init__(component_config) + + self.known_patterns = known_patterns if known_patterns else [] + self.case_sensitive = self.component_config["case_sensitive"] + + def train( + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, + ) -> None: + + self.known_patterns = pattern_utils.extract_patterns( + training_data, + use_lookup_tables=self.component_config["use_lookup_tables"], + use_regexes=self.component_config["use_regexes"], + ) + + for example in training_data.training_examples: + for attribute in [TEXT, RESPONSE]: + self._text_features_with_regex(example, attribute) + + def process(self, message: Message, **kwargs: Any) -> None: + self._text_features_with_regex(message, 
TEXT) + + def _text_features_with_regex(self, message: Message, attribute: Text) -> None: + if self.known_patterns: + sequence_features, sentence_features = self._features_for_patterns( + message, attribute + ) + + if sequence_features is not None: + final_sequence_features = Features( + sequence_features, + FEATURE_TYPE_SEQUENCE, + attribute, + self.component_config[FEATURIZER_CLASS_ALIAS], + ) + message.add_features(final_sequence_features) + + if sentence_features is not None: + final_sentence_features = Features( + sentence_features, + FEATURE_TYPE_SENTENCE, + attribute, + self.component_config[FEATURIZER_CLASS_ALIAS], + ) + message.add_features(final_sentence_features) + + def _features_for_patterns( + self, message: Message, attribute: Text + ) -> Tuple[Optional[scipy.sparse.coo_matrix], Optional[scipy.sparse.coo_matrix]]: + """Checks which known patterns match the message. + Given a sentence, returns a vector of {1,0} values indicating which + regexes did match. Furthermore, if the + message is tokenized, the function will mark all tokens with a dict + relating the name of the regex to whether it was matched.""" + + # Attribute not set (e.g. response not present) + if not message.get(attribute): + return None, None + + tokens = message.get(TOKENS_NAMES[attribute], []) + + if not tokens: + # nothing to featurize + return None, None + + flags = 0 # default flag + if not self.case_sensitive: + flags = re.IGNORECASE + + sequence_length = len(tokens) + + sequence_features = np.zeros([sequence_length, len(self.known_patterns)]) + sentence_features = np.zeros([1, len(self.known_patterns)]) + + for pattern_index, pattern in enumerate(self.known_patterns): + matches = re.finditer(pattern["pattern"], message.text, flags=flags) + matches = list(matches) + + for token_index, t in enumerate(tokens): + patterns = t.get("pattern", default={}) + patterns[pattern["name"]] = False + + for match in matches: + if t.start < match.end() and t.end > match.start(): + patterns[pattern["name"]] = True + sequence_features[token_index][pattern_index] = 1.0 + if attribute in [RESPONSE, TEXT]: + # sentence vector should contain all patterns + sentence_features[0][pattern_index] = 1.0 + + t.set("pattern", patterns) + + return ( + scipy.sparse.coo_matrix(sequence_features), + scipy.sparse.coo_matrix(sentence_features), + ) + + @classmethod + def load( + cls, + meta: Dict[Text, Any], + model_dir: Optional[Text] = None, + model_metadata: Optional[Metadata] = None, + cached_component: Optional["RegexFeaturizer"] = None, + **kwargs: Any, + ) -> "RegexFeaturizer": + + file_name = meta.get("file") + regex_file = os.path.join(model_dir, file_name) + + if os.path.exists(regex_file): + known_patterns = rasa.utils.io.read_json_file(regex_file) + return RegexFeaturizer(meta, known_patterns=known_patterns) + else: + return RegexFeaturizer(meta) + + def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]: + """Persist this model into the passed directory. 
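The token/pattern overlap check in `_features_for_patterns` above can be reduced to a small standalone sketch (not part of the PR); the message, token spans and pattern are invented:

import re
import numpy as np

known_patterns = [{"name": "zipcode", "pattern": r"\b\d{5}\b"}]
text = "deliver to 10115 please"
token_spans = [(0, 7), (8, 10), (11, 16), (17, 23)]  # (start, end) offsets of each token

sequence_features = np.zeros([len(token_spans), len(known_patterns)])
for pattern_index, pattern in enumerate(known_patterns):
    for match in re.finditer(pattern["pattern"], text):
        for token_index, (start, end) in enumerate(token_spans):
            if start < match.end() and end > match.start():
                sequence_features[token_index][pattern_index] = 1.0

print(sequence_features)  # only the row for the "10115" token is set to 1.0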
+ Return the metadata necessary to load the model again.""" + file_name = file_name + ".pkl" + regex_file = os.path.join(model_dir, file_name) + utils.write_json_to_file(regex_file, self.known_patterns, indent=4) + + return {"file": file_name} diff --git a/rasa/nlu/model.py b/rasa/nlu/model.py index fe2d4c0745d4..a5957ec514f4 100644 --- a/rasa/nlu/model.py +++ b/rasa/nlu/model.py @@ -2,21 +2,18 @@ import datetime import logging import os -from typing import Any -from typing import Dict -from typing import List -from typing import Optional -from typing import Text +from typing import Any, Dict, List, Optional, Text import rasa.nlu +import rasa.utils.io from rasa.constants import MINIMUM_COMPATIBLE_VERSION from rasa.nlu import components, utils # pytype: disable=pyi-error from rasa.nlu.components import Component, ComponentBuilder # pytype: disable=pyi-error from rasa.nlu.config import RasaNLUModelConfig, component_config_from_pipeline +from rasa.nlu.constants import INTENT_NAME_KEY from rasa.nlu.persistor import Persistor -from rasa.nlu.training_data import TrainingData, Message +from rasa.nlu.training_data import Message, TrainingData from rasa.nlu.utils import write_json_to_file -import rasa.utils.io MODEL_NAME_PREFIX = "nlu_" @@ -30,10 +27,10 @@ class InvalidModelError(Exception): message -- explanation of why the model is invalid """ - def __init__(self, message): + def __init__(self, message: Text) -> None: self.message = message - def __str__(self): + def __str__(self) -> Text: return self.message @@ -44,14 +41,14 @@ class UnsupportedModelError(Exception): message -- explanation of why the model is invalid """ - def __init__(self, message): + def __init__(self, message: Text) -> None: self.message = message - def __str__(self): + def __str__(self) -> Text: return self.message -class Metadata(object): +class Metadata: """Captures all information about a model to load and prepare it.""" @staticmethod @@ -70,7 +67,7 @@ def load(model_dir: Text): except Exception as e: abspath = os.path.abspath(os.path.join(model_dir, "metadata.json")) raise InvalidModelError( - "Failed to load model metadata from '{}'. {}".format(abspath, e) + f"Failed to load model metadata from '{abspath}'. {e}" ) def __init__(self, metadata: Dict[Text, Any], model_dir: Optional[Text]): @@ -78,7 +75,7 @@ def __init__(self, metadata: Dict[Text, Any], model_dir: Optional[Text]): self.metadata = metadata self.model_dir = model_dir - def get(self, property_name, default=None): + def get(self, property_name: Text, default: Any = None) -> Any: return self.metadata.get(property_name, default) @property @@ -92,7 +89,7 @@ def component_classes(self): def number_of_components(self): return len(self.get("pipeline", [])) - def for_component(self, index, defaults=None): + def for_component(self, index: int, defaults: Any = None) -> Dict[Text, Any]: return component_config_from_pipeline(index, self.get("pipeline", []), defaults) @property @@ -117,14 +114,12 @@ def persist(self, model_dir: Text): write_json_to_file(filename, metadata, indent=4) -class Trainer(object): +class Trainer: """Trainer will load the data and train all components. Requires a pipeline specification and configuration to use for - the training.""" - - # Officially supported languages (others might be used, but might fail) - SUPPORTED_LANGUAGES = ["de", "en"] + the training. 
+ """ def __init__( self, @@ -150,11 +145,11 @@ def __init__( # build pipeline self.pipeline = self._build_pipeline(cfg, component_builder) - @staticmethod def _build_pipeline( - cfg: RasaNLUModelConfig, component_builder: ComponentBuilder + self, cfg: RasaNLUModelConfig, component_builder: ComponentBuilder ) -> List[Component]: - """Transform the passed names of the pipeline components into classes""" + """Transform the passed names of the pipeline components into classes.""" + pipeline = [] # Transform the passed names of the pipeline components into classes @@ -163,6 +158,9 @@ def _build_pipeline( component = component_builder.create_component(component_cfg, cfg) pipeline.append(component) + if not self.skip_validation: + components.validate_pipeline(pipeline) + return pipeline def train(self, data: TrainingData, **kwargs: Any) -> "Interpreter": @@ -181,7 +179,6 @@ def train(self, data: TrainingData, **kwargs: Any) -> "Interpreter": # Before the training starts: check that all arguments are provided if not self.skip_validation: - components.validate_arguments(self.pipeline, context) components.validate_required_components_from_data( self.pipeline, self.training_data ) @@ -190,7 +187,7 @@ def train(self, data: TrainingData, **kwargs: Any) -> "Interpreter": working_data = copy.deepcopy(data) for i, component in enumerate(self.pipeline): - logger.info("Starting to train component {}".format(component.name)) + logger.info(f"Starting to train component {component.name}") component.prepare_partial_processing(self.pipeline[:i], context) updates = component.train(working_data, self.config, **context) logger.info("Finished training component.") @@ -200,8 +197,8 @@ def train(self, data: TrainingData, **kwargs: Any) -> "Interpreter": return Interpreter(self.pipeline, context) @staticmethod - def _file_name(index, name): - return "component_{}_{}".format(index, name) + def _file_name(index: int, name: Text) -> Text: + return f"component_{index}_{name}" def persist( self, @@ -250,14 +247,14 @@ def persist( return dir_name -class Interpreter(object): +class Interpreter: """Use a trained pipeline of components to parse text messages.""" # Defines all attributes (& default values) # that will be returned by `parse` @staticmethod def default_output_attributes() -> Dict[Text, Any]: - return {"intent": {"name": None, "confidence": 0.0}, "entities": []} + return {"intent": {INTENT_NAME_KEY: None, "confidence": 0.0}, "entities": []} @staticmethod def ensure_model_compatibility( diff --git a/rasa/nlu/persistor.py b/rasa/nlu/persistor.py index e8be91011fb6..d5f442391ca4 100644 --- a/rasa/nlu/persistor.py +++ b/rasa/nlu/persistor.py @@ -1,10 +1,11 @@ -import io import logging import os import shutil import tarfile from typing import List, Optional, Text, Tuple +import rasa.utils.common + logger = logging.getLogger(__name__) @@ -12,7 +13,7 @@ def get_persistor(name: Text) -> Optional["Persistor"]: """Returns an instance of the requested persistor. - Currently, `aws`, `gcs` and `azure` are supported""" + Currently, `aws`, `gcs`, `azure` and providing module paths are supported remote storages.""" if name == "aws": return AWSPersistor( @@ -27,18 +28,28 @@ def get_persistor(name: Text) -> Optional["Persistor"]: os.environ.get("AZURE_ACCOUNT_NAME"), os.environ.get("AZURE_ACCOUNT_KEY"), ) - + if name: + try: + persistor = rasa.utils.common.class_from_module_path(name) + return persistor() + except ImportError: + raise ImportError( + f"Unknown model persistor {name}. 
Please make sure to " + "either use an included model persistor (`aws`, `gcs` " + "or `azure`) or specify the module path to an external " + "model persistor." + ) return None -class Persistor(object): +class Persistor: """Store models in cloud and fetch them when needed""" def persist(self, model_directory: Text, model_name: Text) -> None: """Uploads a model persisted in the `target_dir` to cloud storage.""" if not os.path.isdir(model_directory): - raise ValueError("Target directory '{}' not found.".format(model_directory)) + raise ValueError(f"Target directory '{model_directory}' not found.") file_key, tar_path = self._compress(model_directory, model_name) self._persist_tar(file_key, tar_path) @@ -53,7 +64,7 @@ def retrieve(self, model_name: Text, target_path: Text) -> None: tar_name = self._tar_name(model_name) self._retrieve_tar(tar_name) - self._decompress(tar_name, target_path) + self._decompress(os.path.basename(tar_name), target_path) def list_models(self) -> List[Text]: """Lists all the trained models.""" @@ -99,7 +110,7 @@ def _model_dir_and_model_from_filename(filename: Text) -> Tuple[Text, Text]: def _tar_name(model_name: Text, include_extension: bool = True) -> Text: ext = ".tar.gz" if include_extension else "" - return "{m}{ext}".format(m=model_name, ext=ext) + return f"{model_name}{ext}" @staticmethod def _decompress(compressed_path: Text, target_path: Text) -> None: @@ -113,12 +124,19 @@ class AWSPersistor(Persistor): Fetches them when needed, instead of storing them on the local disk.""" - def __init__(self, bucket_name: Text, endpoint_url: Optional[Text] = None) -> None: + def __init__( + self, + bucket_name: Text, + endpoint_url: Optional[Text] = None, + region_name: Optional[Text] = None, + ) -> None: import boto3 - super(AWSPersistor, self).__init__() - self.s3 = boto3.resource("s3", endpoint_url=endpoint_url) - self._ensure_bucket_exists(bucket_name) + super().__init__() + self.s3 = boto3.resource( + "s3", endpoint_url=endpoint_url, region_name=region_name + ) + self._ensure_bucket_exists(bucket_name, region_name) self.bucket_name = bucket_name self.bucket = self.s3.Bucket(bucket_name) @@ -129,14 +147,19 @@ def list_models(self) -> List[Text]: for obj in self.bucket.objects.filter() ] except Exception as e: - logger.warning("Failed to list models in AWS. {}".format(e)) + logger.warning(f"Failed to list models in AWS. {e}") return [] - def _ensure_bucket_exists(self, bucket_name: Text) -> None: + def _ensure_bucket_exists( + self, bucket_name: Text, region_name: Optional[Text] = None + ) -> None: import boto3 import botocore - bucket_config = {"LocationConstraint": boto3.DEFAULT_SESSION.region_name} + if not region_name: + region_name = boto3.DEFAULT_SESSION.region_name + + bucket_config = {"LocationConstraint": region_name} # noinspection PyUnresolvedReferences try: self.s3.create_bucket( @@ -151,22 +174,22 @@ def _persist_tar(self, file_key: Text, tar_path: Text) -> None: with open(tar_path, "rb") as f: self.s3.Object(self.bucket_name, file_key).put(Body=f) - def _retrieve_tar(self, target_filename: Text) -> None: + def _retrieve_tar(self, model_path: Text) -> None: """Downloads a model that has previously been persisted to s3.""" - - with io.open(target_filename, "wb") as f: - self.bucket.download_fileobj(target_filename, f) + tar_name = os.path.basename(model_path) + with open(tar_name, "wb") as f: + self.bucket.download_fileobj(model_path, f) class GCSPersistor(Persistor): """Store models on Google Cloud Storage. 
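The module-path branch added to `get_persistor` above makes it possible to plug in an external persistor. A hypothetical minimal one, overriding only the hooks the base class leaves unimplemented (the class name and storage directory are invented):

import os
import shutil
from typing import List, Text

from rasa.nlu.persistor import Persistor


class LocalCopyPersistor(Persistor):
    """Keeps model archives in a local directory instead of a cloud bucket."""

    STORAGE_DIR = "/tmp/rasa-models"  # hypothetical storage location

    def _persist_tar(self, file_key: Text, tar_path: Text) -> None:
        os.makedirs(self.STORAGE_DIR, exist_ok=True)
        shutil.copy(tar_path, os.path.join(self.STORAGE_DIR, file_key))

    def _retrieve_tar(self, model_path: Text) -> None:
        tar_name = os.path.basename(model_path)
        shutil.copy(os.path.join(self.STORAGE_DIR, tar_name), tar_name)

    def list_models(self) -> List[Text]:
        return os.listdir(self.STORAGE_DIR)

Passing its module path (e.g. `my_module.LocalCopyPersistor`) as the persistor name would then resolve it through `class_from_module_path`.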
- Fetches them when needed, instead of storing them on the local disk.""" + Fetches them when needed, instead of storing them on the local disk.""" def __init__(self, bucket_name: Text) -> None: from google.cloud import storage - super(GCSPersistor, self).__init__() + super().__init__() self.storage_client = storage.Client() self._ensure_bucket_exists(bucket_name) @@ -183,9 +206,7 @@ def list_models(self) -> List[Text]: for b in blob_iterator ] except Exception as e: - logger.warning( - "Failed to list models in google cloud storage. {}".format(e) - ) + logger.warning(f"Failed to list models in google cloud storage. {e}") return [] def _ensure_bucket_exists(self, bucket_name: Text) -> None: @@ -218,7 +239,7 @@ def __init__( ) -> None: from azure.storage import blob as azureblob - super(AzurePersistor, self).__init__() + super().__init__() self.blob_client = azureblob.BlockBlobService( account_name=azure_account_name, @@ -244,7 +265,7 @@ def list_models(self) -> List[Text]: for b in blob_iterator ] except Exception as e: - logger.warning("Failed to list models azure blob storage. {}".format(e)) + logger.warning(f"Failed to list models azure blob storage. {e}") return [] def _persist_tar(self, file_key: Text, tar_path: Text) -> None: diff --git a/rasa/nlu/registry.py b/rasa/nlu/registry.py index d1d2a47084e4..ea24511df7b5 100644 --- a/rasa/nlu/registry.py +++ b/rasa/nlu/registry.py @@ -8,34 +8,44 @@ import typing from typing import Any, Dict, List, Optional, Text, Type -from rasa.nlu.classifiers.embedding_intent_classifier import EmbeddingIntentClassifier +from rasa.nlu.classifiers.diet_classifier import DIETClassifier +from rasa.nlu.classifiers.fallback_classifier import FallbackClassifier from rasa.nlu.classifiers.keyword_intent_classifier import KeywordIntentClassifier from rasa.nlu.classifiers.mitie_intent_classifier import MitieIntentClassifier from rasa.nlu.classifiers.sklearn_intent_classifier import SklearnIntentClassifier -from rasa.nlu.selectors.embedding_response_selector import ResponseSelector from rasa.nlu.extractors.crf_entity_extractor import CRFEntityExtractor from rasa.nlu.extractors.duckling_http_extractor import DucklingHTTPExtractor from rasa.nlu.extractors.entity_synonyms import EntitySynonymMapper from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor from rasa.nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor -from rasa.nlu.featurizers.count_vectors_featurizer import CountVectorsFeaturizer -from rasa.nlu.featurizers.mitie_featurizer import MitieFeaturizer -from rasa.nlu.featurizers.ngram_featurizer import NGramFeaturizer -from rasa.nlu.featurizers.regex_featurizer import RegexFeaturizer -from rasa.nlu.featurizers.spacy_featurizer import SpacyFeaturizer +from rasa.nlu.extractors.regex_entity_extractor import RegexEntityExtractor +from rasa.nlu.featurizers.sparse_featurizer.lexical_syntactic_featurizer import ( + LexicalSyntacticFeaturizer, +) +from rasa.nlu.featurizers.dense_featurizer.convert_featurizer import ConveRTFeaturizer +from rasa.nlu.featurizers.dense_featurizer.mitie_featurizer import MitieFeaturizer +from rasa.nlu.featurizers.dense_featurizer.spacy_featurizer import SpacyFeaturizer +from rasa.nlu.featurizers.sparse_featurizer.count_vectors_featurizer import ( + CountVectorsFeaturizer, +) +from rasa.nlu.featurizers.dense_featurizer.lm_featurizer import LanguageModelFeaturizer +from rasa.nlu.featurizers.sparse_featurizer.regex_featurizer import RegexFeaturizer from rasa.nlu.model import Metadata +from 
rasa.nlu.selectors.response_selector import ResponseSelector +from rasa.nlu.tokenizers.convert_tokenizer import ConveRTTokenizer from rasa.nlu.tokenizers.jieba_tokenizer import JiebaTokenizer from rasa.nlu.tokenizers.mitie_tokenizer import MitieTokenizer from rasa.nlu.tokenizers.spacy_tokenizer import SpacyTokenizer from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer +from rasa.nlu.tokenizers.lm_tokenizer import LanguageModelTokenizer from rasa.nlu.utils.mitie_utils import MitieNLP from rasa.nlu.utils.spacy_utils import SpacyNLP -from rasa.utils.common import class_from_module_path - +from rasa.nlu.utils.hugging_face.hf_transformers import HFTransformersNLP +from rasa.utils.common import class_from_module_path, raise_warning if typing.TYPE_CHECKING: from rasa.nlu.components import Component - from rasa.nlu.config import RasaNLUModelConfig, RasaNLUModelConfig + from rasa.nlu.config import RasaNLUModelConfig logger = logging.getLogger(__name__) @@ -46,28 +56,35 @@ # utils SpacyNLP, MitieNLP, + HFTransformersNLP, # tokenizers MitieTokenizer, SpacyTokenizer, WhitespaceTokenizer, + ConveRTTokenizer, JiebaTokenizer, + LanguageModelTokenizer, # extractors SpacyEntityExtractor, MitieEntityExtractor, CRFEntityExtractor, DucklingHTTPExtractor, EntitySynonymMapper, + RegexEntityExtractor, # featurizers SpacyFeaturizer, MitieFeaturizer, - NGramFeaturizer, RegexFeaturizer, + LexicalSyntacticFeaturizer, CountVectorsFeaturizer, + ConveRTFeaturizer, + LanguageModelFeaturizer, # classifiers SklearnIntentClassifier, MitieIntentClassifier, KeywordIntentClassifier, - EmbeddingIntentClassifier, + DIETClassifier, + FallbackClassifier, # selectors ResponseSelector, ] @@ -75,112 +92,39 @@ # Mapping from a components name to its class to allow name based lookup. registered_components = {c.name: c for c in component_classes} -# DEPRECATED ensures compatibility, will be remove in future versions -old_style_names = { - "nlp_spacy": "SpacyNLP", - "nlp_mitie": "MitieNLP", - "ner_spacy": "SpacyEntityExtractor", - "ner_mitie": "MitieEntityExtractor", - "ner_crf": "CRFEntityExtractor", - "ner_duckling_http": "DucklingHTTPExtractor", - "ner_synonyms": "EntitySynonymMapper", - "intent_featurizer_spacy": "SpacyFeaturizer", - "intent_featurizer_mitie": "MitieFeaturizer", - "intent_featurizer_ngrams": "NGramFeaturizer", - "intent_entity_featurizer_regex": "RegexFeaturizer", - "intent_featurizer_count_vectors": "CountVectorsFeaturizer", - "tokenizer_mitie": "MitieTokenizer", - "tokenizer_spacy": "SpacyTokenizer", - "tokenizer_whitespace": "WhitespaceTokenizer", - "tokenizer_jieba": "JiebaTokenizer", - "intent_classifier_sklearn": "SklearnIntentClassifier", - "intent_classifier_mitie": "MitieIntentClassifier", - "intent_classifier_keyword": "KeywordIntentClassifier", - "intent_classifier_tensorflow_embedding": "EmbeddingIntentClassifier", -} - -# To simplify usage, there are a couple of model templates, that already add -# necessary components in the right order. They also implement -# the preexisting `backends`. 
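With the old-style names and pipeline templates removed below, pipelines list components explicitly by class name (or full module path). A hypothetical pipeline built from the newly registered components might look as follows; the exact component choice and epochs are invented for the example:

pipeline = [
    {"name": "WhitespaceTokenizer"},
    {"name": "RegexFeaturizer"},
    {"name": "LexicalSyntacticFeaturizer"},
    {"name": "CountVectorsFeaturizer"},
    {"name": "CountVectorsFeaturizer", "analyzer": "char_wb", "min_ngram": 1, "max_ngram": 4},
    {"name": "DIETClassifier", "epochs": 100},
    {"name": "EntitySynonymMapper"},
    {"name": "ResponseSelector", "epochs": 100},
]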
-registered_pipeline_templates = { - "pretrained_embeddings_spacy": [ - {"name": "SpacyNLP"}, - {"name": "SpacyTokenizer"}, - {"name": "SpacyFeaturizer"}, - {"name": "RegexFeaturizer"}, - {"name": "CRFEntityExtractor"}, - {"name": "EntitySynonymMapper"}, - {"name": "SklearnIntentClassifier"}, - ], - "keyword": [{"name": "KeywordIntentClassifier"}], - "supervised_embeddings": [ - {"name": "WhitespaceTokenizer"}, - {"name": "RegexFeaturizer"}, - {"name": "CRFEntityExtractor"}, - {"name": "EntitySynonymMapper"}, - {"name": "CountVectorsFeaturizer"}, - { - "name": "CountVectorsFeaturizer", - "analyzer": "char_wb", - "min_ngram": 1, - "max_ngram": 4, - }, - {"name": "EmbeddingIntentClassifier"}, - ], -} - - -def pipeline_template(s: Text) -> Optional[List[Dict[Text, Any]]]: - import copy - - # do a deepcopy to avoid changing the template configurations - return copy.deepcopy(registered_pipeline_templates.get(s)) - def get_component_class(component_name: Text) -> Type["Component"]: """Resolve component name to a registered components class.""" if component_name not in registered_components: - if component_name not in old_style_names: - try: - return class_from_module_path(component_name) - - except AttributeError: - # when component_name is a path to a class but the path does not contain that class - module_name, _, class_name = component_name.rpartition(".") - raise Exception( - "Failed to find class '{}' in module '{}'.\n" - "".format(component_name, class_name, module_name) - ) - except ImportError as e: - # when component_name is a path to a class but that path is invalid or - # when component_name is a class name and not part of old_style_names - # TODO: Raise ModuleNotFoundError when component_name is a path to a class but that path is invalid - # as soon as we no longer support Python 3.5. See PR #4166 for details - - is_path = "." in component_name - - if is_path: - module_name, _, _ = component_name.rpartition(".") - exception_message = "Failed to find module '{}'. \n{}".format( - module_name, e.msg - ) - else: - exception_message = "Cannot find class '{0}' from global namespace. Please check that there is no typo in the class " - "name and that you have imported the class into the global namespace.".format( - component_name - ) - - raise Exception(exception_message) - else: - # DEPRECATED ensures compatibility, remove in future versions - logger.warning( - "DEPRECATION warning: your nlu config file " - "contains old style component name `{}`, " - "you should change it to its class name: `{}`." - "".format(component_name, old_style_names[component_name]) + try: + return class_from_module_path(component_name) + + except AttributeError: + # when component_name is a path to a class but the path does not contain + # that class + module_name, _, class_name = component_name.rpartition(".") + raise Exception( + f"Failed to find class '{class_name}' in module '{module_name}'.\n" ) - component_name = old_style_names[component_name] + except ImportError as e: + # when component_name is a path to a class but that path is invalid or + # when component_name is a class name and not part of old_style_names + + is_path = "." in component_name + + if is_path: + module_name, _, _ = component_name.rpartition(".") + exception_message = f"Failed to find module '{module_name}'. \n{e}" + else: + exception_message = ( + f"Cannot find class '{component_name}' from global namespace. 
" + f"Please check that there is no typo in the class " + f"name and that you have imported the class into the global " + f"namespace." + ) + + raise ModuleNotFoundError(exception_message) return registered_components[component_name] @@ -190,7 +134,7 @@ def load_component_by_meta( model_dir: Text, metadata: Metadata, cached_component: Optional["Component"], - **kwargs: Any + **kwargs: Any, ) -> Optional["Component"]: """Resolves a component and calls its load method. diff --git a/rasa/nlu/run.py b/rasa/nlu/run.py index 295d43f37aa4..94a91d64ea03 100644 --- a/rasa/nlu/run.py +++ b/rasa/nlu/run.py @@ -1,16 +1,22 @@ import asyncio import logging +import typing +from typing import Optional, Text from rasa.cli.utils import print_success - +from rasa.core.interpreter import INTENT_MESSAGE_PREFIX, RegexInterpreter from rasa.nlu.model import Interpreter from rasa.nlu.utils import json_to_string -from rasa.core.interpreter import RegexInterpreter, INTENT_MESSAGE_PREFIX + +if typing.TYPE_CHECKING: + from rasa.nlu.components import ComponentBuilder logger = logging.getLogger(__name__) -def run_cmdline(model_path, component_builder=None): +def run_cmdline( + model_path: Text, component_builder: Optional["ComponentBuilder"] = None +) -> None: interpreter = Interpreter.load(model_path, component_builder) regex_interpreter = RegexInterpreter() @@ -24,7 +30,7 @@ def run_cmdline(model_path, component_builder=None): else: result = interpreter.parse(message) - print (json_to_string(result)) + print(json_to_string(result)) if __name__ == "__main__": diff --git a/rasa/nlu/schemas/__init__.py b/rasa/nlu/schemas/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/rasa/nlu/schemas/data_schema.py b/rasa/nlu/schemas/data_schema.py new file mode 100644 index 000000000000..d511a1cf7c29 --- /dev/null +++ b/rasa/nlu/schemas/data_schema.py @@ -0,0 +1,87 @@ +from typing import Dict, Text, Any + + +def entity_dict_schema() -> Dict[Text, Any]: + """Returns: schema for defining entities in Markdown format.""" + return { + "type": "object", + "properties": _common_entity_properties(), + "required": ["entity"], + } + + +def _common_entity_properties() -> Dict[Text, Any]: + return { + "entity": {"type": "string"}, + "role": {"type": "string"}, + "group": {"type": "string"}, + "value": {"type": "string"}, + } + + +def rasa_nlu_data_schema() -> Dict[Text, Any]: + """Returns: schema of the Rasa NLU data format (json format).""" + entity_properties = _common_entity_properties() + entity_properties["start"] = {"type": "number"} + entity_properties["end"] = {"type": "number"} + + training_example_schema = { + "type": "object", + "properties": { + "text": {"type": "string", "minLength": 1}, + "intent": {"type": "string"}, + "entities": { + "type": "array", + "items": { + "type": "object", + "properties": entity_properties, + "required": ["start", "end", "entity"], + }, + }, + }, + "required": ["text"], + } + + regex_feature_schema = { + "type": "object", + "properties": {"name": {"type": "string"}, "pattern": {"type": "string"}}, + } + + lookup_table_schema = { + "type": "object", + "properties": { + "name": {"type": "string"}, + "elements": { + "oneOf": [ + {"type": "array", "items": {"type": "string"}}, + {"type": "string"}, + ] + }, + }, + } + + return { + "type": "object", + "properties": { + "rasa_nlu_data": { + "type": "object", + "properties": { + "regex_features": {"type": "array", "items": regex_feature_schema}, + "common_examples": { + "type": "array", + "items": training_example_schema, + }, + 
"intent_examples": { + "type": "array", + "items": training_example_schema, + }, + "entity_examples": { + "type": "array", + "items": training_example_schema, + }, + "lookup_tables": {"type": "array", "items": lookup_table_schema}, + }, + } + }, + "additionalProperties": False, + } diff --git a/rasa/nlu/schemas/nlu.yml b/rasa/nlu/schemas/nlu.yml new file mode 100644 index 000000000000..370fc666deab --- /dev/null +++ b/rasa/nlu/schemas/nlu.yml @@ -0,0 +1,55 @@ +allowempty: True +mapping: + version: + type: "str" + required: False + allowempty: False + nlu: + type: "seq" + matching: "any" + sequence: + - type: "map" + mapping: + intent: &intent_anchor + type: "str" + allowempty: False + metadata: &metadata_anchor + type: "any" + required: False + examples: &examples_anchor + type: "str" + - type: "map" + mapping: + intent: *intent_anchor + metadata: *metadata_anchor + examples: + type: "seq" + sequence: + - type: "map" + mapping: + text: + type: "str" + allowempty: False + metadata: *metadata_anchor + - type: "map" + mapping: + synonym: + type: "str" + examples: *examples_anchor + - type: "map" + mapping: + regex: + type: "str" + examples: *examples_anchor + - type: "map" + mapping: + lookup: + type: "str" + examples: *examples_anchor + responses: + type: "map" + allowempty: True + required: False + stories: + type: "any" + required: False diff --git a/rasa/nlu/selectors/embedding_response_selector.py b/rasa/nlu/selectors/embedding_response_selector.py deleted file mode 100644 index 55f69c5bdca4..000000000000 --- a/rasa/nlu/selectors/embedding_response_selector.py +++ /dev/null @@ -1,195 +0,0 @@ -import logging -import typing -from typing import Any, Dict, Optional, Text - -from rasa.nlu.classifiers.embedding_intent_classifier import EmbeddingIntentClassifier -from rasa.core.actions.action import RESPOND_PREFIX -from rasa.nlu.constants import ( - MESSAGE_RESPONSE_ATTRIBUTE, - MESSAGE_INTENT_ATTRIBUTE, - MESSAGE_TEXT_ATTRIBUTE, - MESSAGE_TOKENS_NAMES, - MESSAGE_ATTRIBUTES, - MESSAGE_SPACY_FEATURES_NAMES, - MESSAGE_VECTOR_FEATURE_NAMES, - OPEN_UTTERANCE_PREDICTION_KEY, - OPEN_UTTERANCE_RANKING_KEY, - MESSAGE_SELECTOR_PROPERTY_NAME, - DEFAULT_OPEN_UTTERANCE_TYPE, -) - -logger = logging.getLogger(__name__) - -if typing.TYPE_CHECKING: - from rasa.nlu.training_data import Message - -import tensorflow as tf - -# avoid warning println on contrib import - remove for tf 2 -tf.contrib._warning = None - - -class ResponseSelector(EmbeddingIntentClassifier): - """Response selector using supervised embeddings. - - The response selector embeds user inputs - and candidate response into the same space. - Supervised embeddings are trained by maximizing similarity between them. - It also provides rankings of the response that did not "win". - - The supervised response selector needs to be preceded by - a featurizer in the pipeline. - This featurizer creates the features used for the embeddings. - It is recommended to use ``CountVectorsFeaturizer`` that - can be optionally preceded by ``SpacyNLP`` and ``SpacyTokenizer``. - - Based on the starspace idea from: https://arxiv.org/abs/1709.03856. - However, in this implementation the `mu` parameter is treated differently - and additional hidden layers are added together with dropout. 
- """ - - provides = ["response", "response_ranking"] - - requires = [MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE]] - - # default properties (DOC MARKER - don't remove) - defaults = { - # nn architecture - # sizes of hidden layers before the embedding layer for input words - # the number of hidden layers is thus equal to the length of this list - "hidden_layers_sizes_a": [256, 128], - # sizes of hidden layers before the embedding layer for intent labels - # the number of hidden layers is thus equal to the length of this list - "hidden_layers_sizes_b": [256, 128], - # Whether to share the hidden layer weights between input words and intent labels - "share_hidden_layers": False, - # training parameters - # initial and final batch sizes - batch size will be - # linearly increased for each epoch - "batch_size": [64, 256], - # how to create batches - "batch_strategy": "balanced", # string 'sequence' or 'balanced' - # number of epochs - "epochs": 300, - # set random seed to any int to get reproducible results - "random_seed": None, - # embedding parameters - # dimension size of embedding vectors - "embed_dim": 20, - # the type of the similarity - "num_neg": 20, - # flag if minimize only maximum similarity over incorrect actions - "similarity_type": "auto", # string 'auto' or 'cosine' or 'inner' - # the type of the loss function - "loss_type": "softmax", # string 'softmax' or 'margin' - # how similar the algorithm should try - # to make embedding vectors for correct intent labels - "mu_pos": 0.8, # should be 0.0 < ... < 1.0 for 'cosine' - # maximum negative similarity for incorrect intent labels - "mu_neg": -0.4, # should be -1.0 < ... < 1.0 for 'cosine' - # flag: if true, only minimize the maximum similarity for - # incorrect intent labels - "use_max_sim_neg": True, - # scale loss inverse proportionally to confidence of correct prediction - "scale_loss": True, - # regularization parameters - # the scale of L2 regularization - "C2": 0.002, - # the scale of how critical the algorithm should be of minimizing the - # maximum similarity between embeddings of different intent labels - "C_emb": 0.8, - # dropout rate for rnn - "droprate": 0.2, - # visualization of accuracy - # how often to calculate training accuracy - "evaluate_every_num_epochs": 20, # small values may hurt performance - # how many examples to use for calculation of training accuracy - "evaluate_on_num_examples": 0, # large values may hurt performance, - # selector config - # name of the intent for which this response selector is to be trained - "retrieval_intent": None, - } - # end default properties (DOC MARKER - don't remove) - - def _load_selector_params(self, config: Dict[Text, Any]): - self.retrieval_intent = config["retrieval_intent"] - if not self.retrieval_intent: - # retrieval intent was left to its default value - logger.info( - "Retrieval intent parameter was left to its default value. This response selector will be trained" - "on training examples combining all retrieval intents." 
- ) - - def _load_params(self) -> None: - super(ResponseSelector, self)._load_params() - self._load_selector_params(self.component_config) - - @staticmethod - def _set_message_property( - message: "Message", prediction_dict: Dict[Text, Any], selector_key: Text - ): - - message_selector_properties = message.get(MESSAGE_SELECTOR_PROPERTY_NAME, {}) - message_selector_properties[selector_key] = prediction_dict - message.set( - MESSAGE_SELECTOR_PROPERTY_NAME, - message_selector_properties, - add_to_output=True, - ) - - def preprocess_train_data(self, training_data): - """Performs sanity checks on training data, extracts encodings for labels and prepares data for training""" - - if self.retrieval_intent: - training_data = training_data.filter_by_intent(self.retrieval_intent) - - label_id_dict = self._create_label_id_dict( - training_data, attribute=MESSAGE_RESPONSE_ATTRIBUTE - ) - - self.inverted_label_dict = {v: k for k, v in label_id_dict.items()} - self._encoded_all_label_ids = self._create_encoded_label_ids( - training_data, - label_id_dict, - attribute=MESSAGE_RESPONSE_ATTRIBUTE, - attribute_feature_name=MESSAGE_VECTOR_FEATURE_NAMES[ - MESSAGE_RESPONSE_ATTRIBUTE - ], - ) - - # check if number of negatives is less than number of label_ids - logger.debug( - "Check if num_neg {} is smaller than " - "number of label_ids {}, " - "else set num_neg to the number of label_ids - 1" - "".format(self.num_neg, self._encoded_all_label_ids.shape[0]) - ) - # noinspection PyAttributeOutsideInit - self.num_neg = min(self.num_neg, self._encoded_all_label_ids.shape[0] - 1) - - session_data = self._create_session_data( - training_data, label_id_dict, attribute=MESSAGE_RESPONSE_ATTRIBUTE - ) - - self.check_input_dimension_consistency(session_data) - - return session_data - - def process(self, message: "Message", **kwargs: Any) -> None: - """Return the most likely response and its similarity to the input.""" - - label, label_ranking = self.predict_label(message) - - selector_key = ( - self.retrieval_intent - if self.retrieval_intent - else DEFAULT_OPEN_UTTERANCE_TYPE - ) - - logger.debug( - "Adding following selector key to message property: {}".format(selector_key) - ) - - prediction_dict = {"response": label, "ranking": label_ranking} - - self._set_message_property(message, prediction_dict, selector_key) diff --git a/rasa/nlu/selectors/response_selector.py b/rasa/nlu/selectors/response_selector.py new file mode 100644 index 000000000000..994115368593 --- /dev/null +++ b/rasa/nlu/selectors/response_selector.py @@ -0,0 +1,628 @@ +import logging + +import numpy as np +import tensorflow as tf +from pathlib import Path + +from typing import Any, Dict, Optional, Text, Tuple, Union, List, Type + +import rasa.utils.io as io_utils +from rasa.nlu.config import InvalidConfigError +from rasa.nlu.training_data import TrainingData, Message +from rasa.nlu.components import Component +from rasa.nlu.featurizers.featurizer import Featurizer +from rasa.nlu.model import Metadata +from rasa.nlu.classifiers.diet_classifier import ( + DIETClassifier, + DIET, + LABEL_IDS, + EntityTagSpec, + TEXT_SEQUENCE_LENGTH, + LABEL_SEQUENCE_LENGTH, + TEXT_SEQUENCE_FEATURES, + LABEL_SEQUENCE_FEATURES, + TEXT_SENTENCE_FEATURES, + LABEL_SENTENCE_FEATURES, +) +from rasa.utils.tensorflow.constants import ( + LABEL, + HIDDEN_LAYERS_SIZES, + SHARE_HIDDEN_LAYERS, + TRANSFORMER_SIZE, + NUM_TRANSFORMER_LAYERS, + NUM_HEADS, + BATCH_SIZES, + BATCH_STRATEGY, + EPOCHS, + RANDOM_SEED, + LEARNING_RATE, + DENSE_DIMENSION, + RANKING_LENGTH, + LOSS_TYPE, + 
SIMILARITY_TYPE, + NUM_NEG, + SPARSE_INPUT_DROPOUT, + DENSE_INPUT_DROPOUT, + MASKED_LM, + ENTITY_RECOGNITION, + INTENT_CLASSIFICATION, + EVAL_NUM_EXAMPLES, + EVAL_NUM_EPOCHS, + UNIDIRECTIONAL_ENCODER, + DROP_RATE, + DROP_RATE_ATTENTION, + WEIGHT_SPARSITY, + NEGATIVE_MARGIN_SCALE, + REGULARIZATION_CONSTANT, + SCALE_LOSS, + USE_MAX_NEG_SIM, + MAX_NEG_SIM, + MAX_POS_SIM, + EMBEDDING_DIMENSION, + BILOU_FLAG, + KEY_RELATIVE_ATTENTION, + VALUE_RELATIVE_ATTENTION, + MAX_RELATIVE_POSITION, + RETRIEVAL_INTENT, + SOFTMAX, + AUTO, + BALANCED, + TENSORBOARD_LOG_DIR, + TENSORBOARD_LOG_LEVEL, + CONCAT_DIMENSION, + FEATURIZERS, +) +from rasa.nlu.constants import ( + RESPONSE, + RESPONSE_SELECTOR_PROPERTY_NAME, + RESPONSE_KEY_ATTRIBUTE, + INTENT, + DEFAULT_OPEN_UTTERANCE_TYPE, + TEXT, +) + +from rasa.utils.tensorflow.model_data import RasaModelData +from rasa.utils.tensorflow.models import RasaModel + +logger = logging.getLogger(__name__) + + +class ResponseSelector(DIETClassifier): + """Response selector using supervised embeddings. + + The response selector embeds user inputs + and candidate response into the same space. + Supervised embeddings are trained by maximizing similarity between them. + It also provides rankings of the response that did not "win". + + The supervised response selector needs to be preceded by + a featurizer in the pipeline. + This featurizer creates the features used for the embeddings. + It is recommended to use ``CountVectorsFeaturizer`` that + can be optionally preceded by ``SpacyNLP`` and ``SpacyTokenizer``. + + Based on the starspace idea from: https://arxiv.org/abs/1709.03856. + However, in this implementation the `mu` parameter is treated differently + and additional hidden layers are added together with dropout. + """ + + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [Featurizer] + + defaults = { + # ## Architecture of the used neural network + # Hidden layer sizes for layers before the embedding layers for user message + # and labels. + # The number of hidden layers is equal to the length of the corresponding + # list. + HIDDEN_LAYERS_SIZES: {TEXT: [256, 128], LABEL: [256, 128]}, + # Whether to share the hidden layer weights between input words and responses + SHARE_HIDDEN_LAYERS: False, + # Number of units in transformer + TRANSFORMER_SIZE: None, + # Number of transformer layers + NUM_TRANSFORMER_LAYERS: 0, + # Number of attention heads in transformer + NUM_HEADS: 4, + # If 'True' use key relative embeddings in attention + KEY_RELATIVE_ATTENTION: False, + # If 'True' use key relative embeddings in attention + VALUE_RELATIVE_ATTENTION: False, + # Max position for relative embeddings + MAX_RELATIVE_POSITION: None, + # Use a unidirectional or bidirectional encoder. + UNIDIRECTIONAL_ENCODER: False, + # ## Training parameters + # Initial and final batch sizes: + # Batch size will be linearly increased for each epoch. + BATCH_SIZES: [64, 256], + # Strategy used when creating batches. + # Can be either 'sequence' or 'balanced'. + BATCH_STRATEGY: BALANCED, + # Number of epochs to train + EPOCHS: 300, + # Set random seed to any 'int' to get reproducible results + RANDOM_SEED: None, + # Initial learning rate for the optimizer + LEARNING_RATE: 0.001, + # ## Parameters for embeddings + # Dimension size of embedding vectors + EMBEDDING_DIMENSION: 20, + # Default dense dimension to use if no dense features are present. + DENSE_DIMENSION: {TEXT: 512, LABEL: 512}, + # Default dimension to use for concatenating sequence and sentence features. 
+ CONCAT_DIMENSION: {TEXT: 512, LABEL: 512}, + # The number of incorrect labels. The algorithm will minimize + # their similarity to the user input during training. + NUM_NEG: 20, + # Type of similarity measure to use, either 'auto' or 'cosine' or 'inner'. + SIMILARITY_TYPE: AUTO, + # The type of the loss function, either 'softmax' or 'margin'. + LOSS_TYPE: SOFTMAX, + # Number of top actions to normalize scores for loss type 'softmax'. + # Set to 0 to turn off normalization. + RANKING_LENGTH: 10, + # Indicates how similar the algorithm should try to make embedding vectors + # for correct labels. + # Should be 0.0 < ... < 1.0 for 'cosine' similarity type. + MAX_POS_SIM: 0.8, + # Maximum negative similarity for incorrect labels. + # Should be -1.0 < ... < 1.0 for 'cosine' similarity type. + MAX_NEG_SIM: -0.4, + # If 'True' the algorithm only minimizes maximum similarity over + # incorrect intent labels, used only if 'loss_type' is set to 'margin'. + USE_MAX_NEG_SIM: True, + # Scale loss inverse proportionally to confidence of correct prediction + SCALE_LOSS: True, + # ## Regularization parameters + # The scale of regularization + REGULARIZATION_CONSTANT: 0.002, + # Sparsity of the weights in dense layers + WEIGHT_SPARSITY: 0.0, + # The scale of how important is to minimize the maximum similarity + # between embeddings of different labels. + NEGATIVE_MARGIN_SCALE: 0.8, + # Dropout rate for encoder + DROP_RATE: 0.2, + # Dropout rate for attention + DROP_RATE_ATTENTION: 0, + # If 'True' apply dropout to sparse input tensors + SPARSE_INPUT_DROPOUT: False, + # If 'True' apply dropout to dense input tensors + DENSE_INPUT_DROPOUT: False, + # ## Evaluation parameters + # How often calculate validation accuracy. + # Small values may hurt performance, e.g. model accuracy. + EVAL_NUM_EPOCHS: 20, + # How many examples to use for hold out validation set + # Large values may hurt performance, e.g. model accuracy. + EVAL_NUM_EXAMPLES: 0, + # ## Selector config + # If 'True' random tokens of the input message will be masked and the model + # should predict those tokens. + MASKED_LM: False, + # Name of the intent for which this response selector is to be trained + RETRIEVAL_INTENT: None, + # If you want to use tensorboard to visualize training and validation metrics, + # set this option to a valid output directory. + TENSORBOARD_LOG_DIR: None, + # Define when training metrics for tensorboard should be logged. + # Either after every epoch or for every training step. + # Valid values: 'epoch' and 'minibatch' + TENSORBOARD_LOG_LEVEL: "epoch", + # Specify what features to use as sequence and sentence features + # By default all features in the pipeline are used. 
+ FEATURIZERS: [], + } + + def __init__( + self, + component_config: Optional[Dict[Text, Any]] = None, + index_label_id_mapping: Optional[Dict[int, Text]] = None, + entity_tag_specs: Optional[List[EntityTagSpec]] = None, + model: Optional[RasaModel] = None, + retrieval_intent_mapping: Optional[Dict[Text, Text]] = None, + responses: Optional[Dict[Text, List[Dict[Text, Any]]]] = None, + ) -> None: + + component_config = component_config or {} + + # the following properties cannot be adapted for the ResponseSelector + component_config[INTENT_CLASSIFICATION] = True + component_config[ENTITY_RECOGNITION] = False + component_config[BILOU_FLAG] = None + self.retrieval_intent_mapping = retrieval_intent_mapping or {} + self.responses = responses or {} + + super().__init__( + component_config, index_label_id_mapping, entity_tag_specs, model + ) + + @property + def label_key(self) -> Text: + return LABEL_IDS + + @staticmethod + def model_class() -> Type[RasaModel]: + return DIET2DIET + + def _load_selector_params(self, config: Dict[Text, Any]) -> None: + self.retrieval_intent = config[RETRIEVAL_INTENT] + + def _check_config_parameters(self) -> None: + super()._check_config_parameters() + self._load_selector_params(self.component_config) + + @staticmethod + def _create_retrieval_intent_mapping( + training_data: TrainingData, + ) -> Dict[Text, Text]: + """Create response_key dictionary""" + + retrieval_intent_mapping = {} + for example in training_data.intent_examples: + retrieval_intent_mapping[ + example.get(RESPONSE) + ] = f"{example.get(INTENT)}/{example.get(RESPONSE_KEY_ATTRIBUTE)}" + + return retrieval_intent_mapping + + @staticmethod + def _set_message_property( + message: Message, prediction_dict: Dict[Text, Any], selector_key: Text + ) -> None: + message_selector_properties = message.get(RESPONSE_SELECTOR_PROPERTY_NAME, {}) + message_selector_properties[selector_key] = prediction_dict + message.set( + RESPONSE_SELECTOR_PROPERTY_NAME, + message_selector_properties, + add_to_output=True, + ) + + def preprocess_train_data(self, training_data: TrainingData) -> RasaModelData: + """Prepares data for training. + + Performs sanity checks on training data, extracts encodings for labels. + """ + + if self.retrieval_intent: + training_data = training_data.filter_training_examples( + lambda ex: self.retrieval_intent == ex.get(INTENT) + ) + else: + # retrieval intent was left to its default value + logger.info( + "Retrieval intent parameter was left to its default value. This " + "response selector will be trained on training examples combining " + "all retrieval intents." + ) + + label_id_index_mapping = self._label_id_index_mapping( + training_data, attribute=RESPONSE + ) + self.retrieval_intent_mapping = self._create_retrieval_intent_mapping( + training_data + ) + self.responses = training_data.responses + + if not label_id_index_mapping: + # no labels are present to train + return RasaModelData() + + self.index_label_id_mapping = self._invert_mapping(label_id_index_mapping) + + self._label_data = self._create_label_data( + training_data, label_id_index_mapping, attribute=RESPONSE + ) + + model_data = self._create_model_data( + training_data.intent_examples, + label_id_index_mapping, + label_attribute=RESPONSE, + ) + + self._check_input_dimension_consistency(model_data) + + return model_data + + def _full_response(self, label: Dict[Text, Any]) -> Optional[Dict[Text, Any]]: + """Given a label return the full response based on the labels id. 
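Not part of the diff, but for orientation: the `retrieval_intent` option above is typically set per selector instance in the pipeline, so that each selector is trained only on examples of one retrieval intent; the intent names are invented:

pipeline = [
    # ... tokenizer and featurizers ...
    {"name": "ResponseSelector", "retrieval_intent": "faq", "epochs": 100},
    {"name": "ResponseSelector", "retrieval_intent": "chitchat", "epochs": 100},
]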
+ + Args: + label: predicted label by the selector + + Returns: + The match for the label that was found in the known responses. In + contrast to the predicted label, the response doesn't only contain + the text but also buttons, images, ... + """ + for key, responses in self.responses.items(): + for response in responses: + if hash(response.get(TEXT, "")) == label.get("id"): + return response + return None + + def process(self, message: Message, **kwargs: Any) -> None: + """Return the most likely response and its similarity to the input.""" + + out = self._predict(message) + label, label_ranking = self._predict_label(out) + retrieval_intent_name = self.retrieval_intent_mapping.get(label.get("name")) + + for ranking in label_ranking: + ranking["full_retrieval_intent"] = self.retrieval_intent_mapping.get( + ranking.get("name") + ) + + selector_key = ( + self.retrieval_intent + if self.retrieval_intent + else DEFAULT_OPEN_UTTERANCE_TYPE + ) + + logger.debug( + f"Adding following selector key to message property: {selector_key}" + ) + + prediction_dict = { + "response": self._full_response(label) or {TEXT: label.get("name")}, + "ranking": label_ranking, + "full_retrieval_intent": retrieval_intent_name, + } + + self._set_message_property(message, prediction_dict, selector_key) + + def persist(self, file_name: Text, model_dir: Text) -> Dict[Text, Any]: + """Persist this model into the passed directory. + + Return the metadata necessary to load the model again. + """ + if self.model is None: + return {"file": None} + + super().persist(file_name, model_dir) + + model_dir = Path(model_dir) + + io_utils.json_pickle( + model_dir / f"{file_name}.retrieval_intent_mapping.pkl", + self.retrieval_intent_mapping, + ) + + return {"file": file_name, "responses": self.responses} + + @classmethod + def load( + cls, + meta: Dict[Text, Any], + model_dir: Text = None, + model_metadata: Metadata = None, + cached_component: Optional["ResponseSelector"] = None, + **kwargs: Any, + ) -> "ResponseSelector": + """Loads the trained model from the provided directory.""" + + model = super().load( + meta, model_dir, model_metadata, cached_component, **kwargs + ) + if not meta.get("file"): + return model # pytype: disable=bad-return-type + + file_name = meta.get("file") + model_dir = Path(model_dir) + + retrieval_intent_mapping = io_utils.json_unpickle( + model_dir / f"{file_name}.retrieval_intent_mapping.pkl" + ) + + model.retrieval_intent_mapping = retrieval_intent_mapping + model.collected_responses = meta.get("responses", {}) + + return model # pytype: disable=bad-return-type + + +class DIET2DIET(DIET): + def _check_data(self) -> None: + if TEXT_SENTENCE_FEATURES not in self.data_signature: + raise InvalidConfigError( + f"No text features specified. " + f"Cannot train '{self.__class__.__name__}' model." + ) + if LABEL_SENTENCE_FEATURES not in self.data_signature: + raise InvalidConfigError( + f"No label features specified. " + f"Cannot train '{self.__class__.__name__}' model." + ) + if ( + self.config[SHARE_HIDDEN_LAYERS] + and self.data_signature[TEXT_SENTENCE_FEATURES] + != self.data_signature[LABEL_SENTENCE_FEATURES] + ): + raise ValueError( + "If hidden layer weights are shared, data signatures " + "for text_features and label_features must coincide." 
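A rough sketch (not from the PR) of the selector output that `process` above attaches to the message under the response-selector property; all values are invented and only meant to show the shape:

selector_output = {
    "faq": {  # selector key: the configured retrieval intent, or the default key
        "response": {"text": "We are open from 9 to 5."},
        "ranking": [
            {"name": "We are open from 9 to 5.", "confidence": 0.91,
             "full_retrieval_intent": "faq/opening_hours"},
            {"name": "You can reach us by email.", "confidence": 0.04,
             "full_retrieval_intent": "faq/contact"},
        ],
        "full_retrieval_intent": "faq/opening_hours",
    }
}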
+ ) + + def _create_metrics(self) -> None: + # self.metrics preserve order + # output losses first + self.mask_loss = tf.keras.metrics.Mean(name="m_loss") + self.response_loss = tf.keras.metrics.Mean(name="r_loss") + # output accuracies second + self.mask_acc = tf.keras.metrics.Mean(name="m_acc") + self.response_acc = tf.keras.metrics.Mean(name="r_acc") + + def _update_metrics_to_log(self) -> None: + debug_log_level = logging.getLogger("rasa").level == logging.DEBUG + + if self.config[MASKED_LM]: + self.metrics_to_log.append("m_acc") + if debug_log_level: + self.metrics_to_log.append("m_loss") + + self.metrics_to_log.append("r_acc") + if debug_log_level: + self.metrics_to_log.append("r_loss") + + self._log_metric_info() + + def _log_metric_info(self) -> None: + metric_name = {"t": "total", "m": "mask", "r": "response"} + logger.debug("Following metrics will be logged during training: ") + for metric in self.metrics_to_log: + parts = metric.split("_") + name = f"{metric_name[parts[0]]} {parts[1]}" + logger.debug(f" {metric} ({name})") + + def _prepare_layers(self) -> None: + self.text_name = TEXT + self.label_name = TEXT if self.config[SHARE_HIDDEN_LAYERS] else LABEL + + self._prepare_sequence_layers(self.text_name) + self._prepare_sequence_layers(self.label_name) + if self.config[MASKED_LM]: + self._prepare_mask_lm_layers(self.text_name) + self._prepare_label_classification_layers() + + def _create_all_labels(self) -> Tuple[tf.Tensor, tf.Tensor]: + all_label_ids = self.tf_label_data[LABEL_IDS][0] + + sequence_mask_label = super()._get_mask_for( + self.tf_label_data, LABEL_SEQUENCE_LENGTH + ) + batch_dim = tf.shape(self.tf_label_data[LABEL_IDS][0])[0] + sequence_lengths_label = self._get_sequence_lengths( + self.tf_label_data, LABEL_SEQUENCE_LENGTH, batch_dim + ) + mask_label = self._compute_mask(sequence_lengths_label) + + label_transformed, _, _, _ = self._create_sequence( + self.tf_label_data[LABEL_SEQUENCE_FEATURES], + self.tf_label_data[LABEL_SENTENCE_FEATURES], + sequence_mask_label, + mask_label, + self.label_name, + ) + sentence_label = self._last_token(label_transformed, sequence_lengths_label) + + all_labels_embed = self._tf_layers[f"embed.{LABEL}"](sentence_label) + + return all_label_ids, all_labels_embed + + def batch_loss( + self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]] + ) -> tf.Tensor: + tf_batch_data = self.batch_to_model_data_format(batch_in, self.data_signature) + + batch_dim = self._get_batch_dim(tf_batch_data) + sequence_mask_text = super()._get_mask_for(tf_batch_data, TEXT_SEQUENCE_LENGTH) + sequence_lengths_text = self._get_sequence_lengths( + tf_batch_data, TEXT_SEQUENCE_LENGTH, batch_dim + ) + mask_text = self._compute_mask(sequence_lengths_text) + + ( + text_transformed, + text_in, + text_seq_ids, + lm_mask_bool_text, + ) = self._create_sequence( + tf_batch_data[TEXT_SEQUENCE_FEATURES], + tf_batch_data[TEXT_SENTENCE_FEATURES], + sequence_mask_text, + mask_text, + self.text_name, + sparse_dropout=self.config[SPARSE_INPUT_DROPOUT], + dense_dropout=self.config[DENSE_INPUT_DROPOUT], + masked_lm_loss=self.config[MASKED_LM], + sequence_ids=True, + ) + + sequence_mask_label = super()._get_mask_for( + tf_batch_data, LABEL_SEQUENCE_LENGTH + ) + sequence_lengths_label = self._get_sequence_lengths( + tf_batch_data, LABEL_SEQUENCE_LENGTH, batch_dim + ) + mask_label = self._compute_mask(sequence_lengths_label) + + label_transformed, _, _, _ = self._create_sequence( + tf_batch_data[LABEL_SEQUENCE_FEATURES], + tf_batch_data[LABEL_SENTENCE_FEATURES], + 
sequence_mask_label, + mask_label, + self.label_name, + ) + + losses = [] + + if self.config[MASKED_LM]: + loss, acc = self._mask_loss( + text_transformed, + text_in, + text_seq_ids, + lm_mask_bool_text, + self.text_name, + ) + + self.mask_loss.update_state(loss) + self.mask_acc.update_state(acc) + losses.append(loss) + + # get sentence feature vector for label classification + sentence_vector_text = self._last_token(text_transformed, sequence_lengths_text) + sentence_vector_label = self._last_token( + label_transformed, sequence_lengths_label + ) + label_ids = tf_batch_data[LABEL_IDS][0] + + loss, acc = self._calculate_label_loss( + sentence_vector_text, sentence_vector_label, label_ids + ) + self.response_loss.update_state(loss) + self.response_acc.update_state(acc) + losses.append(loss) + + return tf.math.add_n(losses) + + def batch_predict( + self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]] + ) -> Dict[Text, tf.Tensor]: + tf_batch_data = self.batch_to_model_data_format( + batch_in, self.predict_data_signature + ) + + sequence_mask_text = super()._get_mask_for(tf_batch_data, TEXT_SEQUENCE_LENGTH) + sequence_lengths_text = self._get_sequence_lengths( + tf_batch_data, TEXT_SEQUENCE_LENGTH, batch_dim=1 + ) + mask_text = self._compute_mask(sequence_lengths_text) + + text_transformed, _, _, _ = self._create_sequence( + tf_batch_data[TEXT_SEQUENCE_FEATURES], + tf_batch_data[TEXT_SENTENCE_FEATURES], + sequence_mask_text, + mask_text, + self.text_name, + ) + + out = {} + + if self.all_labels_embed is None: + _, self.all_labels_embed = self._create_all_labels() + + # get sentence feature vector for intent classification + sentence_vector = self._last_token(text_transformed, sequence_lengths_text) + sentence_vector_embed = self._tf_layers[f"embed.{TEXT}"](sentence_vector) + + sim_all = self._tf_layers[f"loss.{LABEL}"].sim( + sentence_vector_embed[:, tf.newaxis, :], + self.all_labels_embed[tf.newaxis, :, :], + ) + scores = self._tf_layers[f"loss.{LABEL}"].confidence_from_sim( + sim_all, self.config[SIMILARITY_TYPE] + ) + out["i_scores"] = scores + + return out diff --git a/rasa/nlu/test.py b/rasa/nlu/test.py index 9143638590c4..ca269a4b49c6 100644 --- a/rasa/nlu/test.py +++ b/rasa/nlu/test.py @@ -16,30 +16,45 @@ Dict, Any, ) - +import rasa.utils.plotting as plot_utils import rasa.utils.io as io_utils -from rasa.constants import TEST_DATA_FILE, TRAIN_DATA_FILE +from rasa.constants import TEST_DATA_FILE, TRAIN_DATA_FILE, NLG_DATA_FILE from rasa.nlu.constants import ( DEFAULT_OPEN_UTTERANCE_TYPE, - MESSAGE_SELECTOR_PROPERTY_NAME, + RESPONSE_SELECTOR_PROPERTY_NAME, OPEN_UTTERANCE_PREDICTION_KEY, + EXTRACTOR, + PRETRAINED_EXTRACTORS, + NO_ENTITY_TAG, + ENTITY_ATTRIBUTE_TYPE, + ENTITY_ATTRIBUTE_GROUP, + ENTITY_ATTRIBUTE_ROLE, + RESPONSE, + INTENT, + TEXT, + ENTITIES, + TOKENS_NAMES, + ENTITY_ATTRIBUTE_CONFIDENCE_TYPE, + ENTITY_ATTRIBUTE_CONFIDENCE_ROLE, + ENTITY_ATTRIBUTE_CONFIDENCE_GROUP, + INTENT_NAME_KEY, ) from rasa.model import get_model -from rasa.nlu import config, training_data, utils -from rasa.nlu.utils import write_to_file from rasa.nlu.components import ComponentBuilder from rasa.nlu.config import RasaNLUModelConfig from rasa.nlu.model import Interpreter, Trainer, TrainingData from rasa.nlu.components import Component -from rasa.nlu.tokenizers import Token -from rasa.core.constants import RESPOND_PREFIX +from rasa.nlu.tokenizers.tokenizer import Token +from rasa.utils.tensorflow.constants import ENTITY_RECOGNITION logger = logging.getLogger(__name__) -PRETRAINED_EXTRACTORS = 
{"DucklingHTTPExtractor", "SpacyEntityExtractor"} +# Exclude 'EntitySynonymMapper' and 'ResponseSelector' as their super class +# performs entity extraction but those two classifiers don't +ENTITY_PROCESSORS = {"EntitySynonymMapper", "ResponseSelector"} -ENTITY_PROCESSORS = {"EntitySynonymMapper"} +EXTRACTORS_WITH_CONFIDENCES = {"CRFEntityExtractor", "DIETClassifier"} CVEvaluationResult = namedtuple("Results", "train test") @@ -51,7 +66,7 @@ ResponseSelectionEvaluationResult = namedtuple( "ResponseSelectionEvaluationResult", - "intent_target " "response_target " "response_prediction " "message " "confidence", + "intent_target response_target response_prediction message confidence", ) EntityEvaluationResult = namedtuple( @@ -60,147 +75,29 @@ IntentMetrics = Dict[Text, List[float]] EntityMetrics = Dict[Text, Dict[Text, List[float]]] - - -def plot_confusion_matrix( - cm: np.array, - classes: np.array, - normalize: bool = False, - title: Text = "Confusion matrix", - cmap=None, - zmin: int = 1, - out: Optional[Text] = None, -) -> None: # pragma: no cover - """Print and plot the confusion matrix for the intent classification. - Normalization can be applied by setting `normalize=True`.""" - import matplotlib.pyplot as plt - from matplotlib.colors import LogNorm - - zmax = cm.max() - plt.clf() - if not cmap: - cmap = plt.cm.Blues - plt.imshow( - cm, - interpolation="nearest", - cmap=cmap, - aspect="auto", - norm=LogNorm(vmin=zmin, vmax=zmax), - ) - plt.title(title) - plt.colorbar() - tick_marks = np.arange(len(classes)) - plt.xticks(tick_marks, classes, rotation=90) - plt.yticks(tick_marks, classes) - - if normalize: - cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis] - logger.info("Normalized confusion matrix: \n{}".format(cm)) - else: - logger.info("Confusion matrix, without normalization: \n{}".format(cm)) - - thresh = cm.max() / 2.0 - for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): - plt.text( - j, - i, - cm[i, j], - horizontalalignment="center", - color="white" if cm[i, j] > thresh else "black", - ) - - plt.ylabel("True label") - plt.xlabel("Predicted label") - - # save confusion matrix to file before showing it - if out: - fig = plt.gcf() - fig.set_size_inches(20, 20) - fig.savefig(out, bbox_inches="tight") - - -def plot_histogram( - hist_data: List[List[float]], out: Optional[Text] = None -) -> None: # pragma: no cover - """Plot a histogram of the confidence distribution of the predictions in - two columns. - Wine-ish colour for the confidences of hits. - Blue-ish colour for the confidences of misses. 
- Saves the plot to a file.""" - import matplotlib.pyplot as plt - - colors = ["#009292", "#920000"] # - bins = [0.05 * i for i in range(1, 21)] - - plt.xlim([0, 1]) - plt.hist(hist_data, bins=bins, color=colors) - plt.xticks(bins) - plt.title("Intent Prediction Confidence Distribution") - plt.xlabel("Confidence") - plt.ylabel("Number of Samples") - plt.legend(["hits", "misses"]) - - if out: - fig = plt.gcf() - fig.set_size_inches(10, 10) - fig.savefig(out, bbox_inches="tight") +ResponseSelectionMetrics = Dict[Text, List[float]] def log_evaluation_table( report: Text, precision: float, f1: float, accuracy: float ) -> None: # pragma: no cover """Log the sklearn evaluation metrics.""" - - logger.info("F1-Score: {}".format(f1)) - logger.info("Precision: {}".format(precision)) - logger.info("Accuracy: {}".format(accuracy)) - logger.info("Classification report: \n{}".format(report)) - - -def get_evaluation_metrics( - targets: Iterable[Any], - predictions: Iterable[Any], - output_dict: bool = False, - exclude_label: Text = None, -) -> Tuple[Union[Text, Dict[Text, Dict[Text, float]]], float, float, float]: - """Compute the f1, precision, accuracy and summary report from sklearn.""" - from sklearn import metrics - - targets = clean_labels(targets) - predictions = clean_labels(predictions) - - labels = get_unique_labels(targets, exclude_label) - if not labels: - logger.warning("No labels to evaluate. Skip evaluation.") - return {}, 0.0, 0.0, 0.0 - - report = metrics.classification_report( - targets, predictions, labels=labels, output_dict=output_dict - ) - precision = metrics.precision_score( - targets, predictions, labels=labels, average="weighted" - ) - f1 = metrics.f1_score(targets, predictions, labels=labels, average="weighted") - accuracy = metrics.accuracy_score(targets, predictions) - - return report, precision, f1, accuracy - - -def get_unique_labels( - targets: Iterable[Any], exclude_label: Optional[Text] -) -> List[Text]: - """Get unique labels. Exclude 'exclude_label' if specified.""" - labels = set(targets) - if exclude_label and exclude_label in labels: - labels.remove(exclude_label) - return list(labels) + logger.info(f"F1-Score: {f1}") + logger.info(f"Precision: {precision}") + logger.info(f"Accuracy: {accuracy}") + logger.info(f"Classification report: \n{report}") def remove_empty_intent_examples( - intent_results: List[IntentEvaluationResult] + intent_results: List[IntentEvaluationResult], ) -> List[IntentEvaluationResult]: - """Remove those examples without an intent.""" + """Remove those examples without an intent. + + Args: + intent_results: intent evaluation results + Returns: intent evaluation results + """ filtered = [] for r in intent_results: # substitute None values with empty string @@ -215,9 +112,15 @@ def remove_empty_intent_examples( def remove_empty_response_examples( - response_results: List[ResponseSelectionEvaluationResult] + response_results: List[ResponseSelectionEvaluationResult], ) -> List[ResponseSelectionEvaluationResult]: - """Remove those examples without a response.""" + """Remove those examples without a response. + + Args: + response_results: response selection evaluation results + + Returns: response selection evaluation results + """ filtered = [] for r in response_results: @@ -232,36 +135,46 @@ def remove_empty_response_examples( return filtered -def clean_labels(labels: Iterable[Any]) -> List[Text]: - """Get rid of `None` intents. 
sklearn metrics do not support them.""" - return [l if l is not None else "" for l in labels] - +def drop_intents_below_freq( + training_data: TrainingData, cutoff: int = 5 +) -> TrainingData: + """Remove intent groups with less than cutoff instances. -def drop_intents_below_freq(td: TrainingData, cutoff: int = 5) -> TrainingData: - """Remove intent groups with less than cutoff instances.""" + Args: + training_data: training data + cutoff: threshold - logger.debug("Raw data intent examples: {}".format(len(td.intent_examples))) + Returns: updated training data + """ + logger.debug( + "Raw data intent examples: {}".format(len(training_data.intent_examples)) + ) keep_examples = [ ex - for ex in td.intent_examples - if td.examples_per_intent[ex.get("intent")] >= cutoff + for ex in training_data.intent_examples + if training_data.number_of_examples_per_intent[ex.get(INTENT)] >= cutoff ] - return TrainingData(keep_examples, td.entity_synonyms, td.regex_features) + return TrainingData( + keep_examples, training_data.entity_synonyms, training_data.regex_features + ) -def collect_nlu_successes( +def write_intent_successes( intent_results: List[IntentEvaluationResult], successes_filename: Text ) -> None: - """Log messages which result in successful predictions - and save them to file""" + """Write successful intent predictions to a file. + Args: + intent_results: intent evaluation result + successes_filename: filename of file to save successful predictions to + """ successes = [ { "text": r.message, "intent": r.intent_target, "intent_prediction": { - "name": r.intent_prediction, + INTENT_NAME_KEY: r.intent_prediction, "confidence": r.confidence, }, } @@ -270,28 +183,28 @@ def collect_nlu_successes( ] if successes: - utils.write_json_to_file(successes_filename, successes) - logger.info( - "Successful intent predictions saved to {}.".format(successes_filename) - ) - logger.debug( - "\n\nSuccessfully predicted the following intents: \n{}".format(successes) - ) + io_utils.dump_obj_as_json_to_file(successes_filename, successes) + logger.info(f"Successful intent predictions saved to {successes_filename}.") + logger.debug(f"\n\nSuccessfully predicted the following intents: \n{successes}") else: logger.info("No successful intent predictions found.") -def collect_nlu_errors( +def write_intent_errors( intent_results: List[IntentEvaluationResult], errors_filename: Text ) -> None: - """Log messages which result in wrong predictions and save them to file""" + """Write incorrect intent predictions to a file. 
+ Args: + intent_results: intent evaluation result + errors_filename: filename of file to save incorrect predictions to + """ errors = [ { "text": r.message, "intent": r.intent_target, "intent_prediction": { - "name": r.intent_prediction, + INTENT_NAME_KEY: r.intent_prediction, "confidence": r.confidence, }, } @@ -300,8 +213,8 @@ def collect_nlu_errors( ] if errors: - utils.write_json_to_file(errors_filename, errors) - logger.info("Incorrect intent predictions saved to {}.".format(errors_filename)) + io_utils.dump_obj_as_json_to_file(errors_filename, errors) + logger.info(f"Incorrect intent predictions saved to {errors_filename}.") logger.debug( "\n\nThese intent examples could not be classified " "correctly: \n{}".format(errors) @@ -310,6 +223,74 @@ def collect_nlu_errors( logger.info("Your model predicted all intents successfully.") +def write_response_successes( + response_results: List[ResponseSelectionEvaluationResult], successes_filename: Text +) -> None: + """Write successful response selection predictions to a file. + + Args: + response_results: response selection evaluation result + successes_filename: filename of file to save successful predictions to + """ + + successes = [ + { + "text": r.message, + "intent_target": r.intent_target, + "response_target": r.response_target, + "response_prediction": { + "name": r.response_prediction, + "confidence": r.confidence, + }, + } + for r in response_results + if r.response_prediction == r.response_target + ] + + if successes: + io_utils.dump_obj_as_json_to_file(successes_filename, successes) + logger.info(f"Successful response predictions saved to {successes_filename}.") + logger.debug( + f"\n\nSuccessfully predicted the following responses: \n{successes}" + ) + else: + logger.info("No successful response predictions found.") + + +def write_response_errors( + response_results: List[ResponseSelectionEvaluationResult], errors_filename: Text +) -> None: + """Write incorrect response selection predictions to a file. + + Args: + response_results: response selection evaluation result + errors_filename: filename of file to save incorrect predictions to + """ + errors = [ + { + "text": r.message, + "intent_target": r.intent_target, + "response_target": r.response_target, + "response_prediction": { + "name": r.response_prediction, + "confidence": r.confidence, + }, + } + for r in response_results + if r.response_prediction != r.response_target + ] + + if errors: + io_utils.dump_obj_as_json_to_file(errors_filename, errors) + logger.info(f"Incorrect response predictions saved to {errors_filename}.") + logger.debug( + "\n\nThese response examples could not be classified " + "correctly: \n{}".format(errors) + ) + else: + logger.info("Your model predicted all responses successfully.") + + def plot_attribute_confidences( results: Union[ List[IntentEvaluationResult], List[ResponseSelectionEvaluationResult] @@ -317,11 +298,17 @@ def plot_attribute_confidences( hist_filename: Optional[Text], target_key: Text, prediction_key: Text, + title: Text, ) -> None: - import matplotlib.pyplot as plt + """Create histogram of confidence distribution. 
-    # create histogram of confidence distribution, save to file and display
-    plt.gcf().clear()
+    Args:
+        results: evaluation results
+        hist_filename: filename to save plot to
+        target_key: key of target in results
+        prediction_key: key of predictions in results
+        title: title of plot
+    """
     pos_hist = [
         r.confidence
         for r in results
@@ -334,12 +321,50 @@ def plot_attribute_confidences(
         if getattr(r, target_key) != getattr(r, prediction_key)
     ]
 
-    plot_histogram([pos_hist, neg_hist], hist_filename)
+    plot_utils.plot_histogram([pos_hist, neg_hist], title, hist_filename)
+
+
+def plot_entity_confidences(
+    merged_targets: List[Text],
+    merged_predictions: List[Text],
+    merged_confidences: List[float],
+    hist_filename: Text,
+    title: Text,
+) -> None:
+    """Create histogram of confidence distribution.
+
+    Args:
+        merged_targets: list of true entity labels
+        merged_predictions: list of predicted entity labels
+        merged_confidences: list of confidence values for the predictions
+        hist_filename: filename to save plot to
+        title: title of plot
+    """
+    pos_hist = [
+        confidence
+        for target, prediction, confidence in zip(
+            merged_targets, merged_predictions, merged_confidences
+        )
+        if target != NO_ENTITY and target == prediction
+    ]
+
+    neg_hist = [
+        confidence
+        for target, prediction, confidence in zip(
+            merged_targets, merged_predictions, merged_confidences
+        )
+        if prediction != NO_ENTITY and target != prediction
+    ]
+
+    plot_utils.plot_histogram([pos_hist, neg_hist], title, hist_filename)
 
 
 def evaluate_response_selections(
     response_selection_results: List[ResponseSelectionEvaluationResult],
-    report_folder: Optional[Text],
+    output_directory: Optional[Text],
+    successes: bool,
+    errors: bool,
+    disable_plotting: bool,
 ) -> Dict:  # pragma: no cover
     """Creates summary statistics for response selection.
@@ -347,33 +372,57 @@
     Others are filtered out.
     Returns a dictionary of containing the evaluation result.
 
+    Args:
+        response_selection_results: response selection evaluation results
+        output_directory: directory to store files to
+        successes: if True correct predictions are written to disk
+        errors: if True incorrect predictions are written to disk
+        disable_plotting: if True no plots are created
+
+    Returns: dictionary with evaluation results
     """
+    import sklearn.metrics
+    import sklearn.utils.multiclass
+    from rasa.test import get_evaluation_metrics
 
-    # remove empty intent targets
+    # remove empty response targets
    num_examples = len(response_selection_results)
    response_selection_results = remove_empty_response_examples(
        response_selection_results
    )
 
    logger.info(
-        "Response Selection Evaluation: Only considering those "
-        "{} examples that have a defined response out "
-        "of {} examples".format(len(response_selection_results), num_examples)
+        f"Response Selection Evaluation: Only considering those "
+        f"{len(response_selection_results)} examples that have a defined response out "
+        f"of {num_examples} examples."
) + response_to_intent_target = {} + for result in response_selection_results: + response_to_intent_target[result.response_target] = result.intent_target + target_responses, predicted_responses = _targets_predictions_from( response_selection_results, "response_target", "response_prediction" ) - if report_folder: + confusion_matrix = sklearn.metrics.confusion_matrix( + target_responses, predicted_responses + ) + labels = sklearn.utils.multiclass.unique_labels( + target_responses, predicted_responses + ) + + if output_directory: report, precision, f1, accuracy = get_evaluation_metrics( target_responses, predicted_responses, output_dict=True ) + report = _add_confused_labels_to_report(report, confusion_matrix, labels) - report_filename = os.path.join(report_folder, "response_selection_report.json") - - utils.write_json_to_file(report_filename, report) - logger.info("Classification report saved to {}.".format(report_filename)) + report_filename = os.path.join( + output_directory, "response_selection_report.json" + ) + io_utils.dump_obj_as_json_to_file(report_filename, report) + logger.info(f"Classification report saved to {report_filename}.") else: report, precision, f1, accuracy = get_evaluation_metrics( @@ -382,6 +431,50 @@ def evaluate_response_selections( if isinstance(report, str): log_evaluation_table(report, precision, f1, accuracy) + if successes: + successes_filename = "response_selection_successes.json" + if output_directory: + successes_filename = os.path.join(output_directory, successes_filename) + # save classified samples to file for debugging + write_response_successes(response_selection_results, successes_filename) + + if errors: + errors_filename = "response_selection_errors.json" + if output_directory: + errors_filename = os.path.join(output_directory, errors_filename) + # log and save misclassified samples to file for debugging + write_response_errors(response_selection_results, errors_filename) + + if not disable_plotting: + confusion_matrix_filename = "response_selection_confusion_matrix.png" + if output_directory: + confusion_matrix_filename = os.path.join( + output_directory, confusion_matrix_filename + ) + _labels = [ + response_to_intent_target[label] + if label in response_to_intent_target + else f"'{label[:20]}...' (response not present in test data)" + for label in labels + ] + plot_utils.plot_confusion_matrix( + confusion_matrix, + classes=_labels, + title="Response Selection Confusion Matrix", + output_file=confusion_matrix_filename, + ) + + histogram_filename = "response_selection_histogram.png" + if output_directory: + histogram_filename = os.path.join(output_directory, histogram_filename) + plot_attribute_confidences( + response_selection_results, + histogram_filename, + "response_target", + "response_prediction", + title="Response Selection Prediction Confidence Distribution", + ) + predictions = [ { "text": res.message, @@ -402,48 +495,111 @@ def evaluate_response_selections( } +def _add_confused_labels_to_report( + report: Dict[Text, Dict[Text, Any]], + confusion_matrix: np.ndarray, + labels: List[Text], + exclude_labels: Optional[List[Text]] = None, +) -> Dict[Text, Dict[Text, Union[Dict, Any]]]: + """Adds a field "confused_with" to the evaluation report. + + The value is a dict of {"false_positive_label": false_positive_count} pairs. + If there are no false positives in the confusion matrix, + the dict will be empty. 
Typically we include the two most + commonly false positive labels, three in the rare case that + the diagonal element in the confusion matrix is not one of the + three highest values in the row. + + Args: + report: the evaluation report + confusion_matrix: confusion matrix + labels: list of labels + + Returns: updated evaluation report + """ + if exclude_labels is None: + exclude_labels = [] + + # sort confusion matrix by false positives + indices = np.argsort(confusion_matrix, axis=1) + n_candidates = min(3, len(labels)) + + for label in labels: + if label in exclude_labels: + continue + # it is possible to predict intent 'None' + if report.get(label): + report[label]["confused_with"] = {} + + for i, label in enumerate(labels): + if label in exclude_labels: + continue + for j in range(n_candidates): + label_idx = indices[i, -(1 + j)] + false_pos_label = labels[label_idx] + false_positives = int(confusion_matrix[i, label_idx]) + if ( + false_pos_label != label + and false_pos_label not in exclude_labels + and false_positives > 0 + ): + report[label]["confused_with"][false_pos_label] = false_positives + + return report + + def evaluate_intents( intent_results: List[IntentEvaluationResult], output_directory: Optional[Text], successes: bool, errors: bool, - confmat_filename: Optional[Text], - intent_hist_filename: Optional[Text], + disable_plotting: bool, ) -> Dict: # pragma: no cover - """Creates a confusion matrix and summary statistics for intent predictions. + """Creates summary statistics for intents. - Log samples which could not be classified correctly and save them to file. - Creates a confidence histogram which is saved to file. - Wrong and correct prediction confidences will be - plotted in separate bars of the same histogram plot. - Only considers those examples with a set intent. - Others are filtered out. Returns a dictionary of containing the - evaluation result. + Only considers those examples with a set intent. Others are filtered out. + Returns a dictionary of containing the evaluation result. + + Args: + intent_results: intent evaluation results + output_directory: directory to store files to + successes: if True correct predictions are written to disk + errors: if True incorrect predictions are written to disk + disable_plotting: if True no plots are created + + Returns: dictionary with evaluation results """ + import sklearn.metrics + import sklearn.utils.multiclass + from rasa.test import get_evaluation_metrics # remove empty intent targets num_examples = len(intent_results) intent_results = remove_empty_intent_examples(intent_results) logger.info( - "Intent Evaluation: Only considering those " - "{} examples that have a defined intent out " - "of {} examples".format(len(intent_results), num_examples) + f"Intent Evaluation: Only considering those {len(intent_results)} examples " + f"that have a defined intent out of {num_examples} examples." 
) target_intents, predicted_intents = _targets_predictions_from( intent_results, "intent_target", "intent_prediction" ) + confusion_matrix = sklearn.metrics.confusion_matrix( + target_intents, predicted_intents + ) + labels = sklearn.utils.multiclass.unique_labels(target_intents, predicted_intents) + if output_directory: report, precision, f1, accuracy = get_evaluation_metrics( target_intents, predicted_intents, output_dict=True ) + report = _add_confused_labels_to_report(report, confusion_matrix, labels) report_filename = os.path.join(output_directory, "intent_report.json") - - utils.write_json_to_file(report_filename, report) - logger.info("Classification report saved to {}.".format(report_filename)) + io_utils.dump_obj_as_json_to_file(report_filename, report) + logger.info(f"Classification report saved to {report_filename}.") else: report, precision, f1, accuracy = get_evaluation_metrics( @@ -457,40 +613,39 @@ def evaluate_intents( if output_directory: successes_filename = os.path.join(output_directory, successes_filename) # save classified samples to file for debugging - collect_nlu_successes(intent_results, successes_filename) + write_intent_successes(intent_results, successes_filename) if errors: errors_filename = "intent_errors.json" if output_directory: errors_filename = os.path.join(output_directory, errors_filename) # log and save misclassified samples to file for debugging - collect_nlu_errors(intent_results, errors_filename) - - if confmat_filename: - from sklearn.metrics import confusion_matrix - from sklearn.utils.multiclass import unique_labels - import matplotlib.pyplot as plt + write_intent_errors(intent_results, errors_filename) + if not disable_plotting: + confusion_matrix_filename = "intent_confusion_matrix.png" if output_directory: - confmat_filename = os.path.join(output_directory, confmat_filename) - intent_hist_filename = os.path.join(output_directory, intent_hist_filename) - - cnf_matrix = confusion_matrix(target_intents, predicted_intents) - labels = unique_labels(target_intents, predicted_intents) - plot_confusion_matrix( - cnf_matrix, + confusion_matrix_filename = os.path.join( + output_directory, confusion_matrix_filename + ) + plot_utils.plot_confusion_matrix( + confusion_matrix, classes=labels, title="Intent Confusion matrix", - out=confmat_filename, + output_file=confusion_matrix_filename, ) - plt.show(block=False) + histogram_filename = "intent_histogram.png" + if output_directory: + histogram_filename = os.path.join(output_directory, histogram_filename) plot_attribute_confidences( - intent_results, intent_hist_filename, "intent_target", "intent_prediction" + intent_results, + histogram_filename, + "intent_target", + "intent_prediction", + title="Intent Prediction Confidence Distribution", ) - plt.show(block=False) - predictions = [ { "text": res.message, @@ -512,22 +667,56 @@ def evaluate_intents( def merge_labels( aligned_predictions: List[Dict], extractor: Optional[Text] = None -) -> np.array: +) -> List[Text]: """Concatenates all labels of the aligned predictions. + Takes the aligned prediction labels which are grouped for each message - and concatenates them.""" + and concatenates them. 
+ + Args: + aligned_predictions: aligned predictions + extractor: entity extractor name + + Returns: concatenated predictions + """ if extractor: label_lists = [ap["extractor_labels"][extractor] for ap in aligned_predictions] else: label_lists = [ap["target_labels"] for ap in aligned_predictions] - flattened = list(itertools.chain(*label_lists)) - return np.array(flattened) + return list(itertools.chain(*label_lists)) + + +def merge_confidences( + aligned_predictions: List[Dict], extractor: Optional[Text] = None +) -> List[float]: + """Concatenates all confidences of the aligned predictions. + + Takes the aligned prediction confidences which are grouped for each message + and concatenates them. + + Args: + aligned_predictions: aligned predictions + extractor: entity extractor name + + Returns: concatenated confidences + """ + + label_lists = [ap["confidences"][extractor] for ap in aligned_predictions] + return list(itertools.chain(*label_lists)) def substitute_labels(labels: List[Text], old: Text, new: Text) -> List[Text]: - """Replaces label names in a list of labels.""" + """Replaces label names in a list of labels. + + Args: + labels: list of labels + old: old label name that should be replaced + new: new label name + + Returns: updated labels + """ return [new if label == old else label for label in labels] @@ -536,14 +725,22 @@ def write_incorrect_entity_predictions( merged_targets: List[Text], merged_predictions: List[Text], error_filename: Text, -): +) -> None: + """Write incorrect entity predictions to a file. + + Args: + entity_results: response selection evaluation result + merged_predictions: list of predicted entity labels + merged_targets: list of true entity labels + error_filename: filename of file to save incorrect predictions to + """ errors = collect_incorrect_entity_predictions( entity_results, merged_predictions, merged_targets ) if errors: - utils.write_json_to_file(error_filename, errors) - logger.info("Incorrect entity predictions saved to {}.".format(error_filename)) + io_utils.dump_obj_as_json_to_file(error_filename, errors) + logger.info(f"Incorrect entity predictions saved to {error_filename}.") logger.debug( "\n\nThese intent examples could not be classified " "correctly: \n{}".format(errors) @@ -557,6 +754,15 @@ def collect_incorrect_entity_predictions( merged_predictions: List[Text], merged_targets: List[Text], ): + """Get incorrect entity predictions. + + Args: + entity_results: entity evaluation results + merged_predictions: list of predicted entity labels + merged_targets: list of true entity labels + + Returns: list of incorrect predictions + """ errors = [] offset = 0 for entity_result in entity_results: @@ -579,18 +785,24 @@ def write_successful_entity_predictions( merged_targets: List[Text], merged_predictions: List[Text], successes_filename: Text, -): +) -> None: + """Write correct entity predictions to a file. 
+ + Args: + entity_results: response selection evaluation result + merged_predictions: list of predicted entity labels + merged_targets: list of true entity labels + successes_filename: filename of file to save correct predictions to + """ successes = collect_successful_entity_predictions( entity_results, merged_predictions, merged_targets ) if successes: - utils.write_json_to_file(successes_filename, successes) - logger.info( - "Successful entity predictions saved to {}.".format(successes_filename) - ) + io_utils.dump_obj_as_json_to_file(successes_filename, successes) + logger.info(f"Successful entity predictions saved to {successes_filename}.") logger.debug( - "\n\nSuccessfully predicted the following entities: \n{}".format(successes) + f"\n\nSuccessfully predicted the following entities: \n{successes}" ) else: logger.info("No successful entity prediction found.") @@ -601,6 +813,15 @@ def collect_successful_entity_predictions( merged_predictions: List[Text], merged_targets: List[Text], ): + """Get correct entity predictions. + + Args: + entity_results: entity evaluation results + merged_predictions: list of predicted entity labels + merged_targets: list of true entity labels + + Returns: list of correct predictions + """ successes = [] offset = 0 for entity_result in entity_results: @@ -625,24 +846,51 @@ def evaluate_entities( entity_results: List[EntityEvaluationResult], extractors: Set[Text], output_directory: Optional[Text], - successes: bool = False, - errors: bool = False, + successes: bool, + errors: bool, + disable_plotting: bool, ) -> Dict: # pragma: no cover """Creates summary statistics for each entity extractor. - Logs precision, recall, and F1 per entity type for each extractor.""" + + Logs precision, recall, and F1 per entity type for each extractor. 
+ + Args: + entity_results: entity evaluation results + extractors: entity extractors to consider + output_directory: directory to store files to + successes: if True correct predictions are written to disk + errors: if True incorrect predictions are written to disk + disable_plotting: if True no plots are created + + Returns: dictionary with evaluation results + """ + import sklearn.metrics + import sklearn.utils.multiclass + from rasa.test import get_evaluation_metrics aligned_predictions = align_all_entity_predictions(entity_results, extractors) merged_targets = merge_labels(aligned_predictions) - merged_targets = substitute_labels(merged_targets, "O", NO_ENTITY) + merged_targets = substitute_labels(merged_targets, NO_ENTITY_TAG, NO_ENTITY) result = {} for extractor in extractors: merged_predictions = merge_labels(aligned_predictions, extractor) - merged_predictions = substitute_labels(merged_predictions, "O", NO_ENTITY) - logger.info("Evaluation for entity extractor: {} ".format(extractor)) + merged_predictions = substitute_labels( + merged_predictions, NO_ENTITY_TAG, NO_ENTITY + ) + + logger.info(f"Evaluation for entity extractor: {extractor} ") + + confusion_matrix = sklearn.metrics.confusion_matrix( + merged_targets, merged_predictions + ) + labels = sklearn.utils.multiclass.unique_labels( + merged_targets, merged_predictions + ) + if output_directory: - report_filename = "{}_report.json".format(extractor) + report_filename = f"{extractor}_report.json" extractor_report_filename = os.path.join(output_directory, report_filename) report, precision, f1, accuracy = get_evaluation_metrics( @@ -651,7 +899,11 @@ def evaluate_entities( output_dict=True, exclude_label=NO_ENTITY, ) - utils.write_json_to_file(extractor_report_filename, report) + report = _add_confused_labels_to_report( + report, confusion_matrix, labels, [NO_ENTITY] + ) + + io_utils.dump_obj_as_json_to_file(extractor_report_filename, report) logger.info( "Classification report for '{}' saved to '{}'." 
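# --- Illustrative sketch (not part of the diff above) ----------------------
# A minimal, self-contained example of how the "confused_with" entries that the
# new evaluation reports contain can be derived from a confusion matrix. It
# mirrors the logic of `_add_confused_labels_to_report` added earlier in this
# file; the helper name `confused_with` and the `top_n` parameter are
# assumptions made for this sketch only.
import numpy as np
from typing import Dict, List, Text


def confused_with(
    confusion_matrix: np.ndarray, labels: List[Text], top_n: int = 3
) -> Dict[Text, Dict[Text, int]]:
    """For each label, collect its most frequent false-positive labels."""
    report: Dict[Text, Dict[Text, int]] = {label: {} for label in labels}
    # sort each row so the largest counts come last
    indices = np.argsort(confusion_matrix, axis=1)
    n_candidates = min(top_n, len(labels))

    for i, label in enumerate(labels):
        for j in range(n_candidates):
            label_idx = indices[i, -(1 + j)]
            false_pos_label = labels[label_idx]
            false_positives = int(confusion_matrix[i, label_idx])
            if false_pos_label != label and false_positives > 0:
                report[label][false_pos_label] = false_positives

    return report


# Example: two "greet" messages were predicted as "goodbye".
cm = np.array([[5, 2], [0, 7]])
print(confused_with(cm, ["greet", "goodbye"]))
# -> {'greet': {'goodbye': 2}, 'goodbye': {}}
# --------------------------------------------------------------------------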
@@ -669,7 +921,7 @@ def evaluate_entities(
         log_evaluation_table(report, precision, f1, accuracy)
 
         if successes:
-            successes_filename = "{}_successes.json".format(extractor)
+            successes_filename = f"{extractor}_successes.json"
             if output_directory:
                 successes_filename = os.path.join(output_directory, successes_filename)
             # save classified samples to file for debugging
@@ -678,7 +930,7 @@ def evaluate_entities(
             )
 
         if errors:
-            errors_filename = "{}_errors.json".format(extractor)
+            errors_filename = f"{extractor}_errors.json"
             if output_directory:
                 errors_filename = os.path.join(output_directory, errors_filename)
             # log and save misclassified samples to file for debugging
@@ -686,6 +938,34 @@ def evaluate_entities(
                 entity_results, merged_targets, merged_predictions, errors_filename
             )
 
+        if not disable_plotting:
+            confusion_matrix_filename = f"{extractor}_confusion_matrix.png"
+            if output_directory:
+                confusion_matrix_filename = os.path.join(
+                    output_directory, confusion_matrix_filename
+                )
+            plot_utils.plot_confusion_matrix(
+                confusion_matrix,
+                classes=labels,
+                title="Entity Confusion matrix",
+                output_file=confusion_matrix_filename,
+            )
+
+            if extractor in EXTRACTORS_WITH_CONFIDENCES:
+                merged_confidences = merge_confidences(aligned_predictions, extractor)
+                histogram_filename = f"{extractor}_histogram.png"
+                if output_directory:
+                    histogram_filename = os.path.join(
+                        output_directory, histogram_filename
+                    )
+                plot_entity_confidences(
+                    merged_targets,
+                    merged_predictions,
+                    merged_confidences,
+                    title="Entity Prediction Confidence Distribution",
+                    hist_filename=histogram_filename,
+                )
+
         result[extractor] = {
             "report": report,
             "precision": precision,
@@ -711,18 +991,21 @@ def does_token_cross_borders(token: Token, entity: Dict) -> bool:
 
 def determine_intersection(token: Token, entity: Dict) -> int:
     """Calculates how many characters a given token and entity share."""
-    pos_token = set(range(token.offset, token.end))
+    pos_token = set(range(token.start, token.end))
     pos_entity = set(range(entity["start"], entity["end"]))
     return len(pos_token.intersection(pos_entity))
 
 
 def do_entities_overlap(entities: List[Dict]) -> bool:
     """Checks if entities overlap.
+
     I.e. cross each others start and end boundaries.
-    :param entities: list of entities
-    :return: boolean
-    """
+
+    Args:
+        entities: list of entities
+
+    Returns: true if entities overlap, false otherwise.
+    """
     sorted_entities = sorted(entities, key=lambda e: e["start"])
     for i in range(len(sorted_entities) - 1):
         curr_ent = sorted_entities[i]
@@ -731,19 +1014,21 @@ def do_entities_overlap(entities: List[Dict]) -> bool:
             next_ent["start"] < curr_ent["end"]
             and next_ent["entity"] != curr_ent["entity"]
         ):
-            logger.warn("Overlapping entity {} with {}".format(curr_ent, next_ent))
+            logger.warning(f"Overlapping entity {curr_ent} with {next_ent}")
             return True
 
     return False
 
 
-def find_intersecting_entites(token: Token, entities: List[Dict]) -> List[Dict]:
+def find_intersecting_entities(token: Token, entities: List[Dict]) -> List[Dict]:
     """Finds the entities that intersect with a token.
- :param token: a single token - :param entities: entities found by a single extractor - :return: list of entities - """ + Args: + token: a single token + entities: entities found by a single extractor + + Returns: list of entities + """ candidates = [] for e in entities: if is_token_within_entity(token, e): @@ -753,45 +1038,88 @@ def find_intersecting_entites(token: Token, entities: List[Dict]) -> List[Dict]: logger.debug( "Token boundary error for token {}({}, {}) " "and entity {}" - "".format(token.text, token.offset, token.end, e) + "".format(token.text, token.start, token.end, e) ) return candidates -def pick_best_entity_fit(token: Token, candidates: List[Dict]) -> Text: - """Determines the token label given intersecting entities. - :param token: a single token - :param candidates: entities found by a single extractor - :return: entity type +def pick_best_entity_fit( + token: Token, candidates: List[Dict[Text, Any]] +) -> Optional[Dict[Text, Any]]: """ + Determines the best fitting entity given intersecting entities. + Args: + token: a single token + candidates: entities found by a single extractor + attribute_key: the attribute key of interest + + Returns: + the value of the attribute key of the best fitting entity + """ if len(candidates) == 0: - return "O" + return None elif len(candidates) == 1: - return candidates[0]["entity"] + return candidates[0] else: best_fit = np.argmax([determine_intersection(token, c) for c in candidates]) - return candidates[best_fit]["entity"] + return candidates[int(best_fit)] def determine_token_labels( - token: Token, entities: List[Dict], extractors: Optional[Set[Text]] + token: Token, + entities: List[Dict], + extractors: Optional[Set[Text]] = None, + attribute_key: Text = ENTITY_ATTRIBUTE_TYPE, ) -> Text: - """Determines the token label given entities that do not overlap. + """ + Determines the token label for the provided attribute key given entities that do + not overlap. + Args: token: a single token entities: entities found by a single extractor extractors: list of extractors + attribute_key: the attribute key for which the entity type should be returned Returns: entity type """ + entity = determine_entity_for_token(token, entities, extractors) - if len(entities) == 0: - return "O" + if entity is None: + return NO_ENTITY_TAG + + label = entity.get(attribute_key) + + if not label: + return NO_ENTITY_TAG + + return label + + +def determine_entity_for_token( + token: Token, + entities: List[Dict[Text, Any]], + extractors: Optional[Set[Text]] = None, +) -> Optional[Dict[Text, Any]]: + """ + Determines the best fitting entity for the given token, given entities that do + not overlap. + + Args: + token: a single token + entities: entities found by a single extractor + extractors: list of extractors + + Returns: + entity type + """ + if entities is None or len(entities) == 0: + return None if not do_extractors_support_overlap(extractors) and do_entities_overlap(entities): - raise ValueError("The possible entities should not overlap") + raise ValueError("The possible entities should not overlap.") - candidates = find_intersecting_entites(token, entities) + candidates = find_intersecting_entities(token, entities) return pick_best_entity_fit(token, candidates) @@ -809,45 +1137,122 @@ def align_entity_predictions( result: EntityEvaluationResult, extractors: Set[Text] ) -> Dict: """Aligns entity predictions to the message tokens. + Determines for every token the true label based on the prediction targets and the label assigned by each single extractor. 
- :param result: entity prediction result - :param extractors: the entity extractors that should be considered - :return: dictionary containing the true token labels and token labels + + Args: + result: entity evaluation result + extractors: the entity extractors that should be considered + + Returns: dictionary containing the true token labels and token labels from the extractors """ - true_token_labels = [] - entities_by_extractors = { + entities_by_extractors: Dict[Text, List] = { extractor: [] for extractor in extractors - } # type: Dict[Text, List] + } for p in result.entity_predictions: - entities_by_extractors[p["extractor"]].append(p) - extractor_labels = { + entities_by_extractors[p[EXTRACTOR]].append(p) + extractor_labels: Dict[Text, List] = {extractor: [] for extractor in extractors} + extractor_confidences: Dict[Text, List] = { extractor: [] for extractor in extractors - } # type: Dict[Text, List] + } for t in result.tokens: - true_token_labels.append(determine_token_labels(t, result.entity_targets, None)) + true_token_labels.append(_concat_entity_labels(t, result.entity_targets)) for extractor, entities in entities_by_extractors.items(): - extracted = determine_token_labels(t, entities, set([extractor])) - extractor_labels[extractor].append(extracted) + extracted_labels = _concat_entity_labels(t, entities, {extractor}) + extracted_confidences = _get_entity_confidences(t, entities, {extractor}) + extractor_labels[extractor].append(extracted_labels) + extractor_confidences[extractor].append(extracted_confidences) return { "target_labels": true_token_labels, - "extractor_labels": dict(extractor_labels), + "extractor_labels": extractor_labels, + "confidences": extractor_confidences, } +def _concat_entity_labels( + token: Token, entities: List[Dict], extractors: Optional[Set[Text]] = None +) -> Text: + """Concatenate labels for entity type, role, and group for evaluation. + + In order to calculate metrics also for entity type, role, and group we need to + concatenate their labels. For example, 'location.destination'. This allows + us to report metrics for every combination of entity type, role, and group. + + Args: + token: the token we are looking at + entities: the available entities + extractors: the extractor of interest + + Returns: + the entity label of the provided token + """ + entity_label = determine_token_labels( + token, entities, extractors, ENTITY_ATTRIBUTE_TYPE + ) + group_label = determine_token_labels( + token, entities, extractors, ENTITY_ATTRIBUTE_GROUP + ) + role_label = determine_token_labels( + token, entities, extractors, ENTITY_ATTRIBUTE_ROLE + ) + + if entity_label == role_label == group_label == NO_ENTITY_TAG: + return NO_ENTITY_TAG + + labels = [entity_label, group_label, role_label] + labels = [label for label in labels if label != NO_ENTITY_TAG] + + return ".".join(labels) + + +def _get_entity_confidences( + token: Token, entities: List[Dict], extractors: Optional[Set[Text]] = None +) -> float: + """Get the confidence value of the best fitting entity. + + If multiple confidence values are present, e.g. for type, role, group, we + pick the lowest confidence value. 
+ + Args: + token: the token we are looking at + entities: the available entities + extractors: the extractor of interest + + Returns: + the confidence value + """ + entity = determine_entity_for_token(token, entities, extractors) + + if entity is None: + return 0.0 + + if entity.get("extractor") not in EXTRACTORS_WITH_CONFIDENCES: + return 0.0 + + conf_type = entity.get(ENTITY_ATTRIBUTE_CONFIDENCE_TYPE) or 1.0 + conf_role = entity.get(ENTITY_ATTRIBUTE_CONFIDENCE_ROLE) or 1.0 + conf_group = entity.get(ENTITY_ATTRIBUTE_CONFIDENCE_GROUP) or 1.0 + + return min(conf_type, conf_role, conf_group) + + def align_all_entity_predictions( entity_results: List[EntityEvaluationResult], extractors: Set[Text] ) -> List[Dict]: - """ Aligns entity predictions to the message tokens for the whole dataset - using align_entity_predictions - :param entity_results: list of entity prediction results - :param extractors: the entity extractors that should be considered - :return: list of dictionaries containing the true token labels and token - labels from the extractors + """Aligns entity predictions to the message tokens for the whole dataset + using align_entity_predictions. + + Args: + entity_results: list of entity prediction results + extractors: the entity extractors that should be considered + + Returns: list of dictionaries containing the true token labels and token + labels from the extractors """ aligned_predictions = [] for result in entity_results: @@ -866,19 +1271,26 @@ def get_eval_data( """Runs the model for the test set and extracts targets and predictions. Returns intent results (intent targets and predictions, the original - messages and the confidences of the predictions), as well as entity - results(entity_targets, entity_predictions, and tokens).""" + messages and the confidences of the predictions), response results ( + response targets and predictions) as well as entity results + (entity_targets, entity_predictions, and tokens). + + Args: + interpreter: the interpreter + test_data: test data + Returns: intent, response, and entity evaluation results + """ logger.info("Running model for predictions:") intent_results, entity_results, response_selection_results = [], [], [] response_labels = [ - e.get("response") + e.get(RESPONSE) for e in test_data.intent_examples - if e.get("response") is not None + if e.get(RESPONSE) is not None ] - intent_labels = [e.get("intent") for e in test_data.intent_examples] + intent_labels = [e.get(INTENT) for e in test_data.intent_examples] should_eval_intents = ( is_intent_classifier_present(interpreter) and len(set(intent_labels)) >= 2 ) @@ -895,21 +1307,22 @@ def get_eval_data( result = interpreter.parse(example.text, only_output_properties=False) if should_eval_intents: - intent_prediction = result.get("intent", {}) or {} + intent_prediction = result.get(INTENT, {}) or {} intent_results.append( IntentEvaluationResult( - example.get("intent", ""), - intent_prediction.get("name"), - result.get("text", {}), + example.get(INTENT, ""), + intent_prediction.get(INTENT_NAME_KEY), + result.get(TEXT, {}), intent_prediction.get("confidence"), ) ) if should_eval_response_selection: - # including all examples here. Empty response examples are filtered at the time of metric calculation - intent_target = example.get("intent", "") - selector_properties = result.get(MESSAGE_SELECTOR_PROPERTY_NAME, {}) + # including all examples here. 
Empty response examples are filtered at the + # time of metric calculation + intent_target = example.get(INTENT, "") + selector_properties = result.get(RESPONSE_SELECTOR_PROPERTY_NAME, {}) if intent_target in available_response_selector_types: response_prediction_key = intent_target @@ -920,14 +1333,16 @@ def get_eval_data( response_prediction_key, {} ).get(OPEN_UTTERANCE_PREDICTION_KEY, {}) - response_target = example.get("response", "") + response_target = example.get(RESPONSE, "") + + complete_intent = example.get_combined_intent_response_key() response_selection_results.append( ResponseSelectionEvaluationResult( - intent_target, + complete_intent, response_target, response_prediction.get("name"), - result.get("text", {}), + result.get(TEXT, {}), response_prediction.get("confidence"), ) ) @@ -935,10 +1350,10 @@ def get_eval_data( if should_eval_entities: entity_results.append( EntityEvaluationResult( - example.get("entities", []), - result.get("entities", []), - result.get("tokens", []), - result.get("text", ""), + example.get(ENTITIES, []), + result.get(ENTITIES, []), + result.get(TOKENS_NAMES[TEXT], []), + result.get(TEXT, ""), ) ) @@ -947,51 +1362,84 @@ def get_eval_data( def get_entity_extractors(interpreter: Interpreter) -> Set[Text]: """Finds the names of entity extractors used by the interpreter. - Processors are removed since they do not - detect the boundaries themselves.""" - extractors = set([c.name for c in interpreter.pipeline if "entities" in c.provides]) + Processors are removed since they do not detect the boundaries themselves. + + Args: + interpreter: the interpreter + + Returns: entity extractor names + """ + from rasa.nlu.extractors.extractor import EntityExtractor + from rasa.nlu.classifiers.diet_classifier import DIETClassifier + + extractors = set() + for c in interpreter.pipeline: + if isinstance(c, EntityExtractor): + if isinstance(c, DIETClassifier): + if c.component_config[ENTITY_RECOGNITION]: + extractors.add(c.name) + else: + extractors.add(c.name) + return extractors - ENTITY_PROCESSORS def is_entity_extractor_present(interpreter: Interpreter) -> bool: - """Checks whether entity extractor is present""" + """Checks whether entity extractor is present.""" extractors = get_entity_extractors(interpreter) return extractors != [] def is_intent_classifier_present(interpreter: Interpreter) -> bool: - """Checks whether intent classifier is present""" + """Checks whether intent classifier is present.""" + + from rasa.nlu.classifiers.classifier import IntentClassifier intent_classifiers = [ - c.name for c in interpreter.pipeline if "intent" in c.provides + c.name for c in interpreter.pipeline if isinstance(c, IntentClassifier) ] return intent_classifiers != [] def is_response_selector_present(interpreter: Interpreter) -> bool: - """Checks whether response selector is present""" + """Checks whether response selector is present.""" + + from rasa.nlu.selectors.response_selector import ResponseSelector response_selectors = [ - c.name for c in interpreter.pipeline if "response" in c.provides + c.name for c in interpreter.pipeline if isinstance(c, ResponseSelector) ] return response_selectors != [] def get_available_response_selector_types(interpreter: Interpreter) -> List[Text]: - """Gets all available response selector types""" + """Gets all available response selector types.""" + + from rasa.nlu.selectors.response_selector import ResponseSelector response_selector_types = [ - c.retrieval_intent for c in interpreter.pipeline if "response" in c.provides + 
c.retrieval_intent + for c in interpreter.pipeline + if isinstance(c, ResponseSelector) ] return response_selector_types def remove_pretrained_extractors(pipeline: List[Component]) -> List[Component]: - """Removes pretrained extractors from the pipeline so that entities - from pre-trained extractors are not predicted upon parsing""" + """Remove pre-trained extractors from the pipeline. + + Remove pre-trained extractors so that entities from pre-trained extractors + are not predicted upon parsing. + + Args: + pipeline: the pipeline + + Returns: + Updated pipeline + """ pipeline = [c for c in pipeline if c.name not in PRETRAINED_EXTRACTORS] return pipeline @@ -1002,36 +1450,37 @@ def run_evaluation( output_directory: Optional[Text] = None, successes: bool = False, errors: bool = False, - confmat: Optional[Text] = None, - histogram: Optional[Text] = None, component_builder: Optional[ComponentBuilder] = None, + disable_plotting: bool = False, ) -> Dict: # pragma: no cover + """Evaluate intent classification, response selection and entity extraction. + + Args: + data_path: path to the test data + model_path: path to the model + output_directory: path to folder where all output will be stored + successes: if true successful predictions are written to a file + errors: if true incorrect predictions are written to a file + component_builder: component builder + disable_plotting: if true confusion matrix and histogram will not be rendered + + Returns: dictionary containing evaluation results """ - Evaluate intent classification, response selection and entity extraction. - - :param data_path: path to the test data - :param model_path: path to the model - :param output_directory: path to folder where all output will be stored - :param successes: if true successful predictions are written to a file - :param errors: if true incorrect predictions are written to a file - :param confmat: path to file that will show the confusion matrix - :param histogram: path fo file that will show a histogram - :param component_builder: component builder - - :return: dictionary containing evaluation results - """ + import rasa.nlu.training_data # get the metadata config from the package data interpreter = Interpreter.load(model_path, component_builder) interpreter.pipeline = remove_pretrained_extractors(interpreter.pipeline) - test_data = training_data.load_data(data_path, interpreter.model_metadata.language) + test_data = rasa.nlu.training_data.load_data( + data_path, interpreter.model_metadata.language + ) - result = { + result: Dict[Text, Optional[Dict]] = { "intent_evaluation": None, "entity_evaluation": None, "response_selection_evaluation": None, - } # type: Dict[Text, Optional[Dict]] + } if output_directory: io_utils.create_directory(output_directory) @@ -1043,49 +1492,61 @@ def run_evaluation( if intent_results: logger.info("Intent evaluation results:") result["intent_evaluation"] = evaluate_intents( - intent_results, output_directory, successes, errors, confmat, histogram + intent_results, output_directory, successes, errors, disable_plotting ) if response_selection_results: logger.info("Response selection evaluation results:") result["response_selection_evaluation"] = evaluate_response_selections( - response_selection_results, output_directory + response_selection_results, + output_directory, + successes, + errors, + disable_plotting, ) if entity_results: logger.info("Entity evaluation results:") extractors = get_entity_extractors(interpreter) result["entity_evaluation"] = evaluate_entities( - entity_results, 
extractors, output_directory, successes, errors + entity_results, + extractors, + output_directory, + successes, + errors, + disable_plotting, ) return result def generate_folds( - n: int, td: TrainingData + n: int, training_data: TrainingData ) -> Iterator[Tuple[TrainingData, TrainingData]]: - """Generates n cross validation folds for training data td.""" + """Generates n cross validation folds for given training data.""" from sklearn.model_selection import StratifiedKFold skf = StratifiedKFold(n_splits=n, shuffle=True) - x = td.intent_examples - y = [example.get("intent") for example in x] + x = training_data.intent_examples + + # Get labels with response key appended to intent name because we want a + # stratified split on all intents(including retrieval intents if they exist) + y = [example.get_combined_intent_response_key() for example in x] for i_fold, (train_index, test_index) in enumerate(skf.split(x, y)): - logger.debug("Fold: {}".format(i_fold)) + logger.debug(f"Fold: {i_fold}") train = [x[i] for i in train_index] test = [x[i] for i in test_index] yield ( TrainingData( training_examples=train, - entity_synonyms=td.entity_synonyms, - regex_features=td.regex_features, + entity_synonyms=training_data.entity_synonyms, + regex_features=training_data.regex_features, ), TrainingData( training_examples=test, - entity_synonyms=td.entity_synonyms, - regex_features=td.regex_features, + entity_synonyms=training_data.entity_synonyms, + regex_features=training_data.regex_features, ), ) @@ -1093,21 +1554,40 @@ def generate_folds( def combine_result( intent_metrics: IntentMetrics, entity_metrics: EntityMetrics, + response_selection_metrics: ResponseSelectionMetrics, interpreter: Interpreter, data: TrainingData, intent_results: Optional[List[IntentEvaluationResult]] = None, entity_results: Optional[List[EntityEvaluationResult]] = None, -) -> Tuple[IntentMetrics, EntityMetrics]: - """Collects intent and entity metrics for crossvalidation folds. - If `intent_results` or `entity_results` is provided as a list, prediction results - are also collected. - """ + response_selection_results: Optional[ + List[ResponseSelectionEvaluationResult] + ] = None, +) -> Tuple[IntentMetrics, EntityMetrics, ResponseSelectionMetrics]: + """Collects intent, response selection and entity metrics for cross validation + folds. + + If `intent_results`, `response_selection_results` or `entity_results` is provided + as a list, prediction results are also collected. 
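The fold generation above stratifies on the combined intent/response key so that retrieval intents (e.g. `faq/ask_name`) keep their class balance across folds. A minimal, self-contained sketch of that idea, using a hypothetical helper rather than the actual Rasa code:

```python
from sklearn.model_selection import StratifiedKFold

def stratified_folds(examples, labels, n_folds=2):
    """Yield (train, test) example lists, stratified on the given labels."""
    skf = StratifiedKFold(n_splits=n_folds, shuffle=True)
    for train_idx, test_idx in skf.split(examples, labels):
        yield [examples[i] for i in train_idx], [examples[i] for i in test_idx]

# Labels combine intent and response key, mirroring get_combined_intent_response_key().
examples = ["hi", "hello", "what's your name", "who are you", "bye", "goodbye"]
labels = ["greet", "greet", "faq/ask_name", "faq/ask_name", "bye", "bye"]

for train, test in stratified_folds(examples, labels):
    print(len(train), len(test))  # 3 3 on each of the two folds
```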
+ Args: + intent_metrics: intent metrics + entity_metrics: entity metrics + response_selection_metrics: response selection metrics + interpreter: the interpreter + data: training data + intent_results: intent evaluation results + entity_results: entity evaluation results + response_selection_results: reponse selection evaluation results + + Returns: intent, entity, and response selection metrics + """ ( intent_current_metrics, entity_current_metrics, + response_selection_current_metrics, current_intent_results, current_entity_results, + current_response_selection_results, ) = compute_metrics(interpreter, data) if intent_results is not None: @@ -1116,15 +1596,28 @@ def combine_result( if entity_results is not None: entity_results += current_entity_results + if response_selection_results is not None: + response_selection_results += current_response_selection_results + for k, v in intent_current_metrics.items(): intent_metrics[k] = v + intent_metrics[k] + for k, v in response_selection_current_metrics.items(): + response_selection_metrics[k] = v + response_selection_metrics[k] + for extractor, extractor_metric in entity_current_metrics.items(): entity_metrics[extractor] = { k: v + entity_metrics[extractor][k] for k, v in extractor_metric.items() } - return intent_metrics, entity_metrics + return intent_metrics, entity_metrics, response_selection_metrics + + +def _contains_entity_labels(entity_results: List[EntityEvaluationResult]) -> bool: + + for result in entity_results: + if result.entity_targets or result.entity_predictions: + return True def cross_validate( @@ -1134,29 +1627,28 @@ def cross_validate( output: Optional[Text] = None, successes: bool = False, errors: bool = False, - confmat: Optional[Text] = None, - histogram: Optional[Text] = None, -) -> Tuple[CVEvaluationResult, CVEvaluationResult]: + disable_plotting: bool = False, +) -> Tuple[CVEvaluationResult, CVEvaluationResult, CVEvaluationResult]: """Stratified cross validation on data. 
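`combine_result` above merges each fold's metrics into running `defaultdict(list)` collections. Roughly, the accumulation works like this (metric names and values are made up):

```python
from collections import defaultdict

intent_metrics = defaultdict(list)

# one dict of metric lists per cross-validation fold (illustrative values)
fold_metrics = [
    {"Accuracy": [0.91], "F1-score": [0.89]},
    {"Accuracy": [0.88], "F1-score": [0.90]},
]

for current in fold_metrics:
    for key, values in current.items():
        # prepend the current fold's values to whatever was collected so far
        intent_metrics[key] = values + intent_metrics[key]

print(dict(intent_metrics))  # {'Accuracy': [0.88, 0.91], 'F1-score': [0.90, 0.89]}
```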
Args: data: Training Data n_folds: integer, number of cv folds nlu_config: nlu config file - report: path to folder where reports are stored + output: path to folder where reports are stored successes: if true successful predictions are written to a file errors: if true incorrect predictions are written to a file - confmat: path to file that will show the confusion matrix - histogram: path fo file that will show a histogram + disable_plotting: if true no confusion matrix and historgram plates are created Returns: dictionary with key, list structure, where each entry in list corresponds to the relevant result for one fold """ + import rasa.nlu.config from collections import defaultdict if isinstance(nlu_config, str): - nlu_config = config.load(nlu_config) + nlu_config = rasa.nlu.config.load(nlu_config) if output: io_utils.create_directory(output) @@ -1164,50 +1656,86 @@ def cross_validate( trainer = Trainer(nlu_config) trainer.pipeline = remove_pretrained_extractors(trainer.pipeline) - intent_train_metrics = defaultdict(list) # type: IntentMetrics - intent_test_metrics = defaultdict(list) # type: IntentMetrics - entity_train_metrics = defaultdict(lambda: defaultdict(list)) # type: EntityMetrics - entity_test_metrics = defaultdict(lambda: defaultdict(list)) # type: EntityMetrics + intent_train_metrics: IntentMetrics = defaultdict(list) + intent_test_metrics: IntentMetrics = defaultdict(list) + entity_train_metrics: EntityMetrics = defaultdict(lambda: defaultdict(list)) + entity_test_metrics: EntityMetrics = defaultdict(lambda: defaultdict(list)) + response_selection_train_metrics: ResponseSelectionMetrics = defaultdict(list) + response_selection_test_metrics: ResponseSelectionMetrics = defaultdict(list) - intent_test_results = [] # type: List[IntentEvaluationResult] - entity_test_results = [] # type: List[EntityEvaluationResult] + intent_test_results: List[IntentEvaluationResult] = [] + entity_test_results: List[EntityEvaluationResult] = [] + response_selection_test_results: List[ResponseSelectionEvaluationResult] = ([]) intent_classifier_present = False - extractors = set() # type: Set[Text] + response_selector_present = False + entity_evaluation_possible = False + extractors: Set[Text] = set() for train, test in generate_folds(n_folds, data): interpreter = trainer.train(train) # calculate train accuracy - combine_result(intent_train_metrics, entity_train_metrics, interpreter, train) + combine_result( + intent_train_metrics, + entity_train_metrics, + response_selection_train_metrics, + interpreter, + train, + ) # calculate test accuracy combine_result( intent_test_metrics, entity_test_metrics, + response_selection_test_metrics, interpreter, test, intent_test_results, entity_test_results, + response_selection_test_results, ) if not extractors: extractors = get_entity_extractors(interpreter) + entity_evaluation_possible = ( + entity_evaluation_possible + or _contains_entity_labels(entity_test_results) + ) if is_intent_classifier_present(interpreter): intent_classifier_present = True - if intent_classifier_present: + if is_response_selector_present(interpreter): + response_selector_present = True + + if intent_classifier_present and intent_test_results: logger.info("Accumulated test folds intent evaluation results:") evaluate_intents( - intent_test_results, output, successes, errors, confmat, histogram + intent_test_results, output, successes, errors, disable_plotting ) - if extractors: + if extractors and entity_evaluation_possible: logger.info("Accumulated test folds entity evaluation 
results:") - evaluate_entities(entity_test_results, extractors, output, successes, errors) + evaluate_entities( + entity_test_results, extractors, output, successes, errors, disable_plotting + ) + + if response_selector_present and response_selection_test_results: + logger.info("Accumulated test folds response selection evaluation results:") + evaluate_response_selections( + response_selection_test_results, output, successes, errors, disable_plotting + ) + + if not entity_evaluation_possible: + entity_test_metrics = defaultdict(lambda: defaultdict(list)) + entity_train_metrics = defaultdict(lambda: defaultdict(list)) return ( CVEvaluationResult(dict(intent_train_metrics), dict(intent_test_metrics)), CVEvaluationResult(dict(entity_train_metrics), dict(entity_test_metrics)), + CVEvaluationResult( + dict(response_selection_train_metrics), + dict(response_selection_test_metrics), + ), ) @@ -1222,29 +1750,58 @@ def _targets_predictions_from( def compute_metrics( - interpreter: Interpreter, corpus: TrainingData + interpreter: Interpreter, training_data: TrainingData ) -> Tuple[ IntentMetrics, EntityMetrics, + ResponseSelectionMetrics, List[IntentEvaluationResult], List[EntityEvaluationResult], + List[ResponseSelectionEvaluationResult], ]: - """Computes metrics for intent classification and entity extraction. - Returns intent and entity metrics, and prediction results. - """ + """Computes metrics for intent classification, response selection and entity + extraction. + + Args: + interpreter: the interpreter + training_data: training data + Returns: intent, response selection and entity metrics, and prediction results. + """ intent_results, response_selection_results, entity_results = get_eval_data( - interpreter, corpus + interpreter, training_data ) intent_results = remove_empty_intent_examples(intent_results) - intent_metrics = _compute_metrics( - intent_results, "intent_target", "intent_prediction" + response_selection_results = remove_empty_response_examples( + response_selection_results ) - entity_metrics = _compute_entity_metrics(entity_results, interpreter) - return (intent_metrics, entity_metrics, intent_results, entity_results) + intent_metrics = {} + if intent_results: + intent_metrics = _compute_metrics( + intent_results, "intent_target", "intent_prediction" + ) + + entity_metrics = {} + if entity_results: + entity_metrics = _compute_entity_metrics(entity_results, interpreter) + + response_selection_metrics = {} + if response_selection_results: + response_selection_metrics = _compute_metrics( + response_selection_results, "response_target", "response_prediction" + ) + + return ( + intent_metrics, + entity_metrics, + response_selection_metrics, + intent_results, + entity_results, + response_selection_results, + ) def compare_nlu( @@ -1290,20 +1847,27 @@ def compare_nlu( io_utils.create_path(test_path) train, test = data.train_test_split() - write_to_file(test_path, test.nlu_as_markdown()) - - training_examples_per_run = [] + io_utils.write_text_file(test.nlu_as_markdown(), test_path) for percentage in exclusion_percentages: - percent_string = "{}%_exclusion".format(percentage) + percent_string = f"{percentage}%_exclusion" - _, train = train.train_test_split(percentage / 100) - training_examples_per_run.append(len(train.training_examples)) + _, train_included = train.train_test_split(percentage / 100) + # only count for the first run and ignore the others + if run == 0: + training_examples_per_run.append(len(train_included.training_examples)) model_output_path = os.path.join(run_path, 
percent_string) - train_split_path = os.path.join(model_output_path, TRAIN_DATA_FILE) - io_utils.create_path(train_split_path) - write_to_file(train_split_path, train.nlu_as_markdown()) + train_split_path = os.path.join(model_output_path, "train") + train_nlu_split_path = os.path.join(train_split_path, TRAIN_DATA_FILE) + train_nlg_split_path = os.path.join(train_split_path, NLG_DATA_FILE) + io_utils.create_path(train_nlu_split_path) + io_utils.write_text_file( + train_included.nlu_as_markdown(), train_nlu_split_path + ) + io_utils.write_text_file( + train_included.nlg_as_markdown(), train_nlg_split_path + ) for nlu_config, model_name in zip(configs, model_names): logger.info( @@ -1320,19 +1884,13 @@ def compare_nlu( fixed_model_name=model_name, ) except Exception as e: - logger.warning( - "Training model '{}' failed. Error: {}".format( - model_name, str(e) - ) - ) + logger.warning(f"Training model '{model_name}' failed. Error: {e}") f_score_results[model_name][run].append(0.0) continue model_path = os.path.join(get_model(model_path), "nlu") - output_path = os.path.join( - model_output_path, "{}_report".format(model_name) - ) + output_path = os.path.join(model_output_path, f"{model_name}_report") result = run_evaluation( test_path, model_path, output_directory=output_path, errors=True ) @@ -1348,14 +1906,22 @@ def _compute_metrics( List[IntentEvaluationResult], List[ResponseSelectionEvaluationResult] ], target_key: Text, - target_prediction: Text, -) -> IntentMetrics: - """Computes evaluation metrics for a given corpus and - returns the results + prediction_key: Text, +) -> Union[IntentMetrics, ResponseSelectionMetrics]: + """Computes evaluation metrics for a given corpus and returns the results. + + Args: + results: evaluation results + target_key: target key name + prediction_key: prediction key name + + Returns: metrics """ + from rasa.test import get_evaluation_metrics + # compute fold metrics targets, predictions = _targets_predictions_from( - results, target_key, target_prediction + results, target_key, prediction_key ) _, precision, f1, accuracy = get_evaluation_metrics(targets, predictions) @@ -1365,11 +1931,17 @@ def _compute_metrics( def _compute_entity_metrics( entity_results: List[EntityEvaluationResult], interpreter: Interpreter ) -> EntityMetrics: - """Computes entity evaluation metrics and returns the results""" + """Computes entity evaluation metrics and returns the results. 
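`_compute_metrics` above reduces the accumulated targets and predictions to precision, F1 and accuracy via `rasa.test.get_evaluation_metrics`. A rough equivalent with plain scikit-learn (weighted averaging is an assumption here):

```python
from sklearn import metrics

targets     = ["greet", "bye", "greet", "affirm", "bye"]
predictions = ["greet", "bye", "affirm", "affirm", "bye"]

precision = metrics.precision_score(targets, predictions, average="weighted")
f1        = metrics.f1_score(targets, predictions, average="weighted")
accuracy  = metrics.accuracy_score(targets, predictions)

print(f"precision={precision:.2f} f1={f1:.2f} accuracy={accuracy:.2f}")
# precision=0.90 f1=0.80 accuracy=0.80
```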
+ + Args: + entity_results: entity evaluation results + interpreter: the interpreter - entity_metric_results = defaultdict( - lambda: defaultdict(list) - ) # type: EntityMetrics + Returns: entity metrics + """ + from rasa.test import get_evaluation_metrics + + entity_metric_results: EntityMetrics = defaultdict(lambda: defaultdict(list)) extractors = get_entity_extractors(interpreter) if not extractors: @@ -1378,11 +1950,13 @@ def _compute_entity_metrics( aligned_predictions = align_all_entity_predictions(entity_results, extractors) merged_targets = merge_labels(aligned_predictions) - merged_targets = substitute_labels(merged_targets, "O", NO_ENTITY) + merged_targets = substitute_labels(merged_targets, NO_ENTITY_TAG, NO_ENTITY) for extractor in extractors: merged_predictions = merge_labels(aligned_predictions, extractor) - merged_predictions = substitute_labels(merged_predictions, "O", NO_ENTITY) + merged_predictions = substitute_labels( + merged_predictions, NO_ENTITY_TAG, NO_ENTITY + ) _, precision, f1, accuracy = get_evaluation_metrics( merged_targets, merged_predictions, exclude_label=NO_ENTITY ) @@ -1393,28 +1967,29 @@ def _compute_entity_metrics( return entity_metric_results -def return_results(results: IntentMetrics, dataset_name: Text) -> None: - """Returns results of crossvalidation - :param results: dictionary of results returned from cv - :param dataset_name: string of which dataset the results are from, e.g. - test/train - """ +def log_results(results: IntentMetrics, dataset_name: Text) -> None: + """Logs results of cross validation. + Args: + results: dictionary of results returned from cross validation + dataset_name: string of which dataset the results are from, e.g. test/train + """ for k, v in results.items(): logger.info( "{} {}: {:.3f} ({:.3f})".format(dataset_name, k, np.mean(v), np.std(v)) ) -def return_entity_results(results: EntityMetrics, dataset_name: Text) -> None: - """Returns entity results of crossvalidation - :param results: dictionary of dictionaries of results returned from cv - :param dataset_name: string of which dataset the results are from, e.g. - test/train +def log_entity_results(results: EntityMetrics, dataset_name: Text) -> None: + """Logs entity results of cross validation. + + Args: + results: dictionary of dictionaries of results returned from cross validation + dataset_name: string of which dataset the results are from, e.g. 
test/train """ for extractor, result in results.items(): - logger.info("Entity extractor: {}".format(extractor)) - return_results(result, dataset_name) + logger.info(f"Entity extractor: {extractor}") + log_results(result, dataset_name) if __name__ == "__main__": diff --git a/rasa/nlu/tokenizers/__init__.py b/rasa/nlu/tokenizers/__init__.py index 8cb8732bf097..e69de29bb2d1 100644 --- a/rasa/nlu/tokenizers/__init__.py +++ b/rasa/nlu/tokenizers/__init__.py @@ -1,16 +0,0 @@ -class Tokenizer(object): - pass - - -class Token(object): - def __init__(self, text, offset, data=None): - self.offset = offset - self.text = text - self.end = offset + len(text) - self.data = data if data else {} - - def set(self, prop, info): - self.data[prop] = info - - def get(self, prop, default=None): - return self.data.get(prop, default) diff --git a/rasa/nlu/tokenizers/convert_tokenizer.py b/rasa/nlu/tokenizers/convert_tokenizer.py new file mode 100644 index 000000000000..e577a858a07e --- /dev/null +++ b/rasa/nlu/tokenizers/convert_tokenizer.py @@ -0,0 +1,90 @@ +from typing import Any, Dict, List, Optional, Text + +from rasa.core.utils import get_dict_hash +from rasa.nlu.constants import NUMBER_OF_SUB_TOKENS +from rasa.nlu.model import Metadata +from rasa.nlu.tokenizers.tokenizer import Token +from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer +from rasa.nlu.training_data import Message +from rasa.utils import common +import rasa.utils.train_utils as train_utils +import tensorflow as tf + + +TF_HUB_MODULE_URL = "http://models.poly-ai.com/convert/v1/model.tar.gz" + + +class ConveRTTokenizer(WhitespaceTokenizer): + """Tokenizer using ConveRT model. + Loads the ConveRT(https://github.com/PolyAI-LDN/polyai-models#convert) + model from TFHub and computes sub-word tokens for dense + featurizable attributes of each message object. + """ + + defaults = { + # Flag to check whether to split intents + "intent_tokenization_flag": False, + # Symbol on which intent should be split + "intent_split_symbol": "_", + # Regular expression to detect tokens + "token_pattern": None, + } + + def __init__(self, component_config: Dict[Text, Any] = None) -> None: + """Construct a new tokenizer using the WhitespaceTokenizer framework.""" + + super().__init__(component_config) + + self.module = train_utils.load_tf_hub_model(TF_HUB_MODULE_URL) + + self.tokenize_signature = self.module.signatures["tokenize"] + + @classmethod + def cache_key( + cls, component_meta: Dict[Text, Any], model_metadata: Metadata + ) -> Optional[Text]: + _config = common.update_existing_keys(cls.defaults, component_meta) + return f"{cls.name}-{get_dict_hash(_config)}" + + def provide_context(self) -> Dict[Text, Any]: + return {"tf_hub_module": self.module} + + def _tokenize(self, sentence: Text) -> Any: + + return self.tokenize_signature(tf.convert_to_tensor([sentence]))[ + "default" + ].numpy() + + def tokenize(self, message: Message, attribute: Text) -> List[Token]: + """Tokenize the text using the ConveRT model. + ConveRT adds a special char in front of (some) words and splits words into + sub-words. To ensure the entity start and end values matches the token values, + tokenize the text first using the whitespace tokenizer. If individual tokens + are split up into multiple tokens, add this information to the + respected tokens. 
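Worth noting how the sub-word handling above stays offset-safe: the whitespace tokens remain the actual tokens, and each one merely records how many sub-words ConveRT produced for it. A self-contained sketch, with a made-up sub-word splitter standing in for the TF Hub `tokenize` signature:

```python
def fake_subword_tokenize(word):
    # stand-in for the ConveRT tokenize signature; splits into 4-character chunks
    return [word[i:i + 4] for i in range(0, len(word), 4)]

def attach_sub_token_counts(words):
    return [
        {"text": w, "number_of_sub_tokens": len([s for s in fake_subword_tokenize(w) if s])}
        for w in words
    ]

print(attach_sub_token_counts(["playing", "football"]))
# [{'text': 'playing', 'number_of_sub_tokens': 2}, {'text': 'football', 'number_of_sub_tokens': 2}]
```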
+ """ + + # perform whitespace tokenization + tokens_in = super().tokenize(message, attribute) + + tokens_out = [] + + for token in tokens_in: + # use ConveRT model to tokenize the text + split_token_strings = self._tokenize(token.text)[0] + + # clean tokens (remove special chars and empty tokens) + split_token_strings = self._clean_tokens(split_token_strings) + + token.set(NUMBER_OF_SUB_TOKENS, len(split_token_strings)) + + tokens_out.append(token) + + return tokens_out + + @staticmethod + def _clean_tokens(tokens: List[bytes]) -> List[Text]: + """Encode tokens and remove special char added by ConveRT.""" + + tokens = [string.decode("utf-8").replace("﹏", "") for string in tokens] + return [string for string in tokens if string] diff --git a/rasa/nlu/tokenizers/jieba_tokenizer.py b/rasa/nlu/tokenizers/jieba_tokenizer.py index d489be5c2ad0..ef034bde430f 100644 --- a/rasa/nlu/tokenizers/jieba_tokenizer.py +++ b/rasa/nlu/tokenizers/jieba_tokenizer.py @@ -6,19 +6,10 @@ from typing import Any, Dict, List, Optional, Text from rasa.nlu.components import Component -from rasa.nlu.config import RasaNLUModelConfig -from rasa.nlu.tokenizers import Token, Tokenizer -from rasa.nlu.training_data import Message, TrainingData - -from rasa.nlu.constants import ( - MESSAGE_RESPONSE_ATTRIBUTE, - MESSAGE_INTENT_ATTRIBUTE, - MESSAGE_TEXT_ATTRIBUTE, - MESSAGE_TOKENS_NAMES, - MESSAGE_ATTRIBUTES, - MESSAGE_SPACY_FEATURES_NAMES, - MESSAGE_VECTOR_FEATURE_NAMES, -) +from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer +from rasa.nlu.training_data import Message + +from rasa.nlu.constants import TOKENS_NAMES, MESSAGE_ATTRIBUTES logger = logging.getLogger(__name__) @@ -27,9 +18,7 @@ from rasa.nlu.model import Metadata -class JiebaTokenizer(Tokenizer, Component): - - provides = [MESSAGE_TOKENS_NAMES[attribute] for attribute in MESSAGE_ATTRIBUTES] +class JiebaTokenizer(Tokenizer): language_list = ["zh"] @@ -39,24 +28,18 @@ class JiebaTokenizer(Tokenizer, Component): "intent_tokenization_flag": False, # Symbol on which intent should be split "intent_split_symbol": "_", + # Regular expression to detect tokens + "token_pattern": None, } # default don't load custom dictionary def __init__(self, component_config: Dict[Text, Any] = None) -> None: """Construct a new intent classifier using the MITIE framework.""" - super(JiebaTokenizer, self).__init__(component_config) + super().__init__(component_config) # path to dictionary file or None self.dictionary_path = self.component_config.get("dictionary_path") - # flag to check whether to split intents - self.intent_tokenization_flag = self.component_config.get( - "intent_tokenization_flag" - ) - - # symbol to split intents on - self.intent_split_symbol = self.component_config.get("intent_split_symbol") - # load dictionary if self.dictionary_path is not None: self.load_custom_dictionary(self.dictionary_path) @@ -75,46 +58,20 @@ def load_custom_dictionary(path: Text) -> None: """ import jieba - jieba_userdicts = glob.glob("{}/*".format(path)) + jieba_userdicts = glob.glob(f"{path}/*") for jieba_userdict in jieba_userdicts: - logger.info("Loading Jieba User Dictionary at {}".format(jieba_userdict)) + logger.info(f"Loading Jieba User Dictionary at {jieba_userdict}") jieba.load_userdict(jieba_userdict) - def train( - self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any - ) -> None: - - for example in training_data.training_examples: - - for attribute in MESSAGE_ATTRIBUTES: - - if example.get(attribute) is not None: - example.set( - 
MESSAGE_TOKENS_NAMES[attribute], - self.tokenize(example.get(attribute), attribute), - ) - - def process(self, message: Message, **kwargs: Any) -> None: - - message.set( - MESSAGE_TOKENS_NAMES[MESSAGE_TEXT_ATTRIBUTE], - self.tokenize(message.text, MESSAGE_TEXT_ATTRIBUTE), - ) - - def preprocess_text(self, text, attribute): - - if attribute == MESSAGE_INTENT_ATTRIBUTE and self.intent_tokenization_flag: - return " ".join(text.split(self.intent_split_symbol)) - else: - return text - - def tokenize(self, text: Text, attribute=MESSAGE_TEXT_ATTRIBUTE) -> List[Token]: + def tokenize(self, message: Message, attribute: Text) -> List[Token]: import jieba - text = self.preprocess_text(text, attribute) + text = message.get(attribute) + tokenized = jieba.tokenize(text) tokens = [Token(word, start) for (word, start, end) in tokenized] - return tokens + + return self._apply_token_pattern(tokens) @classmethod def load( @@ -123,7 +80,7 @@ def load( model_dir: Optional[Text] = None, model_metadata: Optional["Metadata"] = None, cached_component: Optional[Component] = None, - **kwargs: Any + **kwargs: Any, ) -> "JiebaTokenizer": relative_dictionary_path = meta.get("dictionary_path") @@ -137,12 +94,12 @@ def load( return cls(meta) @staticmethod - def copy_files_dir_to_dir(input_dir, output_dir): + def copy_files_dir_to_dir(input_dir: Text, output_dir: Text) -> None: # make sure target path exists if not os.path.exists(output_dir): os.makedirs(output_dir) - target_file_list = glob.glob("{}/*".format(input_dir)) + target_file_list = glob.glob(f"{input_dir}/*") for target_file in target_file_list: shutil.copy2(target_file, output_dir) diff --git a/rasa/nlu/tokenizers/lm_tokenizer.py b/rasa/nlu/tokenizers/lm_tokenizer.py new file mode 100644 index 000000000000..4edb431b5986 --- /dev/null +++ b/rasa/nlu/tokenizers/lm_tokenizer.py @@ -0,0 +1,35 @@ +from typing import Text, List, Any, Dict, Type + +from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer +from rasa.nlu.components import Component +from rasa.nlu.utils.hugging_face.hf_transformers import HFTransformersNLP +from rasa.nlu.training_data import Message + +from rasa.nlu.constants import LANGUAGE_MODEL_DOCS, TOKENS + + +class LanguageModelTokenizer(Tokenizer): + """Tokenizer using transformer based language models. + + Uses the output of HFTransformersNLP component to set the tokens + for dense featurizable attributes of each message object. 
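For reference, `jieba.tokenize` (used by the slimmed-down `JiebaTokenizer.tokenize` above) yields `(word, start, end)` tuples, which map directly onto `Token(word, start)`:

```python
import jieba

for word, start, end in jieba.tokenize("我想去北京"):
    print(word, start, end)  # each tuple is (word, char_start, char_end)
```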
+ """ + + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [HFTransformersNLP] + + defaults = { + # Flag to check whether to split intents + "intent_tokenization_flag": False, + # Symbol on which intent should be split + "intent_split_symbol": "_", + } + + def get_doc(self, message: Message, attribute: Text) -> Dict[Text, Any]: + return message.get(LANGUAGE_MODEL_DOCS[attribute]) + + def tokenize(self, message: Message, attribute: Text) -> List[Token]: + doc = self.get_doc(message, attribute) + + return doc[TOKENS] diff --git a/rasa/nlu/tokenizers/mitie_tokenizer.py b/rasa/nlu/tokenizers/mitie_tokenizer.py index df8f61552492..0b653a153d91 100644 --- a/rasa/nlu/tokenizers/mitie_tokenizer.py +++ b/rasa/nlu/tokenizers/mitie_tokenizer.py @@ -1,65 +1,48 @@ -from typing import Any, List, Text +from typing import List, Text -from rasa.nlu.components import Component -from rasa.nlu.config import RasaNLUModelConfig -from rasa.nlu.tokenizers import Token, Tokenizer -from rasa.nlu.training_data import Message, TrainingData +from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer +from rasa.nlu.training_data import Message -from rasa.nlu.constants import ( - MESSAGE_RESPONSE_ATTRIBUTE, - MESSAGE_INTENT_ATTRIBUTE, - MESSAGE_TEXT_ATTRIBUTE, - MESSAGE_TOKENS_NAMES, - MESSAGE_ATTRIBUTES, - MESSAGE_SPACY_FEATURES_NAMES, - MESSAGE_VECTOR_FEATURE_NAMES, -) +from rasa.utils.io import DEFAULT_ENCODING -class MitieTokenizer(Tokenizer, Component): +class MitieTokenizer(Tokenizer): - provides = [MESSAGE_TOKENS_NAMES[attribute] for attribute in MESSAGE_ATTRIBUTES] + defaults = { + # Flag to check whether to split intents + "intent_tokenization_flag": False, + # Symbol on which intent should be split + "intent_split_symbol": "_", + # Regular expression to detect tokens + "token_pattern": None, + } @classmethod def required_packages(cls) -> List[Text]: return ["mitie"] - def train( - self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any - ) -> None: - - for example in training_data.training_examples: - - for attribute in MESSAGE_ATTRIBUTES: - - if example.get(attribute) is not None: - example.set( - MESSAGE_TOKENS_NAMES[attribute], - self.tokenize(example.get(attribute)), - ) - - def process(self, message: Message, **kwargs: Any) -> None: - - message.set( - MESSAGE_TOKENS_NAMES[MESSAGE_TEXT_ATTRIBUTE], self.tokenize(message.text) - ) - - def _token_from_offset(self, text, offset, encoded_sentence): - return Token( - text.decode("utf-8"), self._byte_to_char_offset(encoded_sentence, offset) - ) - - def tokenize(self, text: Text) -> List[Token]: + def tokenize(self, message: Message, attribute: Text) -> List[Token]: import mitie - encoded_sentence = text.encode("utf-8") + text = message.get(attribute) + + encoded_sentence = text.encode(DEFAULT_ENCODING) tokenized = mitie.tokenize_with_offsets(encoded_sentence) tokens = [ self._token_from_offset(token, offset, encoded_sentence) for token, offset in tokenized ] - return tokens + + return self._apply_token_pattern(tokens) + + def _token_from_offset( + self, text: bytes, offset: int, encoded_sentence: bytes + ) -> Token: + return Token( + text.decode(DEFAULT_ENCODING), + self._byte_to_char_offset(encoded_sentence, offset), + ) @staticmethod def _byte_to_char_offset(text: bytes, byte_offset: int) -> int: - return len(text[:byte_offset].decode("utf-8")) + return len(text[:byte_offset].decode(DEFAULT_ENCODING)) diff --git a/rasa/nlu/tokenizers/spacy_tokenizer.py b/rasa/nlu/tokenizers/spacy_tokenizer.py index 
87443d3375de..3860cc274443 100644 --- a/rasa/nlu/tokenizers/spacy_tokenizer.py +++ b/rasa/nlu/tokenizers/spacy_tokenizer.py @@ -1,63 +1,55 @@ import typing -from typing import Any +from typing import Text, List, Any, Type +from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer from rasa.nlu.components import Component -from rasa.nlu.config import RasaNLUModelConfig -from rasa.nlu.tokenizers import Token, Tokenizer -from rasa.nlu.training_data import Message, TrainingData - -from rasa.nlu.constants import ( - MESSAGE_RESPONSE_ATTRIBUTE, - MESSAGE_INTENT_ATTRIBUTE, - MESSAGE_TEXT_ATTRIBUTE, - MESSAGE_TOKENS_NAMES, - MESSAGE_ATTRIBUTES, - MESSAGE_SPACY_FEATURES_NAMES, - MESSAGE_VECTOR_FEATURE_NAMES, - SPACY_FEATURIZABLE_ATTRIBUTES, -) +from rasa.nlu.utils.spacy_utils import SpacyNLP +from rasa.nlu.training_data import Message + +from rasa.nlu.constants import SPACY_DOCS if typing.TYPE_CHECKING: from spacy.tokens.doc import Doc # pytype: disable=import-error -class SpacyTokenizer(Tokenizer, Component): - - provides = [ - MESSAGE_TOKENS_NAMES[attribute] for attribute in SPACY_FEATURIZABLE_ATTRIBUTES - ] - - requires = [ - MESSAGE_SPACY_FEATURES_NAMES[attribute] - for attribute in SPACY_FEATURIZABLE_ATTRIBUTES - ] - - def train( - self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any - ) -> None: - - for example in training_data.training_examples: +POS_TAG_KEY = "pos" - for attribute in SPACY_FEATURIZABLE_ATTRIBUTES: - attribute_doc = self.get_doc(example, attribute) +class SpacyTokenizer(Tokenizer): + @classmethod + def required_components(cls) -> List[Type[Component]]: + return [SpacyNLP] - if attribute_doc is not None: - example.set( - MESSAGE_TOKENS_NAMES[attribute], self.tokenize(attribute_doc) - ) + defaults = { + # Flag to check whether to split intents + "intent_tokenization_flag": False, + # Symbol on which intent should be split + "intent_split_symbol": "_", + # Regular expression to detect tokens + "token_pattern": None, + } - def get_doc(self, message, attribute): + def get_doc(self, message: Message, attribute: Text) -> "Doc": + return message.get(SPACY_DOCS[attribute]) - return message.get(MESSAGE_SPACY_FEATURES_NAMES[attribute]) + def tokenize(self, message: Message, attribute: Text) -> List[Token]: + doc = self.get_doc(message, attribute) - def process(self, message: Message, **kwargs: Any) -> None: + tokens = [ + Token( + t.text, t.idx, lemma=t.lemma_, data={POS_TAG_KEY: self._tag_of_token(t)} + ) + for t in doc + if t.text and t.text.strip() + ] - message.set( - MESSAGE_TOKENS_NAMES[MESSAGE_TEXT_ATTRIBUTE], - self.tokenize(self.get_doc(message, MESSAGE_TEXT_ATTRIBUTE)), - ) + return self._apply_token_pattern(tokens) - def tokenize(self, doc: "Doc") -> typing.List[Token]: + @staticmethod + def _tag_of_token(token: Any) -> Text: + import spacy - return [Token(t.text, t.idx) for t in doc] + if spacy.about.__version__ > "2" and token._.has("tag"): + return token._.get("tag") + else: + return token.tag_ diff --git a/rasa/nlu/tokenizers/tokenizer.py b/rasa/nlu/tokenizers/tokenizer.py new file mode 100644 index 000000000000..4d3bad85e73c --- /dev/null +++ b/rasa/nlu/tokenizers/tokenizer.py @@ -0,0 +1,161 @@ +import logging +import re + +from typing import Text, List, Optional, Dict, Any + +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.training_data import TrainingData, Message +from rasa.nlu.components import Component +from rasa.nlu.constants import TEXT, TOKENS_NAMES, MESSAGE_ATTRIBUTES, INTENT + +logger = logging.getLogger(__name__) + + 
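The rewritten `SpacyTokenizer` above now also carries the lemma and a POS tag per token. Roughly what it extracts from a spaCy `Doc` (assuming an installed model such as `en_core_web_sm`; the exact tag depends on the model):

```python
import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("I am looking for a Mexican restaurant")

tokens = [
    {"text": t.text, "start": t.idx, "lemma": t.lemma_, "pos": t.tag_}
    for t in doc
    if t.text and t.text.strip()
]
print(tokens[2])  # e.g. {'text': 'looking', 'start': 5, 'lemma': 'look', 'pos': 'VBG'}
```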
+class Token(object): + def __init__( + self, + text: Text, + start: int, + end: Optional[int] = None, + data: Optional[Dict[Text, Any]] = None, + lemma: Optional[Text] = None, + ) -> None: + self.text = text + self.start = start + self.end = end if end else start + len(text) + + self.data = data if data else {} + self.lemma = lemma or text + + def set(self, prop: Text, info: Any) -> None: + self.data[prop] = info + + def get(self, prop: Text, default: Optional[Any] = None) -> Any: + return self.data.get(prop, default) + + def __eq__(self, other): + if not isinstance(other, Token): + return NotImplemented + return (self.start, self.end, self.text, self.lemma) == ( + other.start, + other.end, + other.text, + other.lemma, + ) + + def __lt__(self, other): + if not isinstance(other, Token): + return NotImplemented + return (self.start, self.end, self.text, self.lemma) < ( + other.start, + other.end, + other.text, + other.lemma, + ) + + +class Tokenizer(Component): + def __init__(self, component_config: Dict[Text, Any] = None) -> None: + """Construct a new tokenizer using the WhitespaceTokenizer framework.""" + + super().__init__(component_config) + + # flag to check whether to split intents + self.intent_tokenization_flag = self.component_config.get( + "intent_tokenization_flag", False + ) + # split symbol for intents + self.intent_split_symbol = self.component_config.get("intent_split_symbol", "_") + # token pattern to further split tokens + token_pattern = self.component_config.get("token_pattern", None) + self.token_pattern_regex = None + if token_pattern: + self.token_pattern_regex = re.compile(token_pattern) + + def tokenize(self, message: Message, attribute: Text) -> List[Token]: + """Tokenizes the text of the provided attribute of the incoming message.""" + + raise NotImplementedError + + def train( + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, + ) -> None: + """Tokenize all training data.""" + + for example in training_data.training_examples: + for attribute in MESSAGE_ATTRIBUTES: + if example.get(attribute) is not None: + if attribute == INTENT: + tokens = self._split_intent(example) + else: + tokens = self.tokenize(example, attribute) + example.set(TOKENS_NAMES[attribute], tokens) + + def process(self, message: Message, **kwargs: Any) -> None: + """Tokenize the incoming message.""" + + tokens = self.tokenize(message, TEXT) + message.set(TOKENS_NAMES[TEXT], tokens) + + def _split_intent(self, message: Message): + text = message.get(INTENT) + + words = ( + text.split(self.intent_split_symbol) + if self.intent_tokenization_flag + else [text] + ) + + return self._convert_words_to_tokens(words, text) + + def _apply_token_pattern(self, tokens: List[Token]) -> List[Token]: + """Apply the token pattern to the given tokens. + + Args: + tokens: list of tokens to split + + Returns: + List of tokens. 
+ """ + if not self.token_pattern_regex: + return tokens + + final_tokens = [] + for token in tokens: + new_tokens = self.token_pattern_regex.findall(token.text) + new_tokens = [t for t in new_tokens if t] + + if not new_tokens: + final_tokens.append(token) + + running_offset = 0 + for new_token in new_tokens: + word_offset = token.text.index(new_token, running_offset) + word_len = len(new_token) + running_offset = word_offset + word_len + final_tokens.append( + Token( + new_token, + token.start + word_offset, + data=token.data, + lemma=token.lemma, + ) + ) + + return final_tokens + + @staticmethod + def _convert_words_to_tokens(words: List[Text], text: Text) -> List[Token]: + running_offset = 0 + tokens = [] + + for word in words: + word_offset = text.index(word, running_offset) + word_len = len(word) + running_offset = word_offset + word_len + tokens.append(Token(word, word_offset)) + + return tokens diff --git a/rasa/nlu/tokenizers/whitespace_tokenizer.py b/rasa/nlu/tokenizers/whitespace_tokenizer.py index 94179ead2acb..b64f3d312c13 100644 --- a/rasa/nlu/tokenizers/whitespace_tokenizer.py +++ b/rasa/nlu/tokenizers/whitespace_tokenizer.py @@ -1,96 +1,95 @@ -import re from typing import Any, Dict, List, Text -from rasa.nlu.components import Component -from rasa.nlu.config import RasaNLUModelConfig -from rasa.nlu.tokenizers import Token, Tokenizer -from rasa.nlu.training_data import Message, TrainingData -from rasa.nlu.constants import ( - MESSAGE_RESPONSE_ATTRIBUTE, - MESSAGE_INTENT_ATTRIBUTE, - MESSAGE_TEXT_ATTRIBUTE, - MESSAGE_TOKENS_NAMES, - MESSAGE_ATTRIBUTES, - MESSAGE_SPACY_FEATURES_NAMES, - MESSAGE_VECTOR_FEATURE_NAMES, -) +import regex +import re +from rasa.constants import DOCS_URL_COMPONENTS +from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer +from rasa.nlu.training_data import Message +import rasa.utils.common as common_utils -class WhitespaceTokenizer(Tokenizer, Component): - provides = [MESSAGE_TOKENS_NAMES[attribute] for attribute in MESSAGE_ATTRIBUTES] +class WhitespaceTokenizer(Tokenizer): defaults = { # Flag to check whether to split intents "intent_tokenization_flag": False, # Symbol on which intent should be split "intent_split_symbol": "_", - # text will be tokenized with case sensitive as default - "case_sensitive": True, + # Regular expression to detect tokens + "token_pattern": None, } + # the following language should not be tokenized using the WhitespaceTokenizer + not_supported_language_list = ["zh", "ja", "th"] + def __init__(self, component_config: Dict[Text, Any] = None) -> None: """Construct a new tokenizer using the WhitespaceTokenizer framework.""" - super(WhitespaceTokenizer, self).__init__(component_config) - # flag to check whether to split intents - self.intent_tokenization_flag = self.component_config.get( - "intent_tokenization_flag" - ) - # split symbol for intents - self.intent_split_symbol = self.component_config["intent_split_symbol"] - self.case_sensitive = self.component_config["case_sensitive"] - - def train( - self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any - ) -> None: - for example in training_data.training_examples: - for attribute in MESSAGE_ATTRIBUTES: - if example.get(attribute) is not None: - example.set( - MESSAGE_TOKENS_NAMES[attribute], - self.tokenize(example.get(attribute), attribute), - ) - - def process(self, message: Message, **kwargs: Any) -> None: - - message.set( - MESSAGE_TOKENS_NAMES[MESSAGE_TEXT_ATTRIBUTE], self.tokenize(message.text) + super().__init__(component_config) + + 
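The `_apply_token_pattern` helper above splits existing tokens further while keeping character offsets anchored to the original message text. The core offset bookkeeping, as a standalone sketch:

```python
import re

def split_token(text, start, pattern=r"[A-Za-z]+|\d+"):
    """Split one token's text and return (sub_text, absolute_offset) pairs."""
    sub_tokens, running_offset = [], 0
    for piece in re.findall(pattern, text):
        offset = text.index(piece, running_offset)
        running_offset = offset + len(piece)
        sub_tokens.append((piece, start + offset))
    return sub_tokens

print(split_token("order_pizza42", start=10))
# [('order', 10), ('pizza', 16), ('42', 21)]
```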
self.emoji_pattern = self.get_emoji_regex() + + if "case_sensitive" in self.component_config: + common_utils.raise_warning( + "The option 'case_sensitive' was moved from the tokenizers to the " + "featurizers.", + docs=DOCS_URL_COMPONENTS, + ) + + @staticmethod + def get_emoji_regex(): + return re.compile( + "[" + "\U0001F600-\U0001F64F" # emoticons + "\U0001F300-\U0001F5FF" # symbols & pictographs + "\U0001F680-\U0001F6FF" # transport & map symbols + "\U0001F1E0-\U0001F1FF" # flags (iOS) + "\U00002702-\U000027B0" + "\U000024C2-\U0001F251" + "\u200d" # zero width joiner + "\u200c" # zero width non-joiner + "]+", + flags=re.UNICODE, ) - def tokenize( - self, text: Text, attribute: Text = MESSAGE_TEXT_ATTRIBUTE - ) -> List[Token]: + def remove_emoji(self, text: Text) -> Text: + """Remove emoji if the full text, aka token, matches the emoji regex.""" + match = self.emoji_pattern.fullmatch(text) + + if match is not None: + return "" + + return text + + def tokenize(self, message: Message, attribute: Text) -> List[Token]: + text = message.get(attribute) + + # we need to use regex instead of re, because of + # https://stackoverflow.com/questions/12746458/python-unicode-regular-expression-matching-failing-with-some-unicode-characters - if not self.case_sensitive: - text = text.lower() # remove 'not a word character' if - if attribute != MESSAGE_INTENT_ATTRIBUTE: - words = re.sub( - # there is a space or an end of a string after it - r"[^\w#@&]+(?=\s|$)|" - # there is a space or beginning of a string before it - # not followed by a number - r"(\s|^)[^\w#@&]+(?=[^0-9\s])|" - # not in between numbers and not . or @ or & or - or # - # e.g. 10'000.00 or blabla@gmail.com - # and not url characters - r"(?<=[^0-9\s])[^\w._~:/?#\[\]()@!$&*+,;=-]+(?=[^0-9\s])", - " ", - text, - ).split() - else: - words = ( - text.split(self.intent_split_symbol) - if self.intent_tokenization_flag - else [text] - ) + words = regex.sub( + # there is a space or an end of a string after it + r"[^\w#@&]+(?=\s|$)|" + # there is a space or beginning of a string before it + # not followed by a number + r"(\s|^)[^\w#@&]+(?=[^0-9\s])|" + # not in between numbers and not . or @ or & or - or # + # e.g. 
10'000.00 or blabla@gmail.com + # and not url characters + r"(?<=[^0-9\s])[^\w._~:/?#\[\]()@!$&*+,;=-]+(?=[^0-9\s])", + " ", + text, + ).split() + + words = [self.remove_emoji(w) for w in words] + words = [w for w in words if w] + + # if we removed everything like smiles `:)`, use the whole text as 1 token + if not words: + words = [text] + + tokens = self._convert_words_to_tokens(words, text) - running_offset = 0 - tokens = [] - for word in words: - word_offset = text.index(word, running_offset) - word_len = len(word) - running_offset = word_offset + word_len - tokens.append(Token(word, word_offset)) - return tokens + return self._apply_token_pattern(tokens) diff --git a/rasa/nlu/train.py b/rasa/nlu/train.py index fe941eb92e8c..33f30b023979 100644 --- a/rasa/nlu/train.py +++ b/rasa/nlu/train.py @@ -2,6 +2,7 @@ import typing from typing import Any, Optional, Text, Tuple, Union, Dict +import rasa.utils.common as common_utils from rasa.nlu import config from rasa.nlu.components import ComponentBuilder from rasa.nlu.config import RasaNLUModelConfig @@ -25,12 +26,18 @@ class TrainingException(Exception): message -- explanation of why the request is invalid """ - def __init__(self, failed_target_project=None, exception=None): + def __init__( + self, + failed_target_project: Optional[Text] = None, + exception: Optional[Exception] = None, + ) -> None: self.failed_target_project = failed_target_project if exception: self.message = exception.args[0] + else: + self.message = "" - def __str__(self): + def __str__(self) -> Text: return self.message @@ -54,7 +61,7 @@ async def train( component_builder: Optional[ComponentBuilder] = None, training_data_endpoint: Optional[EndpointConfig] = None, persist_nlu_training_data: bool = False, - **kwargs: Any + **kwargs: Any, ) -> Tuple[Trainer, Interpreter, Optional[Text]]: """Loads the trainer and the data and runs the training of the model.""" from rasa.importers.importer import TrainingDataImporter @@ -72,11 +79,14 @@ async def train( training_data_endpoint, nlu_config.language ) elif isinstance(data, TrainingDataImporter): - training_data = await data.get_nlu_data(nlu_config.data) + training_data = await data.get_nlu_data(nlu_config.language) else: training_data = load_data(data, nlu_config.language) training_data.print_stats() + if training_data.entity_roles_groups_used(): + common_utils.mark_as_experimental_feature("Entity Roles and Groups feature") + interpreter = trainer.train(training_data, **kwargs) if path: diff --git a/rasa/nlu/training_data/__init__.py b/rasa/nlu/training_data/__init__.py index 2ffbe7921da4..001368f9a70d 100644 --- a/rasa/nlu/training_data/__init__.py +++ b/rasa/nlu/training_data/__init__.py @@ -1,5 +1,6 @@ -# -*- coding: utf-8 -*- - +import rasa.nlu.training_data.entities_parser +import rasa.nlu.training_data.synonyms_parser +import rasa.nlu.training_data.lookup_tables_parser from rasa.nlu.training_data.loading import load_data from rasa.nlu.training_data.message import Message from rasa.nlu.training_data.training_data import TrainingData diff --git a/rasa/nlu/training_data/entities_parser.py b/rasa/nlu/training_data/entities_parser.py new file mode 100644 index 000000000000..d5293896f3f1 --- /dev/null +++ b/rasa/nlu/training_data/entities_parser.py @@ -0,0 +1,167 @@ +import re +from json import JSONDecodeError +from typing import Text, List, Dict, Match, Optional, NamedTuple, Any + +from rasa.constants import DOCS_URL_TRAINING_DATA_NLU +from rasa.nlu.constants import ( + ENTITY_ATTRIBUTE_GROUP, + ENTITY_ATTRIBUTE_TYPE, + 
ENTITY_ATTRIBUTE_ROLE, + ENTITY_ATTRIBUTE_VALUE, +) +from rasa.utils.common import raise_warning + +GROUP_ENTITY_VALUE = "value" +GROUP_ENTITY_TYPE = "entity" +GROUP_ENTITY_DICT = "entity_dict" +GROUP_ENTITY_TEXT = "entity_text" +GROUP_COMPLETE_MATCH = 0 + +# regex for: `[entity_text]((entity_type(:entity_synonym)?)|{entity_dict})` +ENTITY_REGEX = re.compile( + r"\[(?P<entity_text>[^\]]+?)\](\((?P<entity>[^:)]+?)(?:\:(?P<value>[^)]+))?\)|\{(?P<entity_dict>[^}]+?)\})" +) + + +class EntityAttributes(NamedTuple): + """Attributes of an entity defined in markdown data.""" + + type: Text + value: Text + text: Text + group: Optional[Text] + role: Optional[Text] + + +def find_entities_in_training_example(example: Text) -> List[Dict[Text, Any]]: + """Extracts entities from an intent example. + + Args: + example: Intent example. + + Returns: + Extracted entities. + """ + import rasa.nlu.utils as rasa_nlu_utils + + entities = [] + offset = 0 + + for match in re.finditer(ENTITY_REGEX, example): + entity_attributes = extract_entity_attributes(match) + + start_index = match.start() - offset + end_index = start_index + len(entity_attributes.text) + offset += len(match.group(0)) - len(entity_attributes.text) + + entity = rasa_nlu_utils.build_entity( + start_index, + end_index, + entity_attributes.value, + entity_attributes.type, + entity_attributes.role, + entity_attributes.group, + ) + entities.append(entity) + + return entities + + +def extract_entity_attributes(match: Match) -> EntityAttributes: + """Extract the entity attributes, i.e. type, value, etc., from the + regex match. + + Args: + match: Regex match to extract the entity attributes from. + + Returns: + EntityAttributes object. + """ + entity_text = match.groupdict()[GROUP_ENTITY_TEXT] + + if match.groupdict()[GROUP_ENTITY_DICT]: + return extract_entity_attributes_from_dict(entity_text, match) + + entity_type = match.groupdict()[GROUP_ENTITY_TYPE] + + if match.groupdict()[GROUP_ENTITY_VALUE]: + entity_value = match.groupdict()[GROUP_ENTITY_VALUE] + else: + entity_value = entity_text + + return EntityAttributes(entity_type, entity_value, entity_text, None, None) + + +def extract_entity_attributes_from_dict( + entity_text: Text, match: Match +) -> EntityAttributes: + """Extract entity attributes from dict format. + + Args: + entity_text: Original entity text. + match: Regex match. + + Returns: + Extracted entity attributes. + """ + entity_dict_str = match.groupdict()[GROUP_ENTITY_DICT] + entity_dict = get_validated_dict(entity_dict_str) + return EntityAttributes( + entity_dict.get(ENTITY_ATTRIBUTE_TYPE), + entity_dict.get(ENTITY_ATTRIBUTE_VALUE, entity_text), + entity_text, + entity_dict.get(ENTITY_ATTRIBUTE_GROUP), + entity_dict.get(ENTITY_ATTRIBUTE_ROLE), + ) + + +def get_validated_dict(json_str: Text) -> Dict[Text, Text]: + """Converts the provided `json_str` to a valid dict containing the entity + attributes. + + Users can specify entity roles, synonyms, groups for an entity in a dict, e.g. + [LA]{"entity": "city", "role": "to", "value": "Los Angeles"}. + + Args: + json_str: The entity dict as string without "{}". + + Raises: + ValidationError if validation of entity dict fails. + JSONDecodeError if provided entity dict is not valid json. + + Returns: + Deserialized and validated `json_str`. 
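`find_entities_in_training_example` above walks the markdown entity markup and converts match positions into offsets in the plain text. A simplified sketch covering only the `[text](type:value)` form (the full `ENTITY_REGEX` also handles the `{...}` dict syntax):

```python
import re

ENTITY = re.compile(r"\[(?P<text>[^\]]+)\]\((?P<type>[^:)]+)(?::(?P<value>[^)]+))?\)")

def parse_example(example):
    entities, offset = [], 0
    for m in ENTITY.finditer(example):
        start = m.start() - offset
        end = start + len(m.group("text"))
        offset += len(m.group(0)) - len(m.group("text"))
        entities.append({
            "start": start,
            "end": end,
            "entity": m.group("type"),
            "value": m.group("value") or m.group("text"),
        })
    plain_text = ENTITY.sub(lambda m: m.group("text"), example)
    return plain_text, entities

print(parse_example("show me flights to [Berlin](city)"))
# ('show me flights to Berlin',
#  [{'start': 19, 'end': 25, 'entity': 'city', 'value': 'Berlin'}])
```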
+ """ + import json + import rasa.utils.validation as validation_utils + import rasa.nlu.schemas.data_schema as schema + + # add {} as they are not part of the regex + try: + data = json.loads(f"{{{json_str}}}") + except JSONDecodeError as e: + raise_warning( + f"Incorrect training data format ('{{{json_str}}}'). Make sure your " + f"data is valid.", + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + raise e + + validation_utils.validate_training_data(data, schema.entity_dict_schema()) + + return data + + +def replace_entities(training_example: Text) -> Text: + """Replace special symbols related to the entities in the provided + training example. + + Args: + training_example: Original training example with special symbols. + + Returns: + String with removed special symbols. + """ + return re.sub( + ENTITY_REGEX, lambda m: m.groupdict()[GROUP_ENTITY_TEXT], training_example + ) diff --git a/rasa/nlu/training_data/formats/__init__.py b/rasa/nlu/training_data/formats/__init__.py index 3951ea290651..a88c464f9600 100644 --- a/rasa/nlu/training_data/formats/__init__.py +++ b/rasa/nlu/training_data/formats/__init__.py @@ -1,3 +1,4 @@ +from rasa.nlu.training_data.formats.rasa_yaml import RasaYAMLReader from rasa.nlu.training_data.formats.dialogflow import DialogflowReader from rasa.nlu.training_data.formats.luis import LuisReader from rasa.nlu.training_data.formats.markdown import MarkdownReader, MarkdownWriter @@ -7,3 +8,4 @@ NLGMarkdownReader, NLGMarkdownWriter, ) +from rasa.nlu.training_data.formats.rasa_yaml import RasaYAMLReader diff --git a/rasa/nlu/training_data/formats/dialogflow.py b/rasa/nlu/training_data/formats/dialogflow.py index c5e258f6e342..d34c72745bb8 100644 --- a/rasa/nlu/training_data/formats/dialogflow.py +++ b/rasa/nlu/training_data/formats/dialogflow.py @@ -1,12 +1,14 @@ import logging import os import typing -from typing import Any, Text, Optional +from typing import Any, Dict, Optional, Text, List, Tuple +from rasa.constants import DOCS_URL_MIGRATE_GOOGLE +import rasa.utils.io from rasa.nlu import utils from rasa.nlu.training_data.formats.readerwriter import TrainingDataReader from rasa.nlu.training_data.util import transform_entity_synonyms -import rasa.utils.io +from rasa.utils.common import raise_warning if typing.TYPE_CHECKING: from rasa.nlu.training_data import TrainingData @@ -39,8 +41,9 @@ def read(self, fn: Text, **kwargs: Any) -> "TrainingData": examples_js = self._read_examples_js(fn, language, fformat) if not examples_js: - logger.warning( - "No training examples found for dialogflow file {}!".format(fn) + raise_warning( + f"No training examples found for dialogflow file {fn}!", + docs=DOCS_URL_MIGRATE_GOOGLE, ) return TrainingData() elif fformat == DIALOGFLOW_INTENT: @@ -48,7 +51,9 @@ def read(self, fn: Text, **kwargs: Any) -> "TrainingData": else: # path for DIALOGFLOW_ENTITIES return self._read_entities(root_js, examples_js) - def _read_intent(self, intent_js, examples_js): + def _read_intent( + self, intent_js: Dict[Text, Any], examples_js: List[Dict[Text, Any]] + ) -> "TrainingData": """Reads the intent and examples from respective jsons.""" from rasa.nlu.training_data import Message, TrainingData @@ -61,7 +66,9 @@ def _read_intent(self, intent_js, examples_js): return TrainingData(training_examples) - def _join_text_chunks(self, chunks): + def _join_text_chunks( + self, chunks: List[Dict[Text, Any]] + ) -> Tuple[Text, List[Dict[Text, Any]]]: """Combines text chunks and extracts entities.""" utterance = "" @@ -75,7 +82,9 @@ def _join_text_chunks(self, chunks): return 
utterance, entities @staticmethod - def _extract_entity(chunk, current_offset): + def _extract_entity( + chunk: Dict[Text, Any], current_offset: int + ) -> Optional[Dict[Text, Any]]: """Extract an entity from a chunk if present.""" entity = None @@ -90,18 +99,20 @@ def _extract_entity(chunk, current_offset): return entity @staticmethod - def _flatten(list_of_lists): + def _flatten(list_of_lists: List[List[Any]]) -> List[Any]: return [item for items in list_of_lists for item in items] @staticmethod - def _extract_lookup_tables(name, examples): + def _extract_lookup_tables( + name: Text, examples: List[Dict[Text, Any]] + ) -> Optional[List[Dict[Text, Any]]]: """Extract the lookup table from the entity synonyms""" synonyms = [e["synonyms"] for e in examples if "synonyms" in e] synonyms = DialogflowReader._flatten(synonyms) elements = [synonym for synonym in synonyms if "@" not in synonym] if len(elements) == 0: - return False + return None return [{"name": name, "elements": elements}] @staticmethod @@ -115,7 +126,7 @@ def _read_entities(entity_js, examples_js) -> "TrainingData": return TrainingData([], entity_synonyms, [], lookup_tables) @staticmethod - def _read_examples_js(fn: Text, language: Text, fformat: Text) -> Optional[Text]: + def _read_examples_js(fn: Text, language: Text, fformat: Text) -> Any: """Infer and load the example file based on the root filename and root format.""" @@ -123,7 +134,7 @@ def _read_examples_js(fn: Text, language: Text, fformat: Text) -> Optional[Text] examples_type = "usersays" else: examples_type = "entries" - examples_fn_ending = "_{}_{}.json".format(examples_type, language) + examples_fn_ending = f"_{examples_type}_{language}.json" examples_fn = fn.replace(".json", examples_fn_ending) if os.path.isfile(examples_fn): return rasa.utils.io.read_json_file(examples_fn) diff --git a/rasa/nlu/training_data/formats/luis.py b/rasa/nlu/training_data/formats/luis.py index a8d27e9f8eac..ef645e3fd67c 100644 --- a/rasa/nlu/training_data/formats/luis.py +++ b/rasa/nlu/training_data/formats/luis.py @@ -3,6 +3,7 @@ from typing import Any, Dict, Text from rasa.nlu.training_data.formats.readerwriter import JsonTrainingDataReader +from rasa.utils.common import raise_warning if typing.TYPE_CHECKING: from rasa.nlu.training_data import Message, TrainingData @@ -18,14 +19,13 @@ def read_from_json(self, js: Dict[Text, Any], **kwargs: Any) -> "TrainingData": training_examples = [] regex_features = [] - # Simple check to ensure we support this luis data schema version - if not js["luis_schema_version"].startswith("2"): - raise Exception( - "Invalid luis data schema version {}, " - "should be 2.x.x. " - "Make sure to use the latest luis version " - "(e.g. by downloading your data again)." - "".format(js["luis_schema_version"]) + max_tested_luis_schema_version = 5 + major_version = int(js["luis_schema_version"].split(".")[0]) + if major_version > max_tested_luis_schema_version: + raise_warning( + f"Your luis data schema version {js['luis_schema_version']} " + f"is higher than 5.x.x. " + f"Training may not be performed correctly. 
" ) for r in js.get("regex_features", []): diff --git a/rasa/nlu/training_data/formats/markdown.py b/rasa/nlu/training_data/formats/markdown.py index 9199d13e232e..f40416202437 100644 --- a/rasa/nlu/training_data/formats/markdown.py +++ b/rasa/nlu/training_data/formats/markdown.py @@ -1,18 +1,24 @@ import logging import re import typing -from typing import Any, Text +from collections import OrderedDict +from json import JSONDecodeError +from pathlib import Path +from typing import Any, Text, Optional, Tuple, Dict, Union +import rasa.utils.io as io_utils +from rasa.constants import DOCS_URL_TRAINING_DATA_NLU from rasa.nlu.training_data.formats.readerwriter import ( TrainingDataReader, TrainingDataWriter, ) -from rasa.nlu.utils import build_entity -from rasa.nlu.constants import ( - MESSAGE_INTENT_ATTRIBUTE, - MESSAGE_RESPONSE_KEY_ATTRIBUTE, - RESPONSE_IDENTIFIER_DELIMITER, -) +from rasa.utils.common import raise_warning +from rasa.utils.io import encode_string, decode_string + +GROUP_ENTITY_VALUE = "value" +GROUP_ENTITY_TYPE = "entity" +GROUP_ENTITY_DICT = "entity_dict" +GROUP_ENTITY_TEXT = "entity_text" if typing.TYPE_CHECKING: from rasa.nlu.training_data import Message, TrainingData @@ -21,33 +27,13 @@ SYNONYM = "synonym" REGEX = "regex" LOOKUP = "lookup" -available_sections = [INTENT, SYNONYM, REGEX, LOOKUP] - -# regex for: `[entity_text](entity_type(:entity_synonym)?)` -ent_regex = re.compile( - r"\[(?P<entity_text>[^\]]+)" r"\]\((?P<entity>[^:)]*?)" r"(?:\:(?P<value>[^)]+))?\)" -) +AVAILABLE_SECTIONS = [INTENT, SYNONYM, REGEX, LOOKUP] +MARKDOWN_SECTION_MARKERS = [f"## {s}:" for s in AVAILABLE_SECTIONS] -item_regex = re.compile(r"\s*[-*+]\s*(.+)") +item_regex = re.compile(r"\s*[-*+]\s*((?:.+\s*)*)") comment_regex = re.compile(r"<!--[\s\S]*?--!*>", re.MULTILINE) fname_regex = re.compile(r"\s*([^-*+]+)") -ESCAPE_DCT = {"\b": "\\b", "\f": "\\f", "\n": "\\n", "\r": "\\r", "\t": "\\t"} - -ESCAPE = re.compile(r"[\b\f\n\r\t]") - - -def encode_string(s): - """Return a encoded python string - - """ - - def replace(match): - return ESCAPE_DCT[match.group(0)] - - return ESCAPE.sub(replace, s) - - logger = logging.getLogger(__name__) @@ -55,28 +41,43 @@ class MarkdownReader(TrainingDataReader): """Reads markdown training data and creates a TrainingData object.""" def __init__(self) -> None: + super().__init__() self.current_title = None self.current_section = None self.training_examples = [] self.entity_synonyms = {} self.regex_features = [] - self.section_regexes = self._create_section_regexes(available_sections) self.lookup_tables = [] + self._deprecated_synonym_format_was_used = False + def reads(self, s: Text, **kwargs: Any) -> "TrainingData": """Read markdown string and create TrainingData object""" from rasa.nlu.training_data import TrainingData - self.__init__() s = self._strip_comments(s) for line in s.splitlines(): - line = line.strip() + line = decode_string(line.strip()) header = self._find_section_header(line) if header: self._set_current_section(header[0], header[1]) else: self._parse_item(line) self._load_files(line) + + if self._deprecated_synonym_format_was_used: + raise_warning( + "You are using the deprecated training data format to declare synonyms." + " Please use the following format: \n" + '[<entity-text>]{"entity": "<entity-type>", "value": ' + '"<entity-synonym>"}.' 
+ "\nYou can use the following command to update your training data file:" + "\nsed -i -E 's/\\[([^)]+)\\]\\(([^)]+):([^)]+)\\)/[\\1]{" + '"entity": "\\2", "value": "\\3"}/g\' nlu.md', + category=FutureWarning, + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + return TrainingData( self.training_examples, self.entity_synonyms, @@ -90,22 +91,16 @@ def _strip_comments(text: Text) -> Text: return re.sub(comment_regex, "", text) @staticmethod - def _create_section_regexes(section_names): - def make_regex(section_name): - return re.compile(r"##\s*{}:(.+)".format(section_name)) - - return {sn: make_regex(sn) for sn in section_names} - - def _find_section_header(self, line): + def _find_section_header(line: Text) -> Optional[Tuple[Text, Text]]: """Checks if the current line contains a section header and returns the section and the title.""" - for name, regex in self.section_regexes.items(): - match = re.search(regex, line) - if match is not None: - return name, match.group(1) + match = re.search(r"##\s*(.+?):(.+)", line) + if match is not None: + return match.group(1), match.group(2) + return None - def _load_files(self, line): + def _load_files(self, line: Text) -> None: """Checks line to see if filename was supplied. If so, inserts the filename into the lookup table slot for processing from the regex featurizer.""" @@ -117,75 +112,78 @@ def _load_files(self, line): {"name": self.current_title, "elements": str(fname)} ) - def _parse_item(self, line): + def _parse_item(self, line: Text) -> None: """Parses an md list item line based on the current section type.""" + import rasa.nlu.training_data.lookup_tables_parser as lookup_tables_parser + import rasa.nlu.training_data.synonyms_parser as synonyms_parser + match = re.match(item_regex, line) if match: item = match.group(1) if self.current_section == INTENT: - parsed = self._parse_training_example(item) + parsed = self.parse_training_example(item) self.training_examples.append(parsed) elif self.current_section == SYNONYM: - self._add_synonym(item, self.current_title) + synonyms_parser.add_synonym( + item, self.current_title, self.entity_synonyms + ) elif self.current_section == REGEX: self.regex_features.append( {"name": self.current_title, "pattern": item} ) elif self.current_section == LOOKUP: - self._add_item_to_lookup(item) - - def _add_item_to_lookup(self, item): - """Takes a list of lookup table dictionaries. 
Finds the one associated - with the current lookup, then adds the item to the list.""" - matches = [l for l in self.lookup_tables if l["name"] == self.current_title] - if not matches: - self.lookup_tables.append({"name": self.current_title, "elements": [item]}) - else: - elements = matches[0]["elements"] - elements.append(item) + lookup_tables_parser.add_item_to_lookup_tables( + self.current_title, item, self.lookup_tables + ) @staticmethod - def _find_entities_in_training_example(example): - """Extracts entities from a markdown intent example.""" - entities = [] - offset = 0 - for match in re.finditer(ent_regex, example): - entity_text = match.groupdict()["entity_text"] - entity_type = match.groupdict()["entity"] - if match.groupdict()["value"]: - entity_value = match.groupdict()["value"] - else: - entity_value = entity_text - - start_index = match.start() - offset - end_index = start_index + len(entity_text) - offset += len(match.group(0)) - len(entity_text) - - entity = build_entity(start_index, end_index, entity_value, entity_type) - entities.append(entity) - - return entities - - def _add_synonym(self, text, value): - from rasa.nlu.training_data.util import check_duplicate_synonym + def _get_validated_dict(json_str: Text) -> Dict[Text, Text]: + """Converts the provided json_str to a valid dict containing the entity + attributes. + + Users can specify entity roles, synonyms, groups for an entity in a dict, e.g. + [LA]{"entity": "city", "role": "to", "value": "Los Angeles"} + + Args: + json_str: the entity dict as string without "{}" + + Raises: + ValidationError if validation of entity dict fails. + JSONDecodeError if provided entity dict is not valid json. + + Returns: + a proper python dict + """ + import json + import rasa.utils.validation as validation_utils + import rasa.nlu.schemas.data_schema as schema + + # add {} as they are not part of the regex + try: + data = json.loads(f"{{{json_str}}}") + except JSONDecodeError as e: + raise_warning( + f"Incorrect training data format ('{{{json_str}}}'), make sure your " + f"data is valid. For more information about the format visit " + f"{DOCS_URL_TRAINING_DATA_NLU}." 
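The brace wrapping in `_get_validated_dict` above is easy to miss: the regex captures only the inner part of the entity dict, so `{}` must be re-added before `json.loads`. A standalone sketch of just that step, with made-up values:

```python
import json

# Captured from an annotation such as
# [LA]{"entity": "city", "role": "to", "value": "Los Angeles"} (without the braces).
json_str = '"entity": "city", "role": "to", "value": "Los Angeles"'

# Re-add the braces that are not part of the captured group, then parse.
entity_attributes = json.loads(f"{{{json_str}}}")
print(entity_attributes["value"])  # Los Angeles
```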
+ ) + raise e - check_duplicate_synonym(self.entity_synonyms, text, value, "reading markdown") - self.entity_synonyms[text] = value + validation_utils.validate_training_data(data, schema.entity_dict_schema()) - def _add_synonyms(self, plain_text, entities): - """Adds synonyms found in intent examples""" - for e in entities: - e_text = plain_text[e["start"] : e["end"]] - if e_text != e["value"]: - self._add_synonym(e_text, e["value"]) + return data - def _parse_training_example(self, example): + def parse_training_example(self, example: Text) -> "Message": """Extract entities and synonyms, and convert to plain text.""" from rasa.nlu.training_data import Message + import rasa.nlu.training_data.entities_parser as entities_parser + import rasa.nlu.training_data.synonyms_parser as synonyms_parser - entities = self._find_entities_in_training_example(example) - plain_text = re.sub(ent_regex, lambda m: m.groupdict()["entity_text"], example) - self._add_synonyms(plain_text, entities) + entities = entities_parser.find_entities_in_training_example(example) + plain_text = entities_parser.replace_entities(example) + synonyms_parser.add_synonyms_from_entities( + plain_text, entities, self.entity_synonyms + ) message = Message.build(plain_text, self.current_title) @@ -193,22 +191,28 @@ def _parse_training_example(self, example): message.set("entities", entities) return message - def _set_current_section(self, section, title): + def _set_current_section(self, section: Text, title: Text) -> None: """Update parsing mode.""" - if section not in available_sections: + if section not in AVAILABLE_SECTIONS: raise ValueError( - "Found markdown section {} which is not " - "in the allowed sections {}," - "".format(section, ",".join(available_sections)) + "Found markdown section '{}' which is not " + "in the allowed sections '{}'." 
+ "".format(section, "', '".join(AVAILABLE_SECTIONS)) ) self.current_section = section self.current_title = title + @staticmethod + def is_markdown_nlu_file(filename: Union[Text, Path]) -> bool: + content = io_utils.read_file(filename) + return any(marker in content for marker in MARKDOWN_SECTION_MARKERS) + class MarkdownWriter(TrainingDataWriter): - def dumps(self, training_data): + def dumps(self, training_data: "TrainingData") -> Text: """Transforms a TrainingData object into a markdown string.""" + md = "" md += self._generate_training_examples_md(training_data) md += self._generate_synonyms_md(training_data) @@ -217,29 +221,41 @@ def dumps(self, training_data): return md - def _generate_training_examples_md(self, training_data): - """generates markdown training examples.""" - training_examples = sorted( - [e.as_dict_nlu() for e in training_data.training_examples], - key=lambda k: k[MESSAGE_INTENT_ATTRIBUTE], - ) - md = "" - for i, example in enumerate(training_examples): - intent = training_examples[i - 1][MESSAGE_INTENT_ATTRIBUTE] - if i == 0 or intent != example[MESSAGE_INTENT_ATTRIBUTE]: - md += self._generate_section_header_md( - INTENT, - example[MESSAGE_INTENT_ATTRIBUTE], - example.get(MESSAGE_RESPONSE_KEY_ATTRIBUTE, None), - i != 0, - ) + def _generate_training_examples_md(self, training_data: "TrainingData") -> Text: + """Generates markdown training examples.""" - md += self._generate_item_md(self._generate_message_md(example)) + import rasa.nlu.training_data.util as rasa_nlu_training_data_utils - return md + training_examples = OrderedDict() + + # Sort by intent while keeping basic intent order + for example in [e.as_dict_nlu() for e in training_data.training_examples]: + rasa_nlu_training_data_utils.remove_untrainable_entities_from(example) + intent = example[INTENT] + training_examples.setdefault(intent, []) + training_examples[intent].append(example) + + # Don't prepend newline for first line + prepend_newline = False + lines = [] + + for intent, examples in training_examples.items(): + section_header = self._generate_section_header_md( + INTENT, intent, prepend_newline=prepend_newline + ) + lines.append(section_header) + prepend_newline = True + + lines += [ + self.generate_list_item(self.generate_message(example)) + for example in examples + ] + + return "".join(lines) + + def _generate_synonyms_md(self, training_data: "TrainingData") -> Text: + """Generates markdown for entity synomyms.""" - def _generate_synonyms_md(self, training_data): - """generates markdown for entity synomyms.""" entity_synonyms = sorted( training_data.entity_synonyms.items(), key=lambda x: x[1] ) @@ -248,12 +264,13 @@ def _generate_synonyms_md(self, training_data): if i == 0 or entity_synonyms[i - 1][1] != synonym[1]: md += self._generate_section_header_md(SYNONYM, synonym[1]) - md += self._generate_item_md(synonym[0]) + md += self.generate_list_item(synonym[0]) return md - def _generate_regex_features_md(self, training_data): - """generates markdown for regex features.""" + def _generate_regex_features_md(self, training_data: "TrainingData") -> Text: + """Generates markdown for regex features.""" + md = "" # regex features are already sorted regex_features = training_data.regex_features @@ -261,71 +278,39 @@ def _generate_regex_features_md(self, training_data): if i == 0 or regex_features[i - 1]["name"] != regex_feature["name"]: md += self._generate_section_header_md(REGEX, regex_feature["name"]) - md += self._generate_item_md(regex_feature["pattern"]) + md += 
self.generate_list_item(regex_feature["pattern"]) return md - def _generate_lookup_tables_md(self, training_data): - """generates markdown for regex features.""" + def _generate_lookup_tables_md(self, training_data: "TrainingData") -> Text: + """Generates markdown for lookup tables.""" + md = "" # regex features are already sorted lookup_tables = training_data.lookup_tables - for i, lookup_table in enumerate(lookup_tables): + for lookup_table in lookup_tables: md += self._generate_section_header_md(LOOKUP, lookup_table["name"]) elements = lookup_table["elements"] if isinstance(elements, list): for e in elements: - md += self._generate_item_md(e) + md += self.generate_list_item(e) else: md += self._generate_fname_md(elements) return md @staticmethod def _generate_section_header_md( - section_type, title, subtitle=None, prepend_newline=True - ): - """generates markdown section header.""" + section_type: Text, title: Text, prepend_newline: bool = True + ) -> Text: + """Generates markdown section header.""" + prefix = "\n" if prepend_newline else "" - subtitle_suffix = ( - "{}{}".format(RESPONSE_IDENTIFIER_DELIMITER, subtitle) if subtitle else "" - ) - return prefix + "## {}:{}{}\n".format( - section_type, encode_string(title), encode_string(subtitle_suffix) - ) + title = encode_string(title) - @staticmethod - def _generate_item_md(text): - """generates markdown for a list item.""" - return "- {}\n".format(encode_string(text)) + return f"{prefix}## {section_type}:{title}\n" @staticmethod - def _generate_fname_md(text): - """generates markdown for a lookup table file path.""" - return " {}\n".format(encode_string(text)) + def _generate_fname_md(text: Text) -> Text: + """Generates markdown for a lookup table file path.""" - def _generate_message_md(self, message): - """generates markdown for a message object.""" - md = "" - text = message.get("text", "") - entities = sorted(message.get("entities", []), key=lambda k: k["start"]) - - pos = 0 - for entity in entities: - md += text[pos : entity["start"]] - md += self._generate_entity_md(text, entity) - pos = entity["end"] - - md += text[pos:] - - return md - - @staticmethod - def _generate_entity_md(text, entity): - """generates markdown for an entity object.""" - entity_text = text[entity["start"] : entity["end"]] - entity_type = entity["entity"] - if entity_text != entity["value"]: - # add synonym suffix - entity_type += ":{}".format(entity["value"]) - - return "[{}]({})".format(entity_text, entity_type) + return f" {encode_string(text)}\n" diff --git a/rasa/nlu/training_data/formats/markdown_nlg.py b/rasa/nlu/training_data/formats/markdown_nlg.py index e722676647f0..6bc5ece62dab 100644 --- a/rasa/nlu/training_data/formats/markdown_nlg.py +++ b/rasa/nlu/training_data/formats/markdown_nlg.py @@ -1,16 +1,15 @@ import logging -import re import typing -from typing import Optional, Text, Any, List, Dict - -if typing.TYPE_CHECKING: - from rasa.nlu.training_data import TrainingData +from typing import Any, Dict, List, Text +from rasa.nlu.constants import TEXT from rasa.nlu.training_data.formats.readerwriter import ( TrainingDataReader, TrainingDataWriter, ) +if typing.TYPE_CHECKING: + from rasa.nlu.training_data import TrainingData logger = logging.getLogger(__name__) @@ -19,7 +18,7 @@ class NLGMarkdownReader(TrainingDataReader): """Reads markdown training data containing NLG stories and creates a TrainingData object.""" def __init__(self) -> None: - self.stories = {} + self.responses = {} def reads(self, s: Text, **kwargs: Any) -> "TrainingData": 
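A hedged sketch of how the renamed reader is expected to behave: retrieval-intent responses now come back under `responses` as dicts with a `text` key rather than plain strings. The markdown snippet and the printed result are illustrative; only `NLGMarkdownReader.reads` is taken from the code here.

```python
from rasa.nlu.training_data.formats import NLGMarkdownReader

nlg_md = """## ask name
* chitchat/ask_name
- my name is Sara!
## ask weather
* chitchat/ask_weather
- it's always sunny where I live"""

responses = NLGMarkdownReader().reads(nlg_md).responses
print(responses["chitchat/ask_name"])  # [{'text': 'my name is Sara!'}]
```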
"""Read markdown string and create TrainingData object""" @@ -27,13 +26,13 @@ def reads(self, s: Text, **kwargs: Any) -> "TrainingData": self.__init__() lines = s.splitlines() - self.stories = self.process_lines(lines) - return TrainingData(nlg_stories=self.stories) + self.responses = self.process_lines(lines) + return TrainingData(responses=self.responses) @staticmethod - def process_lines(lines: List[Text]) -> Dict[Text, List[Text]]: + def process_lines(lines: List[Text]) -> Dict[Text, List[Dict[Text, Text]]]: - stories = {} + responses = {} story_intent = None story_bot_utterances = [] # Keeping it a list for future additions @@ -47,16 +46,18 @@ def process_lines(lines: List[Text]) -> Dict[Text, List[Text]]: elif line.startswith("#"): # reached a new story block if story_intent: - stories[story_intent] = story_bot_utterances + responses[story_intent] = story_bot_utterances story_bot_utterances = [] story_intent = None elif line.startswith("-"): - # reach a assistant's utterance + # reach an assistant's utterance # utterance might have '-' itself, so joining them back if any utterance = "-".join(line.split("- ")[1:]) - story_bot_utterances.append(utterance) + # utterance might contain escaped newlines that we want to unescape + utterance = utterance.replace("\\n", "\n") + story_bot_utterances.append({TEXT: utterance}) elif line.startswith("*"): # reached a user intent @@ -65,39 +66,31 @@ def process_lines(lines: List[Text]) -> Dict[Text, List[Text]]: else: # reached an unknown type of line logger.warning( - "Skipping line {}. " + f"Skipping line {line_num}. " "No valid command found. " - "Line Content: '{}'" - "".format(line_num, line) + f"Line Content: '{line}'" ) except Exception as e: - msg = "Error in line {}: {}".format(line_num, e) + msg = f"Error in line {line_num}: {e}" logger.error(msg, exc_info=1) # pytype: disable=wrong-arg-types raise ValueError(msg) # add last story if story_intent: - stories[story_intent] = story_bot_utterances + responses[story_intent] = story_bot_utterances - return stories + return responses class NLGMarkdownWriter(TrainingDataWriter): - def dumps(self, training_data): + def dumps(self, training_data: "TrainingData") -> Text: """Transforms the NlG part of TrainingData object into a markdown string.""" - md = "" - md += self._generate_nlg_stories(training_data) - - return md - - @staticmethod - def _generate_nlg_stories(training_data: "TrainingData"): md = "" - for intent, utterances in training_data.nlg_stories.items(): + for intent, utterances in training_data.responses.items(): md += "## \n" - md += "* {}\n".format(intent) + md += f"* {intent}\n" for utterance in utterances: - md += "- {}\n".format(utterance) + md += f"- {utterance.get('text')}\n" md += "\n" return md diff --git a/rasa/nlu/training_data/formats/rasa.py b/rasa/nlu/training_data/formats/rasa.py index 8b144fdf8259..e748925e89f9 100644 --- a/rasa/nlu/training_data/formats/rasa.py +++ b/rasa/nlu/training_data/formats/rasa.py @@ -1,22 +1,14 @@ -from collections import defaultdict - import logging import typing -from typing import Any, Dict, Text, Tuple +from collections import defaultdict +from typing import Any, Dict, Text -from rasa.constants import DOCS_BASE_URL from rasa.nlu.training_data.formats.readerwriter import ( JsonTrainingDataReader, TrainingDataWriter, ) from rasa.nlu.training_data.util import transform_entity_synonyms from rasa.nlu.utils import json_to_string -from rasa.nlu.constants import ( - MESSAGE_INTENT_ATTRIBUTE, - MESSAGE_RESPONSE_KEY_ATTRIBUTE, - 
MESSAGE_RESPONSE_ATTRIBUTE, - RESPONSE_IDENTIFIER_DELIMITER, -) if typing.TYPE_CHECKING: from rasa.nlu.training_data import Message, TrainingData @@ -25,36 +17,25 @@ class RasaReader(JsonTrainingDataReader): - def read_from_json(self, js, **kwargs): + def read_from_json(self, js: Dict[Text, Any], **_) -> "TrainingData": """Loads training data stored in the rasa NLU data format.""" from rasa.nlu.training_data import Message, TrainingData + import rasa.nlu.schemas.data_schema as schema + import rasa.utils.validation as validation_utils - validate_rasa_nlu_data(js) + validation_utils.validate_training_data(js, schema.rasa_nlu_data_schema()) data = js["rasa_nlu_data"] common_examples = data.get("common_examples", []) - intent_examples = data.get("intent_examples", []) - entity_examples = data.get("entity_examples", []) entity_synonyms = data.get("entity_synonyms", []) regex_features = data.get("regex_features", []) lookup_tables = data.get("lookup_tables", []) entity_synonyms = transform_entity_synonyms(entity_synonyms) - if intent_examples or entity_examples: - logger.warning( - "DEPRECATION warning: your rasa data " - "contains 'intent_examples' " - "or 'entity_examples' which will be " - "removed in the future. Consider " - "putting all your examples " - "into the 'common_examples' section." - ) - - all_examples = common_examples + intent_examples + entity_examples training_examples = [] - for ex in all_examples: - msg = Message.build(ex["text"], ex.get("intent"), ex.get("entities")) + for ex in common_examples: + msg = Message.build(**ex) training_examples.append(msg) return TrainingData( @@ -89,91 +70,5 @@ def dumps(self, training_data: "TrainingData", **kwargs) -> Text: "entity_synonyms": formatted_synonyms, } }, - **kwargs + **kwargs, ) - - -def validate_rasa_nlu_data(data: Dict[Text, Any]) -> None: - """Validate rasa training data format to ensure proper training. - - Raises exception on failure.""" - from jsonschema import validate - from jsonschema import ValidationError - - try: - validate(data, _rasa_nlu_data_schema()) - except ValidationError as e: - e.message += ( - ". Failed to validate training data, make sure your data " - "is valid. 
For more information about the format visit " - "{}/nlu/training-data-format/".format(DOCS_BASE_URL) - ) - raise e - - -def _rasa_nlu_data_schema(): - training_example_schema = { - "type": "object", - "properties": { - "text": {"type": "string", "minLength": 1}, - "intent": {"type": "string"}, - "entities": { - "type": "array", - "items": { - "type": "object", - "properties": { - "start": {"type": "number"}, - "end": {"type": "number"}, - "value": {"type": "string"}, - "entity": {"type": "string"}, - }, - "required": ["start", "end", "entity"], - }, - }, - }, - "required": ["text"], - } - - regex_feature_schema = { - "type": "object", - "properties": {"name": {"type": "string"}, "pattern": {"type": "string"}}, - } - - lookup_table_schema = { - "type": "object", - "properties": { - "name": {"type": "string"}, - "elements": { - "oneOf": [ - {"type": "array", "items": {"type": "string"}}, - {"type": "string"}, - ] - }, - }, - } - - return { - "type": "object", - "properties": { - "rasa_nlu_data": { - "type": "object", - "properties": { - "regex_features": {"type": "array", "items": regex_feature_schema}, - "common_examples": { - "type": "array", - "items": training_example_schema, - }, - "intent_examples": { - "type": "array", - "items": training_example_schema, - }, - "entity_examples": { - "type": "array", - "items": training_example_schema, - }, - "lookup_tables": {"type": "array", "items": lookup_table_schema}, - }, - } - }, - "additionalProperties": False, - } diff --git a/rasa/nlu/training_data/formats/rasa_yaml.py b/rasa/nlu/training_data/formats/rasa_yaml.py new file mode 100644 index 000000000000..f27f1908825a --- /dev/null +++ b/rasa/nlu/training_data/formats/rasa_yaml.py @@ -0,0 +1,483 @@ +import logging +from collections import OrderedDict +from pathlib import Path +from typing import ( + Text, + Any, + List, + Dict, + Tuple, + TYPE_CHECKING, + Union, + Iterator, + Optional, +) + +from rasa.utils import validation +from ruamel.yaml import YAMLError, StringIO + +import rasa.utils.io as io_utils +from rasa.constants import ( + DOCS_URL_TRAINING_DATA_NLU, + LATEST_TRAINING_DATA_FORMAT_VERSION, +) +from rasa.data import YAML_FILE_EXTENSIONS +from rasa.nlu.training_data.formats.readerwriter import ( + TrainingDataReader, + TrainingDataWriter, +) +from rasa.utils.common import raise_warning + +if TYPE_CHECKING: + from rasa.nlu.training_data import TrainingData, Message + +logger = logging.getLogger(__name__) + +KEY_NLU = "nlu" +KEY_RESPONSES = "responses" +KEY_INTENT = "intent" +KEY_INTENT_EXAMPLES = "examples" +KEY_INTENT_TEXT = "text" +KEY_SYNONYM = "synonym" +KEY_SYNONYM_EXAMPLES = "examples" +KEY_REGEX = "regex" +KEY_REGEX_EXAMPLES = "examples" +KEY_LOOKUP = "lookup" +KEY_LOOKUP_EXAMPLES = "examples" +KEY_METADATA = "metadata" + +MULTILINE_TRAINING_EXAMPLE_LEADING_SYMBOL = "-" + +NLU_SCHEMA_FILE = "nlu/schemas/nlu.yml" + +STRIP_SYMBOLS = "\n\r " + + +class RasaYAMLReader(TrainingDataReader): + """Reads YAML training data and creates a TrainingData object.""" + + def __init__(self) -> None: + super().__init__() + self.training_examples: List[Message] = [] + self.entity_synonyms: Dict[Text, Text] = {} + self.regex_features: List[Dict[Text, Text]] = [] + self.lookup_tables: List[Dict[Text, Any]] = [] + self.responses: Dict[Text, List[Dict[Text, Any]]] = {} + + @staticmethod + def validate(string: Text) -> None: + """Check if the string adheres to the NLU yaml data schema. 
+ + If the string is not in the right format, an exception will be raised.""" + try: + validation.validate_yaml_schema(string, NLU_SCHEMA_FILE) + except validation.InvalidYamlFileError as e: + raise ValueError from e + + def reads(self, string: Text, **kwargs: Any) -> "TrainingData": + """Reads TrainingData in YAML format from a string. + + Args: + string: String with YAML training data. + **kwargs: Keyword arguments. + + Returns: + New `TrainingData` object with parsed training data. + """ + from rasa.nlu.training_data import TrainingData + from rasa.validator import Validator + + self.validate(string) + + yaml_content = io_utils.read_yaml(string) + + if not Validator.validate_training_data_format_version( + yaml_content, self.filename + ): + return TrainingData() + + for key, value in yaml_content.items(): # pytype: disable=attribute-error + if key == KEY_NLU: + self._parse_nlu(value) + elif key == KEY_RESPONSES: + self._parse_responses(value) + + return TrainingData( + self.training_examples, + self.entity_synonyms, + self.regex_features, + self.lookup_tables, + self.responses, + ) + + def _parse_nlu(self, nlu_data: Optional[List[Dict[Text, Any]]]) -> None: + + if not nlu_data: + return + + for nlu_item in nlu_data: + if not isinstance(nlu_item, dict): + raise_warning( + f"Unexpected block found in '{self.filename}':\n" + f"{nlu_item}\n" + f"Items under the '{KEY_NLU}' key must be YAML dictionaries. " + f"This block will be skipped.", + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + continue + + if KEY_INTENT in nlu_item.keys(): + self._parse_intent(nlu_item) + elif KEY_SYNONYM in nlu_item.keys(): + self._parse_synonym(nlu_item) + elif KEY_REGEX in nlu_item.keys(): + self._parse_regex(nlu_item) + elif KEY_LOOKUP in nlu_item.keys(): + self._parse_lookup(nlu_item) + else: + raise_warning( + f"Issue found while processing '{self.filename}': " + f"Could not find supported key in the section:\n" + f"{nlu_item}\n" + f"Supported keys are: '{KEY_INTENT}', '{KEY_SYNONYM}', " + f"'{KEY_REGEX}', '{KEY_LOOKUP}'. " + f"This section will be skipped.", + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + + def _parse_responses(self, responses_data: Dict[Text, List[Any]]) -> None: + from rasa.core.domain import Domain + + self.responses = Domain.collect_templates(responses_data) + + def _parse_intent(self, data: Dict[Text, Any]) -> None: + from rasa.nlu.training_data import Message + import rasa.nlu.training_data.entities_parser as entities_parser + import rasa.nlu.training_data.synonyms_parser as synonyms_parser + import rasa.nlu.constants as nlu_constants + + intent = data.get(KEY_INTENT, "") + if not intent: + raise_warning( + f"Issue found while processing '{self.filename}': " + f"The intent has an empty name. " + f"Intents should have a name defined under the {KEY_INTENT} key. 
" + f"It will be skipped.", + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + return + + examples = data.get(KEY_INTENT_EXAMPLES, "") + for example, entities in self._parse_training_examples(examples, intent): + + plain_text = entities_parser.replace_entities(example) + + synonyms_parser.add_synonyms_from_entities( + plain_text, entities, self.entity_synonyms + ) + + message = Message.build(plain_text, intent) + if entities: + message.set(nlu_constants.ENTITIES, entities) + self.training_examples.append(message) + + def _parse_training_examples( + self, examples: Union[Text, List[Dict[Text, Any]]], intent: Text + ) -> List[Tuple[Text, List[Dict[Text, Any]]]]: + import rasa.nlu.training_data.entities_parser as entities_parser + + if isinstance(examples, list): + example_strings = [ + # pytype: disable=attribute-error + example.get(KEY_INTENT_TEXT, "").strip(STRIP_SYMBOLS) + for example in examples + if example + ] + # pytype: enable=attribute-error + elif isinstance(examples, str): + example_strings = self._parse_multiline_example(intent, examples) + else: + raise_warning( + f"Unexpected block found in '{self.filename}' " + f"while processing intent '{intent}':\n" + f"{examples}\n" + f"This block will be skipped.", + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + return [] + + if not example_strings: + raise_warning( + f"Issue found while processing '{self.filename}': " + f"Intent '{intent}' has no examples.", + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + + results = [] + for example in example_strings: + entities = entities_parser.find_entities_in_training_example(example) + results.append((example, entities)) + + return results + + def _parse_synonym(self, nlu_item: Dict[Text, Any]) -> None: + import rasa.nlu.training_data.synonyms_parser as synonyms_parser + + synonym_name = nlu_item[KEY_SYNONYM] + if not synonym_name: + raise_warning( + f"Issue found while processing '{self.filename}': " + f"The synonym has an empty name. " + f"Synonyms should have a name defined under the {KEY_SYNONYM} key. " + f"It will be skipped.", + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + return + + examples = nlu_item.get(KEY_SYNONYM_EXAMPLES, "") + + if not examples: + raise_warning( + f"Issue found while processing '{self.filename}': " + f"{KEY_SYNONYM}: {synonym_name} doesn't have any examples. " + f"It will be skipped.", + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + return + + if not isinstance(examples, str): + raise_warning( + f"Unexpected block found in '{self.filename}':\n" + f"{examples}\n" + f"It will be skipped.", + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + return + + for example in self._parse_multiline_example(synonym_name, examples): + synonyms_parser.add_synonym(example, synonym_name, self.entity_synonyms) + + def _parse_regex(self, nlu_item: Dict[Text, Any]) -> None: + regex_name = nlu_item[KEY_REGEX] + if not regex_name: + raise_warning( + f"Issue found while processing '{self.filename}': " + f"The regex has an empty name." + f"Regex should have a name defined under the '{KEY_REGEX}' key. " + f"It will be skipped.", + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + return + + examples = nlu_item.get(KEY_REGEX_EXAMPLES, "") + if not examples: + raise_warning( + f"Issue found while processing '{self.filename}': " + f"'{KEY_REGEX}: {regex_name}' doesn't have any examples. 
" + f"It will be skipped.", + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + return + + if not isinstance(examples, str): + raise_warning( + f"Unexpected block found in '{self.filename}':\n" + f"{examples}\n" + f"This block will be skipped.", + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + return + + for example in self._parse_multiline_example(regex_name, examples): + self.regex_features.append({"name": regex_name, "pattern": example}) + + def _parse_lookup(self, nlu_item: Dict[Text, Any]): + import rasa.nlu.training_data.lookup_tables_parser as lookup_tables_parser + + lookup_item_name = nlu_item[KEY_LOOKUP] + if not lookup_item_name: + raise_warning( + f"Issue found while processing '{self.filename}': " + f"The lookup item has an empty name. " + f"Lookup items should have a name defined under the '{KEY_LOOKUP}' " + f"key. It will be skipped.", + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + return + + examples = nlu_item.get(KEY_LOOKUP_EXAMPLES, "") + if not examples: + raise_warning( + f"Issue found while processing '{self.filename}': " + f"'{KEY_LOOKUP}: {lookup_item_name}' doesn't have any examples. " + f"It will be skipped.", + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + return + + if not isinstance(examples, str): + raise_warning( + f"Unexpected block found in '{self.filename}':\n" + f"{examples}\n" + f"This block will be skipped.", + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + return + + for example in self._parse_multiline_example(lookup_item_name, examples): + lookup_tables_parser.add_item_to_lookup_tables( + lookup_item_name, example, self.lookup_tables + ) + + def _parse_multiline_example(self, item: Text, examples: Text) -> Iterator[Text]: + for example in examples.splitlines(): + if not example.startswith(MULTILINE_TRAINING_EXAMPLE_LEADING_SYMBOL): + raise_warning( + f"Issue found while processing '{self.filename}': " + f"The item '{item}' contains an example that doesn't start with a " + f"'{MULTILINE_TRAINING_EXAMPLE_LEADING_SYMBOL}' symbol: " + f"{example}\n" + f"This training example will be skipped.", + docs=DOCS_URL_TRAINING_DATA_NLU, + ) + continue + yield example[1:].strip(STRIP_SYMBOLS) + + @staticmethod + def is_yaml_nlu_file(filename: Text) -> bool: + """Checks if the specified file possibly contains NLU training data in YAML. + + Args: + filename: name of the file to check. + + Returns: + `True` if the `filename` is possibly a valid YAML NLU file, + `False` otherwise. + """ + if Path(filename).suffix not in YAML_FILE_EXTENSIONS: + return False + + try: + content = io_utils.read_yaml_file(filename) + + return any(key in content for key in {KEY_NLU, KEY_RESPONSES}) + except (YAMLError, Warning) as e: + logger.error( + f"Tried to check if '{filename}' is an NLU file, but failed to " + f"read it. If this file contains NLU data, you should " + f"investigate this error, otherwise it is probably best to " + f"move the file to a different location. " + f"Error: {e}" + ) + return False + + +class RasaYAMLWriter(TrainingDataWriter): + """Writes training data into a file in a YAML format.""" + + def dumps(self, training_data: "TrainingData") -> Text: + """Turns TrainingData into a string.""" + stream = StringIO() + self.dump(stream, training_data) + return stream.getvalue() + + def dump( + self, target: Union[Text, Path, StringIO], training_data: "TrainingData" + ) -> None: + """Writes training data into a file in a YAML format. + + Args: + target: Name of the target object to write the YAML to. + training_data: TrainingData object. 
+ """ + from rasa.validator import KEY_TRAINING_DATA_FORMAT_VERSION + from ruamel.yaml.scalarstring import DoubleQuotedScalarString + + nlu_items = [] + nlu_items.extend(self.process_intents(training_data)) + nlu_items.extend(self.process_synonyms(training_data)) + nlu_items.extend(self.process_regexes(training_data)) + nlu_items.extend(self.process_lookup_tables(training_data)) + + result = OrderedDict() + result[KEY_TRAINING_DATA_FORMAT_VERSION] = DoubleQuotedScalarString( + LATEST_TRAINING_DATA_FORMAT_VERSION + ) + + if nlu_items: + result[KEY_NLU] = nlu_items + + if training_data.responses: + result[KEY_RESPONSES] = training_data.responses + + io_utils.write_yaml(result, target, True) + + @classmethod + def process_intents(cls, training_data: "TrainingData") -> List[OrderedDict]: + training_data = cls.prepare_training_examples(training_data) + return RasaYAMLWriter.process_training_examples_by_key( + training_data, + KEY_INTENT, + KEY_INTENT_EXAMPLES, + TrainingDataWriter.generate_message, + ) + + @classmethod + def process_synonyms(cls, training_data: "TrainingData") -> List[OrderedDict]: + inverted_synonyms = OrderedDict() + for example, synonym in training_data.entity_synonyms.items(): + if not inverted_synonyms.get(synonym): + inverted_synonyms[synonym] = [] + inverted_synonyms[synonym].append(example) + + return cls.process_training_examples_by_key( + inverted_synonyms, KEY_SYNONYM, KEY_SYNONYM_EXAMPLES + ) + + @classmethod + def process_regexes(cls, training_data: "TrainingData") -> List[OrderedDict]: + inverted_regexes = OrderedDict() + for regex in training_data.regex_features: + if not inverted_regexes.get(regex["name"]): + inverted_regexes[regex["name"]] = [] + inverted_regexes[regex["name"]].append(regex["pattern"]) + + return cls.process_training_examples_by_key( + inverted_regexes, KEY_REGEX, KEY_REGEX_EXAMPLES + ) + + @classmethod + def process_lookup_tables(cls, training_data: "TrainingData") -> List[OrderedDict]: + prepared_lookup_tables = OrderedDict() + for lookup_table in training_data.lookup_tables: + prepared_lookup_tables[lookup_table["name"]] = lookup_table["elements"] + + return cls.process_training_examples_by_key( + prepared_lookup_tables, KEY_LOOKUP, KEY_LOOKUP_EXAMPLES + ) + + @staticmethod + def process_training_examples_by_key( + training_examples: Dict, + key_name: Text, + key_examples: Text, + example_extraction_predicate=lambda x: x, + ) -> List[OrderedDict]: + from ruamel.yaml.scalarstring import LiteralScalarString + + result = [] + for entity_key, examples in training_examples.items(): + + converted_examples = [ + TrainingDataWriter.generate_list_item( + example_extraction_predicate(example).strip(STRIP_SYMBOLS) + ) + for example in examples + ] + + next_item = OrderedDict() + next_item[key_name] = entity_key + next_item[key_examples] = LiteralScalarString("".join(converted_examples)) + result.append(next_item) + + return result diff --git a/rasa/nlu/training_data/formats/readerwriter.py b/rasa/nlu/training_data/formats/readerwriter.py index 36273bba0688..d8763bfc676c 100644 --- a/rasa/nlu/training_data/formats/readerwriter.py +++ b/rasa/nlu/training_data/formats/readerwriter.py @@ -1,36 +1,144 @@ import json +from collections import OrderedDict +from pathlib import Path -import rasa.utils.io +from rasa.core.constants import INTENT_MESSAGE_PREFIX + +from rasa.nlu.constants import ( + INTENT, + ENTITY_ATTRIBUTE_START, + ENTITY_ATTRIBUTE_END, + ENTITY_ATTRIBUTE_TYPE, + ENTITY_ATTRIBUTE_VALUE, + ENTITY_ATTRIBUTE_ROLE, + ENTITY_ATTRIBUTE_GROUP, +) + 
+import rasa.utils.io as io_utils +import typing from rasa.nlu import utils +from typing import Text, Dict, Any, Union + +if typing.TYPE_CHECKING: + from rasa.nlu.training_data import TrainingData + +class TrainingDataReader: + def __init__(self): + self.filename: Text = "" -class TrainingDataReader(object): - def read(self, filename, **kwargs): + def read(self, filename: Union[Text, Path], **kwargs: Any) -> "TrainingData": """Reads TrainingData from a file.""" - return self.reads(rasa.utils.io.read_file(filename), **kwargs) + self.filename = filename + return self.reads(io_utils.read_file(filename), **kwargs) - def reads(self, s, **kwargs): + def reads(self, s: Text, **kwargs: Any) -> "TrainingData": """Reads TrainingData from a string.""" raise NotImplementedError -class TrainingDataWriter(object): - def dump(self, filename, training_data): +class TrainingDataWriter: + def dump(self, filename: Text, training_data) -> None: """Writes a TrainingData object in markdown format to a file.""" s = self.dumps(training_data) utils.write_to_file(filename, s) - def dumps(self, training_data): + def dumps(self, training_data: "TrainingData") -> Text: """Turns TrainingData into a string.""" raise NotImplementedError + @staticmethod + def prepare_training_examples(training_data: "TrainingData") -> OrderedDict: + """Pre-processes training data examples by removing not trainable entities.""" + + import rasa.nlu.training_data.util as rasa_nlu_training_data_utils + + training_examples = OrderedDict() + + # Sort by intent while keeping basic intent order + for example in [e.as_dict_nlu() for e in training_data.training_examples]: + rasa_nlu_training_data_utils.remove_untrainable_entities_from(example) + intent = example[INTENT] + training_examples.setdefault(intent, []) + training_examples[intent].append(example) + + return training_examples + + @staticmethod + def generate_list_item(text: Text) -> Text: + """Generates text for a list item.""" + + return f"- {io_utils.encode_string(text)}\n" + + @staticmethod + def generate_message(message: Dict[Text, Any]) -> Text: + """Generates text for a message object.""" + + md = "" + text = message.get("text", "") + + pos = 0 + + # If a message was prefixed with `INTENT_MESSAGE_PREFIX` (this can only happen + # in end-to-end stories) then potential entities were provided in the json + # format (e.g. 
`/greet{"name": "Rasa"}) and we don't have to add the NLU + # entity annotation + if not text.startswith(INTENT_MESSAGE_PREFIX): + entities = sorted(message.get("entities", []), key=lambda k: k["start"]) + + for entity in entities: + md += text[pos : entity["start"]] + md += TrainingDataWriter.generate_entity(text, entity) + pos = entity["end"] + + md += text[pos:] + + return md + + @staticmethod + def generate_entity(text: Text, entity: Dict[Text, Any]) -> Text: + """Generates text for an entity object.""" + import json + + entity_text = text[ + entity[ENTITY_ATTRIBUTE_START] : entity[ENTITY_ATTRIBUTE_END] + ] + entity_type = entity.get(ENTITY_ATTRIBUTE_TYPE) + entity_value = entity.get(ENTITY_ATTRIBUTE_VALUE) + entity_role = entity.get(ENTITY_ATTRIBUTE_ROLE) + entity_group = entity.get(ENTITY_ATTRIBUTE_GROUP) + + if entity_value and entity_value == entity_text: + entity_value = None + + use_short_syntax = ( + entity_value is None and entity_role is None and entity_group is None + ) + + if use_short_syntax: + return f"[{entity_text}]({entity_type})" + + entity_dict = OrderedDict( + [ + (ENTITY_ATTRIBUTE_TYPE, entity_type), + (ENTITY_ATTRIBUTE_ROLE, entity_role), + (ENTITY_ATTRIBUTE_GROUP, entity_group), + (ENTITY_ATTRIBUTE_VALUE, entity_value), + ] + ) + entity_dict = OrderedDict( + [(k, v) for k, v in entity_dict.items() if v is not None] + ) + + return f"[{entity_text}]{json.dumps(entity_dict)}" + class JsonTrainingDataReader(TrainingDataReader): - def reads(self, s, **kwargs): + def reads(self, s: Text, **kwargs: Any) -> "TrainingData": """Transforms string into json object and passes it on.""" js = json.loads(s) return self.read_from_json(js, **kwargs) - def read_from_json(self, js, **kwargs): + def read_from_json(self, js: Dict[Text, Any], **kwargs: Any) -> "TrainingData": """Reads TrainingData from a json object.""" raise NotImplementedError diff --git a/rasa/nlu/training_data/loading.py b/rasa/nlu/training_data/loading.py index 14a3864a1d22..041bc55f1396 100644 --- a/rasa/nlu/training_data/loading.py +++ b/rasa/nlu/training_data/loading.py @@ -1,12 +1,12 @@ import json import logging import os - +import re import typing from typing import Optional, Text +import rasa.utils.io as io_utils from rasa.nlu import utils -from rasa.nlu.training_data.formats import markdown from rasa.nlu.training_data.formats.dialogflow import ( DIALOGFLOW_AGENT, DIALOGFLOW_ENTITIES, @@ -16,8 +16,6 @@ DIALOGFLOW_PACKAGE, ) from rasa.utils.endpoints import EndpointConfig -import rasa.utils.io as io_utils -import re if typing.TYPE_CHECKING: from rasa.nlu.training_data import TrainingData @@ -30,11 +28,12 @@ LUIS = "luis" RASA = "rasa_nlu" MARKDOWN = "md" +RASA_YAML = "rasa_yml" UNK = "unk" MARKDOWN_NLG = "nlg.md" +JSON = "json" DIALOGFLOW_RELEVANT = {DIALOGFLOW_ENTITIES, DIALOGFLOW_INTENT} -_markdown_section_markers = ["## {}:".format(s) for s in markdown.available_sections] _json_format_heuristics = { WIT: lambda js, fn: "data" in js and isinstance(js.get("data"), list), LUIS: lambda js, fn: "luis_schema_version" in js, @@ -51,7 +50,7 @@ # ## # * intent/response_key # - response_text -_nlg_markdown_marker_regex = re.compile(r"##\s*.*\n\*.*\/.*\n\s*\t*\-.*") +_nlg_markdown_marker_regex = re.compile(r"##\s*.*\n\*[^:]*\/.*\n\s*\t*\-.*") def load_data(resource_name: Text, language: Optional[Text] = "en") -> "TrainingData": @@ -61,7 +60,7 @@ def load_data(resource_name: Text, language: Optional[Text] = "en") -> "Training from rasa.nlu.training_data import TrainingData if not os.path.exists(resource_name): - raise 
ValueError("File '{}' does not exist.".format(resource_name)) + raise ValueError(f"File '{resource_name}' does not exist.") files = io_utils.list_files(resource_name) data_sets = [_load(f, language) for f in files] @@ -73,9 +72,6 @@ def load_data(resource_name: Text, language: Optional[Text] = "en") -> "Training else: training_data = data_sets[0].merge(*data_sets[1:]) - if training_data.nlg_stories: - training_data.fill_response_phrases() - return training_data @@ -95,12 +91,13 @@ async def load_data_from_endpoint( return training_data except Exception as e: - logger.warning("Could not retrieve training data from URL:\n{}".format(e)) + logger.warning(f"Could not retrieve training data from URL:\n{e}") def _reader_factory(fformat: Text) -> Optional["TrainingDataReader"]: """Generates the appropriate reader class based on the file format.""" from rasa.nlu.training_data.formats import ( + RasaYAMLReader, MarkdownReader, WitReader, LuisReader, @@ -122,6 +119,8 @@ def _reader_factory(fformat: Text) -> Optional["TrainingDataReader"]: reader = MarkdownReader() elif fformat == MARKDOWN_NLG: reader = NLGMarkdownReader() + elif fformat == RASA_YAML: + reader = RasaYAMLReader() return reader @@ -130,9 +129,8 @@ def _load(filename: Text, language: Optional[Text] = "en") -> Optional["Training fformat = guess_format(filename) if fformat == UNK: - raise ValueError("Unknown data format for file '{}'.".format(filename)) + raise ValueError(f"Unknown data format for file '{filename}'.") - logger.debug("Training data format of '{}' is '{}'.".format(filename, fformat)) reader = _reader_factory(fformat) if reader: @@ -157,6 +155,8 @@ def guess_format(filename: Text) -> Text: Returns: Guessed file format. """ + from rasa.nlu.training_data.formats import RasaYAMLReader, markdown + guess = UNK content = "" @@ -164,16 +164,20 @@ def guess_format(filename: Text) -> Text: content = io_utils.read_file(filename) js = json.loads(content) except ValueError: - if any([marker in content for marker in _markdown_section_markers]): + if any(marker in content for marker in markdown.MARKDOWN_SECTION_MARKERS): guess = MARKDOWN elif _is_nlg_story_format(content): guess = MARKDOWN_NLG + elif RasaYAMLReader.is_yaml_nlu_file(filename): + guess = RASA_YAML else: for fformat, format_heuristic in _json_format_heuristics.items(): if format_heuristic(js, filename): guess = fformat break + logger.debug(f"Training data format of '{filename}' is '{guess}'.") + return guess diff --git a/rasa/nlu/training_data/lookup_tables_parser.py b/rasa/nlu/training_data/lookup_tables_parser.py new file mode 100644 index 000000000000..c860b259ea80 --- /dev/null +++ b/rasa/nlu/training_data/lookup_tables_parser.py @@ -0,0 +1,20 @@ +from typing import Any, Text, List, Dict + + +def add_item_to_lookup_tables( + title: Text, item: Text, existing_lookup_tables: List[Dict[Text, List[Text]]] +) -> None: + """Takes a list of lookup table dictionaries. Finds the one associated + with the current lookup, then adds the item to the list. + + Args: + title: Name of the lookup item. + item: The lookup item. + existing_lookup_tables: Existing lookup items that will be extended. 
+ """ + matches = [table for table in existing_lookup_tables if table["name"] == title] + if not matches: + existing_lookup_tables.append({"name": title, "elements": [item]}) + else: + elements = matches[0]["elements"] + elements.append(item) diff --git a/rasa/nlu/training_data/message.py b/rasa/nlu/training_data/message.py index 6ed2fdf01a7f..f1c4f79b157f 100644 --- a/rasa/nlu/training_data/message.py +++ b/rasa/nlu/training_data/message.py @@ -1,49 +1,76 @@ -# -*- coding: utf-8 -*- +from typing import Any, Optional, Tuple, Text, Dict, Set, List, Union -from rasa.nlu.utils import ordered +import numpy as np +import scipy.sparse +import typing +from rasa.exceptions import RasaException from rasa.nlu.constants import ( - MESSAGE_TEXT_ATTRIBUTE, - MESSAGE_INTENT_ATTRIBUTE, - MESSAGE_RESPONSE_ATTRIBUTE, - MESSAGE_ENTITIES_ATTRIBUTE, - MESSAGE_RESPONSE_KEY_ATTRIBUTE, + ENTITIES, + INTENT, + RESPONSE, + RESPONSE_KEY_ATTRIBUTE, + TEXT, RESPONSE_IDENTIFIER_DELIMITER, + FEATURE_TYPE_SEQUENCE, + FEATURE_TYPE_SENTENCE, ) +from rasa.nlu.utils import ordered + +if typing.TYPE_CHECKING: + from rasa.nlu.featurizers.featurizer import Features -class Message(object): - def __init__(self, text, data=None, output_properties=None, time=None): +class Message: + def __init__( + self, + text: Text, + data: Optional[Dict[Text, Any]] = None, + output_properties: Optional[Set] = None, + time: Optional[Text] = None, + features: Optional[List["Features"]] = None, + **kwargs, + ) -> None: self.text = text self.time = time self.data = data if data else {} + self.features = features if features else [] + + self.data.update(**kwargs) if output_properties: self.output_properties = output_properties else: self.output_properties = set() - def set(self, prop, info, add_to_output=False): - self.data[prop] = info - if add_to_output: - self.output_properties.add(prop) + def add_features(self, features: Optional["Features"]) -> None: + if features is not None: + self.features.append(features) + + def set(self, prop, info, add_to_output=False) -> None: + if prop == TEXT: + self.text = info + else: + self.data[prop] = info + if add_to_output: + self.output_properties.add(prop) - def get(self, prop, default=None): - if prop == MESSAGE_TEXT_ATTRIBUTE: + def get(self, prop, default=None) -> Any: + if prop == TEXT: return self.text return self.data.get(prop, default) - def as_dict_nlu(self): + def as_dict_nlu(self) -> dict: """Get dict representation of message as it would appear in training data""" d = self.as_dict() - if d.get(MESSAGE_INTENT_ATTRIBUTE, None): - d[MESSAGE_INTENT_ATTRIBUTE] = self.get_combined_intent_response_key() - d.pop(MESSAGE_RESPONSE_KEY_ATTRIBUTE, None) - d.pop(MESSAGE_RESPONSE_ATTRIBUTE, None) + if d.get(INTENT, None): + d[INTENT] = self.get_combined_intent_response_key() + d.pop(RESPONSE_KEY_ATTRIBUTE, None) + d.pop(RESPONSE, None) return d - def as_dict(self, only_output_properties=False): + def as_dict(self, only_output_properties=False) -> dict: if only_output_properties: d = { key: value @@ -53,49 +80,203 @@ def as_dict(self, only_output_properties=False): else: d = self.data - # Filter all keys with None value. These could have come while building the Message object in markdown format + # Filter all keys with None value. 
These could have come while building the + # Message object in markdown format d = {key: value for key, value in d.items() if value is not None} return dict(d, text=self.text) - def __eq__(self, other): + def __eq__(self, other) -> bool: if not isinstance(other, Message): return False else: return (other.text, ordered(other.data)) == (self.text, ordered(self.data)) - def __hash__(self): + def __hash__(self) -> int: return hash((self.text, str(ordered(self.data)))) @classmethod - def build(cls, text, intent=None, entities=None): + def build( + cls, + text: Text, + intent: Optional[Text] = None, + entities: List[Dict[Text, Any]] = None, + **kwargs, + ) -> "Message": data = {} if intent: split_intent, response_key = cls.separate_intent_response_key(intent) - data[MESSAGE_INTENT_ATTRIBUTE] = split_intent + if split_intent: + data[INTENT] = split_intent if response_key: - data[MESSAGE_RESPONSE_KEY_ATTRIBUTE] = response_key + data[RESPONSE_KEY_ATTRIBUTE] = response_key if entities: - data[MESSAGE_ENTITIES_ATTRIBUTE] = entities - return cls(text, data) + data[ENTITIES] = entities + return cls(text, data, **kwargs) - def get_combined_intent_response_key(self): + def get_combined_intent_response_key(self) -> Text: """Get intent as it appears in training data""" - intent = self.get(MESSAGE_INTENT_ATTRIBUTE) - response_key = self.get(MESSAGE_RESPONSE_KEY_ATTRIBUTE) + intent = self.get(INTENT) + response_key = self.get(RESPONSE_KEY_ATTRIBUTE) response_key_suffix = ( - "{}{}".format(RESPONSE_IDENTIFIER_DELIMITER, response_key) - if response_key - else "" + f"{RESPONSE_IDENTIFIER_DELIMITER}{response_key}" if response_key else "" ) - return "{}{}".format(intent, response_key_suffix) + return f"{intent}{response_key_suffix}" @staticmethod - def separate_intent_response_key(original_intent): + def separate_intent_response_key( + original_intent: Text, + ) -> Tuple[Text, Optional[Text]]: split_title = original_intent.split(RESPONSE_IDENTIFIER_DELIMITER) if len(split_title) == 2: return split_title[0], split_title[1] elif len(split_title) == 1: return split_title[0], None + + raise RasaException( + f"Intent name '{original_intent}' is invalid, " + f"it cannot contain more than one '{RESPONSE_IDENTIFIER_DELIMITER}'." + ) + + def get_sparse_features( + self, attribute: Text, featurizers: Optional[List[Text]] = None + ) -> Tuple[Optional[scipy.sparse.spmatrix], Optional[scipy.sparse.spmatrix]]: + """Get all sparse features for the given attribute that are coming from the + given list of featurizers. + + If no featurizers are provided, all available features will be considered. + + Args: + attribute: message attribute + featurizers: names of featurizers to consider + + Returns: + Sparse features. + """ + if featurizers is None: + featurizers = [] + + sequence_features, sentence_features = self._filter_sparse_features( + attribute, featurizers + ) + + sequence_features = self._combine_features(sequence_features) + sentence_features = self._combine_features(sentence_features) + + return sequence_features, sentence_features + + def get_dense_features( + self, attribute: Text, featurizers: Optional[List[Text]] = None + ) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]: + """Get all dense features for the given attribute that are coming from the given + list of featurizers. + + If no featurizers are provided, all available features will be considered. + + Args: + attribute: message attribute + featurizers: names of featurizers to consider + + Returns: + Dense features. 
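The retrieval-intent handling in `Message.build` and `separate_intent_response_key` above can be summed up in a short sketch; the intent name is invented:

```python
from rasa.nlu.constants import INTENT, RESPONSE_KEY_ATTRIBUTE
from rasa.nlu.training_data import Message

# "chitchat/ask_name" is split into the intent and its response key.
message = Message.build(text="what's your name?", intent="chitchat/ask_name")

print(message.get(INTENT))                         # chitchat
print(message.get(RESPONSE_KEY_ATTRIBUTE))         # ask_name
print(message.get_combined_intent_response_key())  # chitchat/ask_name
```

Intent names containing more than one delimiter now raise a `RasaException` instead of slipping through unhandled.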
+ """ + if featurizers is None: + featurizers = [] + + sequence_features, sentence_features = self._filter_dense_features( + attribute, featurizers + ) + + sequence_features = self._combine_features(sequence_features) + sentence_features = self._combine_features(sentence_features) + + return sequence_features, sentence_features + + def features_present( + self, attribute: Text, featurizers: Optional[List[Text]] = None + ) -> bool: + """Check if there are any features present for the given attribute and + featurizers. + + If no featurizers are provided, all available features will be considered. + + Args: + attribute: message attribute + featurizers: names of featurizers to consider + + Returns: + ``True``, if features are present, ``False`` otherwise + """ + if featurizers is None: + featurizers = [] + + ( + sequence_sparse_features, + sentence_sparse_features, + ) = self._filter_sparse_features(attribute, featurizers) + sequence_dense_features, sentence_dense_features = self._filter_dense_features( + attribute, featurizers + ) + + return ( + len(sequence_sparse_features) > 0 + or len(sentence_sparse_features) > 0 + or len(sequence_dense_features) > 0 + or len(sentence_dense_features) > 0 + ) + + def _filter_dense_features( + self, attribute: Text, featurizers: List[Text] + ) -> Tuple[List["Features"], List["Features"]]: + sentence_features = [ + f + for f in self.features + if f.message_attribute == attribute + and f.is_dense() + and f.type == FEATURE_TYPE_SENTENCE + and (f.origin in featurizers or not featurizers) + ] + sequence_features = [ + f + for f in self.features + if f.message_attribute == attribute + and f.is_dense() + and f.type == FEATURE_TYPE_SEQUENCE + and (f.origin in featurizers or not featurizers) + ] + return sequence_features, sentence_features + + def _filter_sparse_features( + self, attribute: Text, featurizers: List[Text] + ) -> Tuple[List["Features"], List["Features"]]: + sentence_features = [ + f + for f in self.features + if f.message_attribute == attribute + and f.is_sparse() + and f.type == FEATURE_TYPE_SENTENCE + and (f.origin in featurizers or not featurizers) + ] + sequence_features = [ + f + for f in self.features + if f.message_attribute == attribute + and f.is_sparse() + and f.type == FEATURE_TYPE_SEQUENCE + and (f.origin in featurizers or not featurizers) + ] + + return sequence_features, sentence_features + + @staticmethod + def _combine_features( + features: List["Features"], + ) -> Optional[Union[np.ndarray, scipy.sparse.spmatrix]]: + combined_features = None + + for f in features: + combined_features = f.combine_with_features(combined_features) + + return combined_features diff --git a/rasa/nlu/training_data/synonyms_parser.py b/rasa/nlu/training_data/synonyms_parser.py new file mode 100644 index 000000000000..5d8aa1459c48 --- /dev/null +++ b/rasa/nlu/training_data/synonyms_parser.py @@ -0,0 +1,42 @@ +from typing import Text, List, Dict + +from rasa.nlu.constants import ( + ENTITY_ATTRIBUTE_VALUE, + ENTITY_ATTRIBUTE_END, + ENTITY_ATTRIBUTE_START, +) + + +def add_synonyms_from_entities( + plain_text: Text, entities: List[Dict], existing_synonyms: Dict +) -> None: + """Adds synonyms found in intent examples. + + Args: + plain_text: Plain (with removed special symbols) user utterance. + entities: Entities that were extracted from the original user utterance. + existing_synonyms: The dict with existing synonyms mappings that will + be extended. 
+ """ + for e in entities: + e_text = plain_text[e[ENTITY_ATTRIBUTE_START] : e[ENTITY_ATTRIBUTE_END]] + if e_text != e[ENTITY_ATTRIBUTE_VALUE]: + add_synonym(e_text, e[ENTITY_ATTRIBUTE_VALUE], existing_synonyms) + + +def add_synonym( + synonym_value: Text, synonym_name: Text, existing_synonyms: Dict +) -> None: + """Adds a new synonym mapping to the provided list of synonyms. + + Args: + synonym_value: Value of the synonym. + synonym_name: Name of the synonym. + existing_synonyms: Dictionary will synonym mappings that will be extended. + """ + import rasa.nlu.training_data.util as training_data_util + + training_data_util.check_duplicate_synonym( + existing_synonyms, synonym_value, synonym_name, "reading markdown" + ) + existing_synonyms[synonym_value] = synonym_name diff --git a/rasa/nlu/training_data/training_data.py b/rasa/nlu/training_data/training_data.py index e7c1e6800a14..fef7e7d6e17b 100644 --- a/rasa/nlu/training_data/training_data.py +++ b/rasa/nlu/training_data/training_data.py @@ -1,32 +1,40 @@ -# -*- coding: utf-8 -*- - import logging import os +from pathlib import Path import random -import warnings -from collections import Counter -from copy import deepcopy +from collections import Counter, OrderedDict +import copy from os.path import relpath -from typing import Any, Dict, List, Optional, Set, Text, Tuple, Union +from typing import Any, Dict, List, Optional, Set, Text, Tuple, Callable -from rasa.nlu.utils import list_to_str +from rasa.data import ( + JSON_FILE_EXTENSIONS, + MARKDOWN_FILE_EXTENSIONS, + YAML_FILE_EXTENSIONS, +) import rasa.nlu.utils -import rasa.utils.common as rasa_utils -from rasa.nlu.training_data.message import Message -from rasa.nlu.training_data.util import check_duplicate_synonym +from rasa.utils.common import raise_warning, lazy_property from rasa.nlu.constants import ( - MESSAGE_INTENT_ATTRIBUTE, - MESSAGE_RESPONSE_ATTRIBUTE, - MESSAGE_RESPONSE_KEY_ATTRIBUTE, - RESPONSE_IDENTIFIER_DELIMITER, + RESPONSE, + RESPONSE_KEY_ATTRIBUTE, + NO_ENTITY_TAG, + ENTITY_ATTRIBUTE_TYPE, + ENTITY_ATTRIBUTE_GROUP, + ENTITY_ATTRIBUTE_ROLE, + INTENT, + ENTITIES, + TEXT, ) +from rasa.nlu.training_data.message import Message +from rasa.nlu.training_data.util import check_duplicate_synonym +from rasa.nlu.utils import list_to_str DEFAULT_TRAINING_DATA_OUTPUT_PATH = "training_data.json" logger = logging.getLogger(__name__) -class TrainingData(object): +class TrainingData: """Holds loaded intent and entity training data.""" # Validation will ensure and warn if these lower limits are not met @@ -38,34 +46,36 @@ def __init__( training_examples: Optional[List[Message]] = None, entity_synonyms: Optional[Dict[Text, Text]] = None, regex_features: Optional[List[Dict[Text, Text]]] = None, - lookup_tables: Optional[List[Dict[Text, Text]]] = None, - nlg_stories: Optional[Dict[Text, List[Text]]] = None, + lookup_tables: Optional[List[Dict[Text, Any]]] = None, + responses: Optional[Dict[Text, List[Dict[Text, Any]]]] = None, ) -> None: if training_examples: self.training_examples = self.sanitize_examples(training_examples) else: self.training_examples = [] - self.entity_synonyms = entity_synonyms if entity_synonyms else {} - self.regex_features = regex_features if regex_features else [] + self.entity_synonyms = entity_synonyms or {} + self.regex_features = regex_features or [] self.sort_regex_features() - self.lookup_tables = lookup_tables if lookup_tables else [] - self.nlg_stories = nlg_stories if nlg_stories else {} + self.lookup_tables = lookup_tables or [] + self.responses = responses 
or {} + + self._fill_response_phrases() def merge(self, *others: "TrainingData") -> "TrainingData": """Return merged instance of this data with other training data.""" - training_examples = deepcopy(self.training_examples) + training_examples = copy.deepcopy(self.training_examples) entity_synonyms = self.entity_synonyms.copy() - regex_features = deepcopy(self.regex_features) - lookup_tables = deepcopy(self.lookup_tables) - nlg_stories = deepcopy(self.nlg_stories) + regex_features = copy.deepcopy(self.regex_features) + lookup_tables = copy.deepcopy(self.lookup_tables) + responses = copy.deepcopy(self.responses) others = [other for other in others if other] for o in others: - training_examples.extend(deepcopy(o.training_examples)) - regex_features.extend(deepcopy(o.regex_features)) - lookup_tables.extend(deepcopy(o.lookup_tables)) + training_examples.extend(copy.deepcopy(o.training_examples)) + regex_features.extend(copy.deepcopy(o.regex_features)) + lookup_tables.extend(copy.deepcopy(o.lookup_tables)) for text, syn in o.entity_synonyms.items(): check_duplicate_synonym( @@ -73,36 +83,40 @@ def merge(self, *others: "TrainingData") -> "TrainingData": ) entity_synonyms.update(o.entity_synonyms) - nlg_stories.update(o.nlg_stories) + responses.update(o.responses) return TrainingData( training_examples, entity_synonyms, regex_features, lookup_tables, - nlg_stories, + responses, ) - def filter_by_intent(self, intent: Text): - """Filter training examples """ + def filter_training_examples( + self, condition: Callable[[Message], bool] + ) -> "TrainingData": + """Filter training examples. + + Args: + condition: A function that will be applied to filter training examples. - training_examples = [] - for ex in self.training_examples: - if ex.get("intent") == intent: - training_examples.append(ex) + Returns: + TrainingData: A TrainingData with filtered training examples. + """ return TrainingData( - training_examples, + list(filter(condition, self.training_examples)), self.entity_synonyms, self.regex_features, self.lookup_tables, + self.responses, ) def __hash__(self) -> int: from rasa.core import utils as core_utils - # Sort keys to ensure dictionary order in Python 3.5 - stringified = self.nlu_as_json(sort_keys=True) + self.nlg_as_markdown() + stringified = self.nlu_as_json() + self.nlg_as_markdown() text_hash = core_utils.get_text_hash(stringified) return int(text_hash, 16) @@ -111,70 +125,111 @@ def __hash__(self) -> int: def sanitize_examples(examples: List[Message]) -> List[Message]: """Makes sure the training data is clean. - removes trailing whitespaces from intent annotations.""" + Remove trailing whitespaces from intent and response annotations and drop + duplicate examples. 
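# Illustrative usage sketch (not part of the diff): the new
# `filter_training_examples` API above generalises the old intent-based filter
# by taking any condition callable. Assumes `training_data` is an already
# loaded TrainingData instance; the intent name is invented.
from rasa.nlu.constants import INTENT

greet_only = training_data.filter_training_examples(
    lambda message: message.get(INTENT) == "greet"
)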
+ """ for ex in examples: - if ex.get("intent"): - ex.set("intent", ex.get("intent").strip()) - if ex.get("response"): - ex.set("response", ex.get("response").strip()) - return examples + if ex.get(INTENT): + ex.set(INTENT, ex.get(INTENT).strip()) + + if ex.get(RESPONSE): + ex.set(RESPONSE, ex.get(RESPONSE).strip()) - @rasa_utils.lazy_property + return list(OrderedDict.fromkeys(examples)) + + @lazy_property def intent_examples(self) -> List[Message]: - return [ex for ex in self.training_examples if ex.get("intent")] + return [ex for ex in self.training_examples if ex.get(INTENT)] - @rasa_utils.lazy_property + @lazy_property def response_examples(self) -> List[Message]: - return [ex for ex in self.training_examples if ex.get("response")] + return [ex for ex in self.training_examples if ex.get(RESPONSE)] - @rasa_utils.lazy_property + @lazy_property def entity_examples(self) -> List[Message]: - return [ex for ex in self.training_examples if ex.get("entities")] + return [ex for ex in self.training_examples if ex.get(ENTITIES)] - @rasa_utils.lazy_property + @lazy_property def intents(self) -> Set[Text]: """Returns the set of intents in the training data.""" - return set([ex.get("intent") for ex in self.training_examples]) - {None} - - @rasa_utils.lazy_property - def responses(self) -> Set[Text]: - """Returns the set of responses in the training data.""" - return set([ex.get("response") for ex in self.training_examples]) - {None} + return {ex.get(INTENT) for ex in self.training_examples} - {None} - @rasa_utils.lazy_property + @lazy_property def retrieval_intents(self) -> Set[Text]: """Returns the total number of response types in the training data""" - return set( - [ - ex.get("intent") - for ex in self.training_examples - if ex.get("response") is not None - ] - ) - - @rasa_utils.lazy_property - def examples_per_intent(self) -> Dict[Text, int]: + return { + ex.get(INTENT) + for ex in self.training_examples + if ex.get(RESPONSE) is not None + } + + @lazy_property + def number_of_examples_per_intent(self) -> Dict[Text, int]: """Calculates the number of examples per intent.""" - intents = [ex.get("intent") for ex in self.training_examples] + intents = [ex.get(INTENT) for ex in self.training_examples] return dict(Counter(intents)) - @rasa_utils.lazy_property - def examples_per_response(self) -> Dict[Text, int]: + @lazy_property + def number_of_examples_per_response(self) -> Dict[Text, int]: """Calculates the number of examples per response.""" - return dict(Counter(self.responses)) + responses = [ + ex.get(RESPONSE) for ex in self.training_examples if ex.get(RESPONSE) + ] + return dict(Counter(responses)) - @rasa_utils.lazy_property + @lazy_property def entities(self) -> Set[Text]: """Returns the set of entity types in the training data.""" - entity_types = [e.get("entity") for e in self.sorted_entities()] + entity_types = [e.get(ENTITY_ATTRIBUTE_TYPE) for e in self.sorted_entities()] return set(entity_types) - @rasa_utils.lazy_property - def examples_per_entity(self) -> Dict[Text, int]: + @lazy_property + def entity_roles(self) -> Set[Text]: + """Returns the set of entity roles in the training data.""" + entity_types = [ + e.get(ENTITY_ATTRIBUTE_ROLE) + for e in self.sorted_entities() + if ENTITY_ATTRIBUTE_ROLE in e + ] + return set(entity_types) - {NO_ENTITY_TAG} + + @lazy_property + def entity_groups(self) -> Set[Text]: + """Returns the set of entity groups in the training data.""" + entity_types = [ + e.get(ENTITY_ATTRIBUTE_GROUP) + for e in self.sorted_entities() + if ENTITY_ATTRIBUTE_GROUP in 
e + ] + return set(entity_types) - {NO_ENTITY_TAG} + + def entity_roles_groups_used(self) -> bool: + entity_groups_used = ( + self.entity_groups is not None and len(self.entity_groups) > 0 + ) + entity_roles_used = self.entity_roles is not None and len(self.entity_roles) > 0 + + return entity_groups_used or entity_roles_used + + @lazy_property + def number_of_examples_per_entity(self) -> Dict[Text, int]: """Calculates the number of examples per entity.""" - entity_types = [e.get("entity") for e in self.sorted_entities()] - return dict(Counter(entity_types)) + + entities = [] + + def _append_entity(entity: Dict[Text, Any], attribute: Text) -> None: + if attribute in entity: + _value = entity.get(attribute) + if _value is not None and _value != NO_ENTITY_TAG: + entities.append(f"{attribute} '{_value}'") + + for entity in self.sorted_entities(): + _append_entity(entity, ENTITY_ATTRIBUTE_TYPE) + _append_entity(entity, ENTITY_ATTRIBUTE_ROLE) + _append_entity(entity, ENTITY_ATTRIBUTE_GROUP) + + return dict(Counter(entities)) def sort_regex_features(self) -> None: """Sorts regex features lexicographically by name+pattern""" @@ -182,27 +237,21 @@ def sort_regex_features(self) -> None: self.regex_features, key=lambda e: "{}+{}".format(e["name"], e["pattern"]) ) - def fill_response_phrases(self): + def _fill_response_phrases(self) -> None: """Set response phrase for all examples by looking up NLG stories""" for example in self.training_examples: - response_key = example.get(MESSAGE_RESPONSE_KEY_ATTRIBUTE) - # if response_key is None, that means the corresponding intent is not a retrieval intent - # and hence no response text needs to be fetched. + # if response_key is None, that means the corresponding intent is not a + # retrieval intent and hence no response text needs to be fetched. # If response_key is set, fetch the corresponding response text - if response_key: - # look for corresponding bot utterance - story_lookup_intent = example.get_combined_intent_response_key() - assistant_utterances = self.nlg_stories.get(story_lookup_intent, []) - if assistant_utterances: - # selecting only first assistant utterance for now - example.set(MESSAGE_RESPONSE_ATTRIBUTE, assistant_utterances[0]) - else: - raise ValueError( - "No response phrases found for {}. Check training data " - "files for a possible wrong intent name in NLU/NLG file".format( - story_lookup_intent - ) - ) + if example.get(RESPONSE_KEY_ATTRIBUTE) is None: + continue + + # look for corresponding bot utterance + story_lookup_intent = example.get_combined_intent_response_key() + assistant_utterances = self.responses.get(story_lookup_intent, []) + if assistant_utterances: + # selecting only first assistant utterance for now + example.set(RESPONSE, assistant_utterances[0].get(TEXT)) def nlu_as_json(self, **kwargs: Any) -> Text: """Represent this set of training examples as json.""" @@ -212,23 +261,31 @@ def nlu_as_json(self, **kwargs: Any) -> Text: return RasaWriter().dumps(self, **kwargs) - def as_json(self) -> Text: - - logger.warning( - "DEPRECATION warning: function as_json() is deprecated and will be removed " - "in future versions. Use nlu_as_json() instead." 
- ) - - return self.nlu_as_json() - def nlg_as_markdown(self) -> Text: - """Generates the markdown representation of the response phrases(NLG) of TrainingData.""" + """Generates the markdown representation of the response phrases (NLG) of + TrainingData.""" + from rasa.nlu.training_data.formats import ( # pytype: disable=pyi-error NLGMarkdownWriter, ) return NLGMarkdownWriter().dumps(self) + def nlg_as_yaml(self) -> Text: + """Generates yaml representation of the response phrases (NLG) of TrainingData. + + Returns: + responses in yaml format as a string + """ + from rasa.nlu.training_data.formats.rasa_yaml import ( # pytype: disable=pyi-error + RasaYAMLWriter, + ) + + # only dump responses. at some point it might make sense to remove the + # differentiation between dumping NLU and dumping responses. but we + # can't do that until after we remove markdown support. + return RasaYAMLWriter().dumps(TrainingData(responses=self.responses)) + def nlu_as_markdown(self) -> Text: """Generates the markdown representation of the NLU part of TrainingData.""" from rasa.nlu.training_data.formats import ( # pytype: disable=pyi-error @@ -237,44 +294,63 @@ def nlu_as_markdown(self) -> Text: return MarkdownWriter().dumps(self) - def as_markdown(self) -> Text: - - logger.warning( - "DEPRECATION warning: function as_markdown() is deprecated and will be removed " - "in future versions. Use nlu_as_markdown() and nlg_as_markdown() instead" + def nlu_as_yaml(self) -> Text: + from rasa.nlu.training_data.formats.rasa_yaml import ( # pytype: disable=pyi-error + RasaYAMLWriter, ) - return self.nlu_as_markdown() + # avoid dumping NLG data (responses). this is a workaround until we + # can remove the distinction between nlu & nlg when converting to a string + # (so until after we remove markdown support) + no_responses_training_data = copy.copy(self) + no_responses_training_data.responses = {} + + return RasaYAMLWriter().dumps(no_responses_training_data) - def persist_nlu(self, filename: Text = DEFAULT_TRAINING_DATA_OUTPUT_PATH): + def persist_nlu(self, filename: Text = DEFAULT_TRAINING_DATA_OUTPUT_PATH) -> None: - if filename.endswith("json"): + if Path(filename).suffix in JSON_FILE_EXTENSIONS: rasa.nlu.utils.write_to_file(filename, self.nlu_as_json(indent=2)) - elif filename.endswith("md"): + elif Path(filename).suffix in MARKDOWN_FILE_EXTENSIONS: rasa.nlu.utils.write_to_file(filename, self.nlu_as_markdown()) + elif Path(filename).suffix in YAML_FILE_EXTENSIONS: + rasa.nlu.utils.write_to_file(filename, self.nlu_as_yaml()) else: ValueError( "Unsupported file format detected. Supported file formats are 'json' " "and 'md'." ) - def persist_nlg(self, filename): - - nlg_serialized_data = self.nlg_as_markdown() - if nlg_serialized_data == "": - return - - rasa.nlu.utils.write_to_file(filename, self.nlg_as_markdown()) + def persist_nlg(self, filename: Text) -> None: + if Path(filename).suffix in YAML_FILE_EXTENSIONS: + rasa.nlu.utils.write_to_file(filename, self.nlg_as_yaml()) + elif Path(filename).suffix in MARKDOWN_FILE_EXTENSIONS: + nlg_serialized_data = self.nlg_as_markdown() + if nlg_serialized_data: + rasa.nlu.utils.write_to_file(filename, nlg_serialized_data) + else: + ValueError( + "Unsupported file format detected. Supported file formats are 'md' " + "and 'yml'." 
+ ) @staticmethod - def get_nlg_persist_filename(nlu_filename): - + def get_nlg_persist_filename(nlu_filename: Text) -> Text: + + extension = Path(nlu_filename).suffix + if extension in JSON_FILE_EXTENSIONS: + # backwards compatibility: previously NLG was always dumped as md. now + # we are going to dump in the same format as the NLU data. unfortunately + # there is a special case: NLU is in json format, in this case we use + # md as we do not have a NLG json format + extension = "md" # Add nlg_ as prefix and change extension to .md - filename = os.path.join( - os.path.dirname(nlu_filename), - "nlg_" + os.path.splitext(os.path.basename(nlu_filename))[0] + ".md", + filename = ( + Path(nlu_filename) + .with_name("nlg_" + Path(nlu_filename).name) + .with_suffix("." + extension) ) - return filename + return str(filename) def persist( self, dir_name: Text, filename: Text = DEFAULT_TRAINING_DATA_OUTPUT_PATH @@ -303,7 +379,7 @@ def sorted_intent_examples(self) -> List[Message]: """Sorts the intent examples by the name of the intent and then response""" return sorted( - self.intent_examples, key=lambda e: (e.get("intent"), e.get("response")) + self.intent_examples, key=lambda e: (e.get(INTENT), e.get(RESPONSE)) ) def validate(self) -> None: @@ -313,114 +389,179 @@ def validate(self) -> None: logger.debug("Validating training data...") if "" in self.intents: - warnings.warn( + raise_warning( "Found empty intent, please check your " "training data. This may result in wrong " "intent predictions." ) if "" in self.responses: - warnings.warn( + raise_warning( "Found empty response, please check your " "training data. This may result in wrong " "response predictions." ) # emit warnings for intents with only a few training samples - for intent, count in self.examples_per_intent.items(): + for intent, count in self.number_of_examples_per_intent.items(): if count < self.MIN_EXAMPLES_PER_INTENT: - warnings.warn( - "Intent '{}' has only {} training examples! " - "Minimum is {}, training may fail.".format( - intent, count, self.MIN_EXAMPLES_PER_INTENT - ) + raise_warning( + f"Intent '{intent}' has only {count} training examples! " + f"Minimum is {self.MIN_EXAMPLES_PER_INTENT}, training may fail." ) # emit warnings for entities with only a few training samples - for entity_type, count in self.examples_per_entity.items(): + for entity, count in self.number_of_examples_per_entity.items(): if count < self.MIN_EXAMPLES_PER_ENTITY: - warnings.warn( - "Entity '{}' has only {} training examples! " - "minimum is {}, training may fail." - "".format(entity_type, count, self.MIN_EXAMPLES_PER_ENTITY) + raise_warning( + f"Entity {entity} has only {count} training examples! " + f"The minimum is {self.MIN_EXAMPLES_PER_ENTITY}, because of " + f"this the training may fail." + ) + + # emit warnings for response intents without a response template + for example in self.training_examples: + if example.get(RESPONSE_KEY_ATTRIBUTE): + raise_warning( + f"Your training data contains an example '{example.text[:20]}...' " + f"for the {example.get_combined_intent_response_key()} intent. " + f"You either need to add a response phrase or correct the " + f"intent for this example in your training data." 
) def train_test_split( - self, train_frac: float = 0.8 + self, train_frac: float = 0.8, random_seed: Optional[int] = None ) -> Tuple["TrainingData", "TrainingData"]: """Split into a training and test dataset, preserving the fraction of examples per intent.""" # collect all nlu data - test, train = self.split_nlu_examples(train_frac) + test, train = self.split_nlu_examples(train_frac, random_seed) # collect all nlg stories - test_nlg_stories, train_nlg_stories = self.split_nlg_responses(test, train) + test_responses = self._needed_responses_for_examples(test) + train_responses = self._needed_responses_for_examples(train) data_train = TrainingData( train, entity_synonyms=self.entity_synonyms, regex_features=self.regex_features, lookup_tables=self.lookup_tables, - nlg_stories=train_nlg_stories, + responses=train_responses, ) - data_train.fill_response_phrases() data_test = TrainingData( test, entity_synonyms=self.entity_synonyms, regex_features=self.regex_features, lookup_tables=self.lookup_tables, - nlg_stories=test_nlg_stories, + responses=test_responses, ) - data_test.fill_response_phrases() return data_train, data_test - def split_nlg_responses(self, test, train): + def _needed_responses_for_examples( + self, examples: List[Message] + ) -> Dict[Text, List[Dict[Text, Any]]]: + """Get all responses used in any of the examples. - train_nlg_stories = self.build_nlg_stories_from_examples(train) - test_nlg_stories = self.build_nlg_stories_from_examples(test) - return test_nlg_stories, train_nlg_stories + Args: + examples: messages to select responses by. - @staticmethod - def build_nlg_stories_from_examples(examples): + Returns: + All responses that appear at least once in the list of examples. + """ - nlg_stories = {} + responses = {} for ex in examples: - if ex.get(MESSAGE_RESPONSE_KEY_ATTRIBUTE) and ex.get( - MESSAGE_RESPONSE_ATTRIBUTE - ): - nlg_stories[ex.get_combined_intent_response_key()] = [ - ex.get(MESSAGE_RESPONSE_ATTRIBUTE) - ] - return nlg_stories - - def split_nlu_examples(self, train_frac): + if ex.get(RESPONSE_KEY_ATTRIBUTE) and ex.get(RESPONSE): + key = ex.get_combined_intent_response_key() + responses[key] = self.responses[key] + return responses + + def split_nlu_examples( + self, train_frac: float, random_seed: Optional[int] = None + ) -> Tuple[list, list]: + """Split the training data into a train and test set. + + Args: + train_frac: percentage of examples to add to the training set. + random_seed: random seed + + Returns: + Test and training examples. 
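# Illustrative sketch of the split API above (assumes `training_data` is a
# loaded TrainingData instance). Passing the new `random_seed` argument makes
# the per-intent split reproducible across runs.
train_data, test_data = training_data.train_test_split(train_frac=0.8, random_seed=42)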
+ """ train, test = [], [] - for intent, count in self.examples_per_intent.items(): - ex = [e for e in self.intent_examples if e.data["intent"] == intent] - random.shuffle(ex) - n_train = int(count * train_frac) - train.extend(ex[:n_train]) - test.extend(ex[n_train:]) + training_examples = set(self.training_examples) + + def _split(_examples: List[Message], _count: int) -> None: + if random_seed is not None: + random.Random(random_seed).shuffle(_examples) + else: + random.shuffle(_examples) + + n_train = int(_count * train_frac) + train.extend(_examples[:n_train]) + test.extend(_examples[n_train:]) + + # to make sure we have at least one example per response and intent in the + # training/test data, we first go over the response examples and then go over + # intent examples + + for response, count in self.number_of_examples_per_response.items(): + examples = [ + e + for e in training_examples + if RESPONSE in e.data and e.data[RESPONSE] == response + ] + _split(examples, count) + training_examples = training_examples - set(examples) + + for intent, count in self.number_of_examples_per_intent.items(): + examples = [ + e + for e in training_examples + if INTENT in e.data and e.data[INTENT] == intent + ] + _split(examples, count) + training_examples = training_examples - set(examples) + return test, train def print_stats(self) -> None: - logger.info( - "Training data stats: \n" - + "\t- intent examples: {} ({} distinct intents)\n".format( - len(self.intent_examples), len(self.intents) - ) - + "\t- Found intents: {}\n".format(list_to_str(self.intents)) - + "\t- Number of response examples: {} ({} distinct response)\n".format( - len(self.response_examples), len(self.responses) - ) - + "\t- entity examples: {} ({} distinct entities)\n".format( - len(self.entity_examples), len(self.entities) + number_of_examples_for_each_intent = [] + for intent_name, example_count in self.number_of_examples_per_intent.items(): + number_of_examples_for_each_intent.append( + f"intent: {intent_name}, training examples: {example_count} " ) - + "\t- found entities: {}\n".format(list_to_str(self.entities)) + newline = "\n" + + logger.info("Training data stats:") + logger.info( + f"Number of intent examples: {len(self.intent_examples)} " + f"({len(self.intents)} distinct intents)" + "\n" + ) + # log the number of training examples per intent + + logger.debug(f"{newline.join(number_of_examples_for_each_intent)}") + + if self.intents: + logger.info(f" Found intents: {list_to_str(self.intents)}") + logger.info( + f"Number of response examples: {len(self.response_examples)} " + f"({len(self.responses)} distinct responses)" + ) + logger.info( + f"Number of entity examples: {len(self.entity_examples)} " + f"({len(self.entities)} distinct entities)" ) + if self.entities: + logger.info(f" Found entity types: {list_to_str(self.entities)}") + if self.entity_roles: + logger.info(f" Found entity roles: {list_to_str(self.entity_roles)}") + if self.entity_groups: + logger.info(f" Found entity groups: {list_to_str(self.entity_groups)}") def is_empty(self) -> bool: """Checks if any training data was loaded.""" @@ -431,4 +572,4 @@ def is_empty(self) -> bool: self.regex_features, self.lookup_tables, ] - return not any([len(l) > 0 for l in lists_to_check]) + return not any([len(lst) > 0 for lst in lists_to_check]) diff --git a/rasa/nlu/training_data/util.py b/rasa/nlu/training_data/util.py index b7533c378fb5..981e18e1804d 100644 --- a/rasa/nlu/training_data/util.py +++ b/rasa/nlu/training_data/util.py @@ -1,15 +1,18 @@ -# -*- coding: 
utf-8 -*- - +import json import logging import os -from typing import Text +from typing import Any, Dict, Optional, Text import rasa.utils.io as io_utils +from rasa.nlu.constants import ENTITIES, EXTRACTOR, PRETRAINED_EXTRACTORS +from rasa.utils.common import raise_warning logger = logging.getLogger(__name__) -def transform_entity_synonyms(synonyms, known_synonyms=None): +def transform_entity_synonyms( + synonyms, known_synonyms: Optional[Dict[Text, Any]] = None +) -> Dict[Text, Any]: """Transforms the entity synonyms into a text->value dictionary""" entity_synonyms = known_synonyms if known_synonyms else {} for s in synonyms: @@ -19,13 +22,14 @@ def transform_entity_synonyms(synonyms, known_synonyms=None): return entity_synonyms -def check_duplicate_synonym(entity_synonyms, text, syn, context_str=""): +def check_duplicate_synonym( + entity_synonyms: Dict[Text, Any], text: Text, syn: Text, context_str: Text = "" +) -> None: if text in entity_synonyms and entity_synonyms[text] != syn: - logger.warning( - "Found inconsistent entity synonyms while {0}, " - "overwriting {1}->{2} " - "with {1}->{3} during merge" - "".format(context_str, text, entity_synonyms[text], syn) + raise_warning( + f"Found inconsistent entity synonyms while {context_str}, " + f"overwriting {text}->{entity_synonyms[text]} " + f"with {text}->{syn} during merge." ) @@ -33,7 +37,7 @@ def get_file_format(resource_name: Text) -> Text: from rasa.nlu.training_data import loading if resource_name is None or not os.path.exists(resource_name): - raise AttributeError("Resource '{}' does not exist.".format(resource_name)) + raise AttributeError(f"Resource '{resource_name}' does not exist.") files = io_utils.list_files(resource_name) @@ -43,7 +47,41 @@ def get_file_format(resource_name: Text) -> Text: return "json" fformat = file_formats[0] - if fformat == "md" and all(f == fformat for f in file_formats): + if fformat in [loading.MARKDOWN, loading.RASA_YAML] and all( + f == fformat for f in file_formats + ): return fformat return "json" + + +def remove_untrainable_entities_from(example: Dict[Text, Any]) -> None: + """Remove untrainable entities from serialised training example `example`. + + Entities with an untrainable extractor will be removed. Untrainable extractors + are defined in `rasa.nlu.constants.PRETRAINED_EXTRACTORS`. + + Args: + example: Serialised training example to inspect. + """ + + example_entities = example.get(ENTITIES) + + if not example_entities: + # example contains no entities, so there's nothing to do + return None + + trainable_entities = [] + + for entity in example_entities: + if entity.get(EXTRACTOR) in PRETRAINED_EXTRACTORS: + logger.debug( + f"Excluding entity '{json.dumps(entity)}' from training data. " + f"Entity examples extracted by the following classes are not " + f"dumped to training data in markdown format: " + f"`{'`, `'.join(sorted(PRETRAINED_EXTRACTORS))}`." 
+ ) + else: + trainable_entities.append(entity) + + example[ENTITIES] = trainable_entities diff --git a/rasa/nlu/utils/__init__.py b/rasa/nlu/utils/__init__.py index e50f38cdc596..676ee30f99c1 100644 --- a/rasa/nlu/utils/__init__.py +++ b/rasa/nlu/utils/__init__.py @@ -1,25 +1,33 @@ -import io import json import os import re from typing import Any, Dict, List, Optional, Text +from pathlib import Path + +import rasa.utils.io as io_utils # backwards compatibility 1.0.x # noinspection PyUnresolvedReferences from rasa.utils.io import read_json_file +from rasa.nlu.constants import ( + ENTITY_ATTRIBUTE_END, + ENTITY_ATTRIBUTE_GROUP, + ENTITY_ATTRIBUTE_TYPE, + ENTITY_ATTRIBUTE_ROLE, + ENTITY_ATTRIBUTE_START, + ENTITY_ATTRIBUTE_VALUE, +) -def relative_normpath(f: Optional[Text], path: Text) -> Optional[Text]: +def relative_normpath(f: Optional[Text], path: Text) -> Optional[Path]: """Return the path of file relative to `path`.""" - if f is not None: - return os.path.normpath(os.path.relpath(f, path)) - else: - return None + return Path(os.path.relpath(f, path)) + return None -def list_to_str(l: List[Text], delim: Text = ", ", quote: Text = "'") -> Text: - return delim.join([quote + e + quote for e in l]) +def list_to_str(lst: List[Text], delim: Text = ", ", quote: Text = "'") -> Text: + return delim.join([quote + e + quote for e in lst]) def ordered(obj: Any) -> Any: @@ -48,21 +56,49 @@ def write_json_to_file(filename: Text, obj: Any, **kwargs: Any) -> None: write_to_file(filename, json_to_string(obj, **kwargs)) -def write_to_file(filename: Text, text: Text) -> None: +def write_to_file(filename: Text, text: Any) -> None: """Write a text to a file.""" - with io.open(filename, "w", encoding="utf-8") as f: - f.write(str(text)) + io_utils.write_text_file(str(text), filename) def build_entity( - start: int, end: int, value: Text, entity_type: Text, **kwargs: Dict[Text, Any] + start: int, + end: int, + value: Text, + entity_type: Text, + role: Optional[Text] = None, + group: Optional[Text] = None, + **kwargs: Any, ) -> Dict[Text, Any]: """Builds a standard entity dictionary. - Adds additional keyword parameters.""" - - entity = {"start": start, "end": end, "value": value, "entity": entity_type} + Adds additional keyword parameters. 
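# Illustrative call of the extended `build_entity` helper defined here; the
# span, value, and labels are invented for the example, and the keys of the
# returned dictionary come from the ENTITY_ATTRIBUTE_* constants imported above.
example_entity = build_entity(0, 6, "London", "city", role="destination")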
+ + Args: + start: start position of entity + end: end position of entity + value: text value of the entity + entity_type: name of the entity type + role: role of the entity + group: group of the entity + **kwargs: additional parameters + + Returns: + an entity dictionary + """ + + entity = { + ENTITY_ATTRIBUTE_START: start, + ENTITY_ATTRIBUTE_END: end, + ENTITY_ATTRIBUTE_VALUE: value, + ENTITY_ATTRIBUTE_TYPE: entity_type, + } + + if role: + entity[ENTITY_ATTRIBUTE_ROLE] = role + if group: + entity[ENTITY_ATTRIBUTE_GROUP] = group entity.update(kwargs) return entity @@ -104,25 +140,3 @@ def remove_model(model_dir: Text) -> bool: "Cannot remove {}, it seems it is not a model " "directory".format(model_dir) ) - - -def json_unpickle(file_name: Text) -> Any: - """Unpickle an object from file using json.""" - import jsonpickle.ext.numpy as jsonpickle_numpy - import jsonpickle - - jsonpickle_numpy.register_handlers() - - with open(file_name, "r", encoding="utf-8") as f: - return jsonpickle.loads(f.read()) - - -def json_pickle(file_name: Text, obj: Any) -> None: - """Pickle an object to a file using json.""" - import jsonpickle.ext.numpy as jsonpickle_numpy - import jsonpickle - - jsonpickle_numpy.register_handlers() - - with open(file_name, "w", encoding="utf-8") as f: - f.write(jsonpickle.dumps(obj)) diff --git a/rasa/nlu/utils/bilou_utils.py b/rasa/nlu/utils/bilou_utils.py new file mode 100644 index 000000000000..c563cef52dc6 --- /dev/null +++ b/rasa/nlu/utils/bilou_utils.py @@ -0,0 +1,329 @@ +import logging +from typing import List, Tuple, Text, Optional, Dict, Any + +from rasa.nlu.tokenizers.tokenizer import Token +from rasa.nlu.training_data import Message +from rasa.nlu.training_data import TrainingData +from rasa.nlu.constants import ( + ENTITIES, + TOKENS_NAMES, + TEXT, + BILOU_ENTITIES, + NO_ENTITY_TAG, + ENTITY_ATTRIBUTE_TYPE, + ENTITY_ATTRIBUTE_END, + ENTITY_ATTRIBUTE_START, + BILOU_ENTITIES_GROUP, + BILOU_ENTITIES_ROLE, + ENTITY_ATTRIBUTE_ROLE, + ENTITY_ATTRIBUTE_GROUP, +) + +logger = logging.getLogger(__name__) + +BEGINNING = "B-" +INSIDE = "I-" +LAST = "L-" +UNIT = "U-" +BILOU_PREFIXES = [BEGINNING, INSIDE, LAST, UNIT] + + +def bilou_prefix_from_tag(tag: Text) -> Optional[Text]: + """Returns the BILOU prefix from the given tag. + + Args: + tag: the tag + + Returns: the BILOU prefix of the tag + """ + if tag[:2] in BILOU_PREFIXES: + return tag[:2] + return None + + +def tag_without_prefix(tag: Text) -> Text: + """Remove the BILOU prefix from the given tag. + + Args: + tag: the tag + + Returns: the tag without the BILOU prefix + """ + if tag[:2] in BILOU_PREFIXES: + return tag[2:] + return tag + + +def bilou_tags_to_ids( + message: Message, + tag_id_dict: Dict[Text, int], + tag_name: Text = ENTITY_ATTRIBUTE_TYPE, +) -> List[int]: + """Maps the entity tags of the message to the ids of the provided dict. + + Args: + message: the message + tag_id_dict: mapping of tags to ids + tag_name: tag name of interest + + Returns: a list of tag ids + """ + bilou_key = get_bilou_key_for_tag(tag_name) + + if message.get(bilou_key): + _tags = [ + tag_id_dict[_tag] if _tag in tag_id_dict else tag_id_dict[NO_ENTITY_TAG] + for _tag in message.get(bilou_key) + ] + else: + _tags = [tag_id_dict[NO_ENTITY_TAG] for _ in message.get(TOKENS_NAMES[TEXT])] + + return _tags + + +def get_bilou_key_for_tag(tag_name: Text) -> Text: + """Get the message key for the BILOU tagging format of the provided tag name. 
+ + Args: + tag_name: the tag name + + Returns: + the message key to store the BILOU tags + """ + if tag_name == ENTITY_ATTRIBUTE_ROLE: + return BILOU_ENTITIES_ROLE + + if tag_name == ENTITY_ATTRIBUTE_GROUP: + return BILOU_ENTITIES_GROUP + + return BILOU_ENTITIES + + +def remove_bilou_prefixes(tags: List[Text]) -> List[Text]: + """Removes the BILOU prefixes from the given list of tags. + + Args: + tags: the list of tags + + Returns: + list of tags without BILOU prefix + """ + return [tag_without_prefix(t) for t in tags] + + +def build_tag_id_dict( + training_data: TrainingData, tag_name: Text = ENTITY_ATTRIBUTE_TYPE +) -> Optional[Dict[Text, int]]: + """Create a mapping of unique tags to ids. + + Args: + training_data: the training data + tag_name: tag name of interest + + Returns: a mapping of tags to ids + """ + bilou_key = get_bilou_key_for_tag(tag_name) + + distinct_tags = set( + [ + tag_without_prefix(e) + for example in training_data.training_examples + if example.get(bilou_key) + for e in example.get(bilou_key) + ] + ) - {NO_ENTITY_TAG} + + if not distinct_tags: + return None + + tag_id_dict = { + f"{prefix}{tag}": idx_1 * len(BILOU_PREFIXES) + idx_2 + 1 + for idx_1, tag in enumerate(sorted(distinct_tags)) + for idx_2, prefix in enumerate(BILOU_PREFIXES) + } + # NO_ENTITY_TAG corresponds to non-entity which should correspond to 0 index + # needed for correct prediction for padding + tag_id_dict[NO_ENTITY_TAG] = 0 + + return tag_id_dict + + +def apply_bilou_schema(training_data: TrainingData) -> None: + """Get a list of BILOU entity tags and set them on the given messages. + + Args: + training_data: the training data + """ + for message in training_data.training_examples: + entities = message.get(ENTITIES) + + if not entities: + continue + + tokens = message.get(TOKENS_NAMES[TEXT]) + + for attribute, message_key in [ + (ENTITY_ATTRIBUTE_TYPE, BILOU_ENTITIES), + (ENTITY_ATTRIBUTE_ROLE, BILOU_ENTITIES_ROLE), + (ENTITY_ATTRIBUTE_GROUP, BILOU_ENTITIES_GROUP), + ]: + entities = map_message_entities(message, attribute) + output = bilou_tags_from_offsets(tokens, entities) + message.set(message_key, output) + + +def map_message_entities( + message: Message, attribute_key: Text = ENTITY_ATTRIBUTE_TYPE +) -> List[Tuple[int, int, Text]]: + """Maps the entities of the given message to their start, end, and tag values. + + Args: + message: the message + attribute_key: key of tag value to use + + Returns: a list of start, end, and tag value tuples + """ + + def convert_entity(entity: Dict[Text, Any]) -> Tuple[int, int, Text]: + return ( + entity[ENTITY_ATTRIBUTE_START], + entity[ENTITY_ATTRIBUTE_END], + entity.get(attribute_key) or NO_ENTITY_TAG, + ) + + entities = [convert_entity(entity) for entity in message.get(ENTITIES, [])] + + # entities is a list of tuples (start, end, tag value). + # filter out all entities with tag value == NO_ENTITY_TAG. + tag_value_idx = 2 + return [entity for entity in entities if entity[tag_value_idx] != NO_ENTITY_TAG] + + +def bilou_tags_from_offsets( + tokens: List[Token], entities: List[Tuple[int, int, Text]] +) -> List[Text]: + """Creates BILOU tags for the given tokens and entities. + + Args: + message: The message object. + tokens: The list of tokens. + entities: The list of start, end, and tag tuples. + missing: The tag for missing entities. + + Returns: + BILOU tags. 
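# Worked example of the BILOU encoding produced above (standalone sketch, no
# Rasa objects involved): one "city" entity spanning the last three tokens,
# with non-entity tokens keeping the no-entity tag (shown here as "O").
tokens = ["flight", "to", "New", "York", "City"]
bilou_tags = ["O", "O", "B-city", "I-city", "L-city"]  # a single-token entity would get "U-city"
assert len(bilou_tags) == len(tokens)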
+ """ + start_pos_to_token_idx = {token.start: i for i, token in enumerate(tokens)} + end_pos_to_token_idx = {token.end: i for i, token in enumerate(tokens)} + + bilou = [NO_ENTITY_TAG for _ in tokens] + + _add_bilou_tags_to_entities( + bilou, entities, end_pos_to_token_idx, start_pos_to_token_idx + ) + + return bilou + + +def _add_bilou_tags_to_entities( + bilou: List[Text], + entities: List[Tuple[int, int, Text]], + end_pos_to_token_idx: Dict[int, int], + start_pos_to_token_idx: Dict[int, int], +): + for start_pos, end_pos, label in entities: + start_token_idx = start_pos_to_token_idx.get(start_pos) + end_token_idx = end_pos_to_token_idx.get(end_pos) + + # Only interested if the tokenization is correct + if start_token_idx is not None and end_token_idx is not None: + if start_token_idx == end_token_idx: + bilou[start_token_idx] = f"{UNIT}{label}" + else: + bilou[start_token_idx] = f"{BEGINNING}{label}" + for i in range(start_token_idx + 1, end_token_idx): + bilou[i] = f"{INSIDE}{label}" + bilou[end_token_idx] = f"{LAST}{label}" + + +def ensure_consistent_bilou_tagging(predicted_tags: List[Text]) -> List[Text]: + """ + Ensure predicted tags follow the BILOU tagging schema. + + We assume that starting B- tags are correct. Followed tags that belong to start + tag but have a different entity type are updated. + For example, B-a I-b L-a is updated to B-a I-a L-a and B-a I-a O is changed to + B-a L-a. + + Args: + predicted_tags: predicted tags + + Return: + List of tags. + """ + + for idx, predicted_tag in enumerate(predicted_tags): + prefix = bilou_prefix_from_tag(predicted_tag) + tag = tag_without_prefix(predicted_tag) + + if prefix == BEGINNING: + last_idx = _find_bilou_end(idx, predicted_tags) + + # ensure correct BILOU annotations + if last_idx == idx: + predicted_tags[idx] = f"{UNIT}{tag}" + elif last_idx - idx == 1: + predicted_tags[idx] = f"{BEGINNING}{tag}" + predicted_tags[last_idx] = f"{LAST}{tag}" + else: + predicted_tags[idx] = f"{BEGINNING}{tag}" + predicted_tags[last_idx] = f"{LAST}{tag}" + for i in range(idx + 1, last_idx): + predicted_tags[i] = f"{INSIDE}{tag}" + + return predicted_tags + + +def _find_bilou_end(start_idx: int, predicted_tags: List[Text]) -> int: + current_idx = start_idx + 1 + finished = False + start_tag = tag_without_prefix(predicted_tags[start_idx]) + + while not finished: + if current_idx >= len(predicted_tags): + logger.debug( + "Inconsistent BILOU tagging found, B- tag not closed by L- tag, " + "i.e [B-a, I-a, O] instead of [B-a, L-a, O].\n" + "Assuming last tag is L- instead of I-." + ) + current_idx -= 1 + break + + current_label = predicted_tags[current_idx] + prefix = bilou_prefix_from_tag(current_label) + tag = tag_without_prefix(current_label) + + if tag != start_tag: + # words are not tagged the same entity class + logger.debug( + "Inconsistent BILOU tagging found, B- tag, L- tag pair encloses " + "multiple entity classes.i.e. [B-a, I-b, L-a] instead of " + "[B-a, I-a, L-a].\nAssuming B- class is correct." + ) + + if prefix == LAST: + finished = True + elif prefix == INSIDE: + # middle part of the entity + current_idx += 1 + else: + # entity not closed by an L- tag + finished = True + current_idx -= 1 + logger.debug( + "Inconsistent BILOU tagging found, B- tag not closed by L- tag, " + "i.e [B-a, I-a, O] instead of [B-a, L-a, O].\n" + "Assuming last tag is L- instead of I-." 
+ ) + + return current_idx diff --git a/rasa/nlu/utils/hugging_face/__init__.py b/rasa/nlu/utils/hugging_face/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/rasa/nlu/utils/hugging_face/hf_transformers.py b/rasa/nlu/utils/hugging_face/hf_transformers.py new file mode 100644 index 000000000000..0907a004a8be --- /dev/null +++ b/rasa/nlu/utils/hugging_face/hf_transformers.py @@ -0,0 +1,542 @@ +import logging +from typing import Any, Dict, List, Text, Tuple, Optional + +from rasa.core.utils import get_dict_hash +from rasa.nlu.model import Metadata +from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer +from rasa.nlu.components import Component +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.training_data import Message, TrainingData +from rasa.nlu.tokenizers.tokenizer import Token +import rasa.utils.train_utils as train_utils +import numpy as np + +from rasa.nlu.constants import ( + TEXT, + LANGUAGE_MODEL_DOCS, + DENSE_FEATURIZABLE_ATTRIBUTES, + TOKEN_IDS, + TOKENS, + SENTENCE_FEATURES, + SEQUENCE_FEATURES, + NUMBER_OF_SUB_TOKENS, +) + +logger = logging.getLogger(__name__) + + +class HFTransformersNLP(Component): + """Utility Component for interfacing between Transformers library and Rasa OS. + + The transformers(https://github.com/huggingface/transformers) library + is used to load pre-trained language models like BERT, GPT-2, etc. + The component also tokenizes and featurizes dense featurizable attributes of each + message. + """ + + defaults = { + # name of the language model to load. + "model_name": "bert", + # Pre-Trained weights to be loaded(string) + "model_weights": None, + # an optional path to a specific directory to download + # and cache the pre-trained model weights. + "cache_dir": None, + } + + def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None: + super(HFTransformersNLP, self).__init__(component_config) + + self._load_model() + self.whitespace_tokenizer = WhitespaceTokenizer() + + def _load_model(self) -> None: + """Try loading the model""" + + from rasa.nlu.utils.hugging_face.registry import ( + model_class_dict, + model_weights_defaults, + model_tokenizer_dict, + ) + + self.model_name = self.component_config["model_name"] + + if self.model_name not in model_class_dict: + raise KeyError( + f"'{self.model_name}' not a valid model name. Choose from " + f"{str(list(model_class_dict.keys()))}or create" + f"a new class inheriting from this class to support your model." + ) + + self.model_weights = self.component_config["model_weights"] + self.cache_dir = self.component_config["cache_dir"] + + if not self.model_weights: + logger.info( + f"Model weights not specified. Will choose default model weights: " + f"{model_weights_defaults[self.model_name]}" + ) + self.model_weights = model_weights_defaults[self.model_name] + + logger.debug(f"Loading Tokenizer and Model for {self.model_name}") + + self.tokenizer = model_tokenizer_dict[self.model_name].from_pretrained( + self.model_weights, cache_dir=self.cache_dir + ) + self.model = model_class_dict[self.model_name].from_pretrained( + self.model_weights, cache_dir=self.cache_dir + ) + + # Use a universal pad token since all transformer architectures do not have a + # consistent token. Instead of pad_token_id we use unk_token_id because + # pad_token_id is not set for all architectures. We can't add a new token as + # well since vocabulary resizing is not yet supported for TF classes. 
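# Illustrative aside (assumptions marked): a minimal `config.yml` pipeline
# entry exercising the defaults documented above could look like
#     pipeline:
#       - name: HFTransformersNLP
#         model_name: "bert"
#         model_weights: "bert-base-uncased"  # matches model_weights_defaults for "bert"
# where omitting model_weights falls back to that same registry default.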
+ # Also, this does not hurt the model predictions since we use an attention mask + # while feeding input. + self.pad_token_id = self.tokenizer.unk_token_id + + @classmethod + def cache_key( + cls, component_meta: Dict[Text, Any], model_metadata: Metadata + ) -> Optional[Text]: + + weights = component_meta.get("model_weights") or {} + + return f"{cls.name}-{component_meta.get('model_name')}-{get_dict_hash(weights)}" + + @classmethod + def required_packages(cls) -> List[Text]: + return ["transformers"] + + def _lm_tokenize(self, text: Text) -> Tuple[List[int], List[Text]]: + """ + Pass the text through the tokenizer of the language model. + + Args: + text: Text to be tokenized. + + Returns: + List of token ids and token strings. + + """ + split_token_ids = self.tokenizer.encode(text, add_special_tokens=False) + + split_token_strings = self.tokenizer.convert_ids_to_tokens(split_token_ids) + + return split_token_ids, split_token_strings + + def _add_lm_specific_special_tokens( + self, token_ids: List[List[int]] + ) -> List[List[int]]: + """Add language model specific special tokens which were used during their + training. + + Args: + token_ids: List of token ids for each example in the batch. + + Returns: + Augmented list of token ids for each example in the batch. + """ + from rasa.nlu.utils.hugging_face.registry import ( + model_special_tokens_pre_processors, + ) + + augmented_tokens = [ + model_special_tokens_pre_processors[self.model_name](example_token_ids) + for example_token_ids in token_ids + ] + return augmented_tokens + + def _lm_specific_token_cleanup( + self, split_token_ids: List[int], token_strings: List[Text] + ) -> Tuple[List[int], List[Text]]: + """Clean up special chars added by tokenizers of language models. + + Many language models add a special char in front/back of (some) words. We clean + up those chars as they are not + needed once the features are already computed. + + Args: + split_token_ids: List of token ids received as output from the language + model specific tokenizer. + token_strings: List of token strings received as output from the language + model specific tokenizer. + + Returns: + Cleaned up token ids and token strings. + """ + from rasa.nlu.utils.hugging_face.registry import model_tokens_cleaners + + return model_tokens_cleaners[self.model_name](split_token_ids, token_strings) + + def _post_process_sequence_embeddings( + self, sequence_embeddings: np.ndarray + ) -> Tuple[np.ndarray, np.ndarray]: + """Compute sentence level representations and sequence level representations + for relevant tokens. + + Args: + sequence_embeddings: Sequence level dense features received as output from + language model. + + Returns: + Sentence and sequence level representations. + """ + + from rasa.nlu.utils.hugging_face.registry import ( + model_embeddings_post_processors, + ) + + sentence_embeddings = [] + post_processed_sequence_embeddings = [] + + for example_embedding in sequence_embeddings: + ( + example_sentence_embedding, + example_post_processed_embedding, + ) = model_embeddings_post_processors[self.model_name](example_embedding) + + sentence_embeddings.append(example_sentence_embedding) + post_processed_sequence_embeddings.append(example_post_processed_embedding) + + return ( + np.array(sentence_embeddings), + np.array(post_processed_sequence_embeddings), + ) + + def _tokenize_example( + self, message: Message, attribute: Text + ) -> Tuple[List[Token], List[int]]: + """Tokenize a single message example. 
+ + Many language models add a special char in front of (some) words and split + words into sub-words. To ensure the entity start and end values matches the + token values, tokenize the text first using the whitespace tokenizer. If + individual tokens are split up into multiple tokens, we add this information + to the respected token. + + Args: + message: Single message object to be processed. + attribute: Property of message to be processed, one of ``TEXT`` or + ``RESPONSE``. + + Returns: + List of token strings and token ids for the corresponding attribute of the + message. + """ + + tokens_in = self.whitespace_tokenizer.tokenize(message, attribute) + + tokens_out = [] + + token_ids_out = [] + + for token in tokens_in: + # use lm specific tokenizer to further tokenize the text + split_token_ids, split_token_strings = self._lm_tokenize(token.text) + + split_token_ids, split_token_strings = self._lm_specific_token_cleanup( + split_token_ids, split_token_strings + ) + + token_ids_out += split_token_ids + + token.set(NUMBER_OF_SUB_TOKENS, len(split_token_strings)) + + tokens_out.append(token) + + return tokens_out, token_ids_out + + def _get_token_ids_for_batch( + self, batch_examples: List[Message], attribute: Text + ) -> Tuple[List[List[Token]], List[List[int]]]: + """Compute token ids and token strings for each example in batch. + + A token id is the id of that token in the vocabulary of the language model. + Args: + batch_examples: Batch of message objects for which tokens need to be + computed. + attribute: Property of message to be processed, one of ``TEXT`` or + ``RESPONSE``. + + Returns: + List of token strings and token ids for each example in the batch. + """ + + batch_token_ids = [] + batch_tokens = [] + for example in batch_examples: + + example_tokens, example_token_ids = self._tokenize_example( + example, attribute + ) + batch_tokens.append(example_tokens) + batch_token_ids.append(example_token_ids) + + return batch_tokens, batch_token_ids + + @staticmethod + def _compute_attention_mask(actual_sequence_lengths: List[int]) -> np.ndarray: + """Compute a mask for padding tokens. + + This mask will be used by the language model so that it does not attend to + padding tokens. + + Args: + actual_sequence_lengths: List of length of each example without any padding + + Returns: + Computed attention mask, 0 for padding and 1 for non-padding tokens. + """ + + attention_mask = [] + max_seq_length = max(actual_sequence_lengths) + for actual_sequence_length in actual_sequence_lengths: + # add 1s for present tokens, fill up the remaining space up to max + # sequence length with 0s (non-existing tokens) + padded_sequence = [1] * actual_sequence_length + [0] * ( + max_seq_length - actual_sequence_length + ) + attention_mask.append(padded_sequence) + + attention_mask = np.array(attention_mask).astype(np.float32) + + return attention_mask + + def _add_padding_to_batch( + self, batch_token_ids: List[List[int]] + ) -> Tuple[List[int], List[List[int]]]: + """Add padding so that all examples in the batch are of the same length. + + Args: + batch_token_ids: Batch of examples where each example is a non-padded list + of token ids. + + Returns: + Padded batch with all examples of the same length. 
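# Standalone numpy sketch of the attention mask computed above: 1.0 for real
# tokens, 0.0 for padding, given each example's non-padded length (the lengths
# are invented for the example).
import numpy as np

actual_lengths = [3, 2]
max_len = max(actual_lengths)
mask = np.array([[1] * n + [0] * (max_len - n) for n in actual_lengths], dtype=np.float32)
# mask -> [[1., 1., 1.], [1., 1., 0.]]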
+ """ + padded_token_ids = [] + # Compute max length across examples + max_seq_len = 0 + actual_sequence_lengths = [] + + for example_token_ids in batch_token_ids: + actual_sequence_lengths.append(len(example_token_ids)) + max_seq_len = max(max_seq_len, len(example_token_ids)) + + # Add padding according to max_seq_len + # Some models don't contain pad token, we use unknown token as padding token. + # This doesn't affect the computation since we compute an attention mask + # anyways. + for example_token_ids in batch_token_ids: + padded_token_ids.append( + example_token_ids + + [self.pad_token_id] * (max_seq_len - len(example_token_ids)) + ) + return actual_sequence_lengths, padded_token_ids + + @staticmethod + def _extract_nonpadded_embeddings( + embeddings: np.ndarray, actual_sequence_lengths: List[int] + ) -> np.ndarray: + """Use pre-computed non-padded lengths of each example to extract embeddings + for non-padding tokens. + + Args: + embeddings: sequence level representations for each example of the batch. + actual_sequence_lengths: non-padded lengths of each example of the batch. + + Returns: + Sequence level embeddings for only non-padding tokens of the batch. + """ + nonpadded_sequence_embeddings = [] + for index, embedding in enumerate(embeddings): + unmasked_embedding = embedding[: actual_sequence_lengths[index]] + nonpadded_sequence_embeddings.append(unmasked_embedding) + + return np.array(nonpadded_sequence_embeddings) + + def _compute_batch_sequence_features( + self, batch_attention_mask: np.ndarray, padded_token_ids: List[List[int]] + ) -> np.ndarray: + """Feed the padded batch to the language model. + + Args: + batch_attention_mask: Mask of 0s and 1s which indicate whether the token + is a padding token or not. + padded_token_ids: Batch of token ids for each example. The batch is padded + and hence can be fed at once. + + Returns: + Sequence level representations from the language model. + """ + model_outputs = self.model( + np.array(padded_token_ids), attention_mask=np.array(batch_attention_mask) + ) + + # sequence hidden states is always the first output from all models + sequence_hidden_states = model_outputs[0] + + sequence_hidden_states = sequence_hidden_states.numpy() + return sequence_hidden_states + + def _get_model_features_for_batch( + self, batch_token_ids: List[List[int]], batch_tokens: List[List[Token]] + ) -> Tuple[np.ndarray, np.ndarray]: + """Compute dense features of each example in the batch. + + We first add the special tokens corresponding to each language model. Next, we + add appropriate padding and compute a mask for that padding so that it doesn't + affect the feature computation. The padded batch is next fed to the language + model and token level embeddings are computed. Using the pre-computed mask, + embeddings for non-padding tokens are extracted and subsequently sentence + level embeddings are computed. + + Args: + batch_token_ids: List of token ids of each example in the batch. + + Returns: + Sentence and token level dense representations. 
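# Standalone sketch of the padding scheme implemented above: shorter examples
# are extended with the pad token id (0 is used here purely for illustration;
# the component uses `self.pad_token_id`). The original lengths are kept so the
# attention mask can ignore the padding.
batch_token_ids = [[5, 7, 9], [3, 4]]
max_len = max(len(ids) for ids in batch_token_ids)
padded = [ids + [0] * (max_len - len(ids)) for ids in batch_token_ids]
# padded -> [[5, 7, 9], [3, 4, 0]]; actual lengths are [3, 2]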
+ """ + # Let's first add tokenizer specific special tokens to all examples + batch_token_ids_augmented = self._add_lm_specific_special_tokens( + batch_token_ids + ) + + # Let's first add padding so that whole batch can be fed to the model + actual_sequence_lengths, padded_token_ids = self._add_padding_to_batch( + batch_token_ids_augmented + ) + + # Compute attention mask based on actual_sequence_length + batch_attention_mask = self._compute_attention_mask(actual_sequence_lengths) + + # Get token level features from the model + sequence_hidden_states = self._compute_batch_sequence_features( + batch_attention_mask, padded_token_ids + ) + + # Extract features for only non-padding tokens + sequence_nonpadded_embeddings = self._extract_nonpadded_embeddings( + sequence_hidden_states, actual_sequence_lengths + ) + + # Extract sentence level and post-processed features + ( + sentence_embeddings, + sequence_embeddings, + ) = self._post_process_sequence_embeddings(sequence_nonpadded_embeddings) + + # shape of matrix for all sequence embeddings + batch_dim = len(sequence_embeddings) + seq_dim = max(e.shape[0] for e in sequence_embeddings) + feature_dim = sequence_embeddings[0].shape[1] + shape = (batch_dim, seq_dim, feature_dim) + + # align features with tokens so that we have just one vector per token + # (don't include sub-tokens) + sequence_embeddings = train_utils.align_token_features( + batch_tokens, sequence_embeddings, shape + ) + + # sequence_embeddings is a padded numpy array + # remove the padding, keep just the non-zero vectors + sequence_final_embeddings = [] + for embeddings, tokens in zip(sequence_embeddings, batch_tokens): + sequence_final_embeddings.append(embeddings[: len(tokens)]) + sequence_final_embeddings = np.array(sequence_final_embeddings) + + return sentence_embeddings, sequence_final_embeddings + + def _get_docs_for_batch( + self, batch_examples: List[Message], attribute: Text + ) -> List[Dict[Text, Any]]: + """Compute language model docs for all examples in the batch. + + Args: + batch_examples: Batch of message objects for which language model docs + need to be computed. + attribute: Property of message to be processed, one of ``TEXT`` or + ``RESPONSE``. + + Returns: + List of language model docs for each message in batch. + """ + + batch_tokens, batch_token_ids = self._get_token_ids_for_batch( + batch_examples, attribute + ) + + ( + batch_sentence_features, + batch_sequence_features, + ) = self._get_model_features_for_batch(batch_token_ids, batch_tokens) + + # A doc consists of + # {'token_ids': ..., 'tokens': ..., 'sequence_features': ..., + # 'sentence_features': ...} + batch_docs = [] + for index in range(len(batch_examples)): + doc = { + TOKEN_IDS: batch_token_ids[index], + TOKENS: batch_tokens[index], + SEQUENCE_FEATURES: batch_sequence_features[index], + SENTENCE_FEATURES: np.reshape(batch_sentence_features[index], (1, -1)), + } + batch_docs.append(doc) + + return batch_docs + + def train( + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, + ) -> None: + """Compute tokens and dense features for each message in training data. + + Args: + training_data: NLU training data to be tokenized and featurized + config: NLU pipeline config consisting of all components. 
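# Minimal sketch of the fixed-size batching loop used by `train` below
# (toy example list; the component iterates with batch_size = 64 over the
# non-empty training examples of each dense-featurizable attribute).
examples = list(range(10))
batch_size = 4
for start in range(0, len(examples), batch_size):
    batch = examples[start : start + batch_size]
    # ... compute language model docs for `batch` ...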
+ + """ + + batch_size = 64 + + for attribute in DENSE_FEATURIZABLE_ATTRIBUTES: + + non_empty_examples = list( + filter(lambda x: x.get(attribute), training_data.training_examples) + ) + + batch_start_index = 0 + + while batch_start_index < len(non_empty_examples): + + batch_end_index = min( + batch_start_index + batch_size, len(non_empty_examples) + ) + # Collect batch examples + batch_messages = non_empty_examples[batch_start_index:batch_end_index] + + # Construct a doc with relevant features + # extracted(tokens, dense_features) + batch_docs = self._get_docs_for_batch(batch_messages, attribute) + + for index, ex in enumerate(batch_messages): + + ex.set(LANGUAGE_MODEL_DOCS[attribute], batch_docs[index]) + + batch_start_index += batch_size + + def process(self, message: Message, **kwargs: Any) -> None: + """Process an incoming message by computing its tokens and dense features. + + Args: + message: Incoming message object + """ + + message.set( + LANGUAGE_MODEL_DOCS[TEXT], + self._get_docs_for_batch([message], attribute=TEXT)[0], + ) diff --git a/rasa/nlu/utils/hugging_face/registry.py b/rasa/nlu/utils/hugging_face/registry.py new file mode 100644 index 000000000000..a6d68cde8747 --- /dev/null +++ b/rasa/nlu/utils/hugging_face/registry.py @@ -0,0 +1,95 @@ +import logging + +# Explicitly set logging level for this module before any import +# because otherwise it logs tensorflow/pytorch versions +logging.getLogger("transformers.file_utils").setLevel(logging.WARNING) + +from transformers import ( + TFBertModel, + TFOpenAIGPTModel, + TFGPT2Model, + TFXLNetModel, + # TFXLMModel, + TFDistilBertModel, + TFRobertaModel, + BertTokenizer, + OpenAIGPTTokenizer, + GPT2Tokenizer, + XLNetTokenizer, + # XLMTokenizer, + DistilBertTokenizer, + RobertaTokenizer, +) +from rasa.nlu.utils.hugging_face.transformers_pre_post_processors import ( + bert_tokens_pre_processor, + gpt_tokens_pre_processor, + xlnet_tokens_pre_processor, + roberta_tokens_pre_processor, + bert_embeddings_post_processor, + gpt_embeddings_post_processor, + xlnet_embeddings_post_processor, + roberta_embeddings_post_processor, + bert_tokens_cleaner, + openaigpt_tokens_cleaner, + gpt2_tokens_cleaner, + xlnet_tokens_cleaner, +) + + +model_class_dict = { + "bert": TFBertModel, + "gpt": TFOpenAIGPTModel, + "gpt2": TFGPT2Model, + "xlnet": TFXLNetModel, + # "xlm": TFXLMModel, # Currently doesn't work because of a bug in transformers library https://github.com/huggingface/transformers/issues/2729 + "distilbert": TFDistilBertModel, + "roberta": TFRobertaModel, +} +model_tokenizer_dict = { + "bert": BertTokenizer, + "gpt": OpenAIGPTTokenizer, + "gpt2": GPT2Tokenizer, + "xlnet": XLNetTokenizer, + # "xlm": XLMTokenizer, + "distilbert": DistilBertTokenizer, + "roberta": RobertaTokenizer, +} +model_weights_defaults = { + "bert": "bert-base-uncased", + "gpt": "openai-gpt", + "gpt2": "gpt2", + "xlnet": "xlnet-base-cased", + # "xlm": "xlm-mlm-enfr-1024", + "distilbert": "distilbert-base-uncased", + "roberta": "roberta-base", +} + +model_special_tokens_pre_processors = { + "bert": bert_tokens_pre_processor, + "gpt": gpt_tokens_pre_processor, + "gpt2": gpt_tokens_pre_processor, + "xlnet": xlnet_tokens_pre_processor, + # "xlm": xlm_tokens_pre_processor, + "distilbert": bert_tokens_pre_processor, + "roberta": roberta_tokens_pre_processor, +} + +model_tokens_cleaners = { + "bert": bert_tokens_cleaner, + "gpt": openaigpt_tokens_cleaner, + "gpt2": gpt2_tokens_cleaner, + "xlnet": xlnet_tokens_cleaner, + # "xlm": xlm_tokens_pre_processor, + "distilbert": 
bert_tokens_cleaner, # uses the same as BERT + "roberta": gpt2_tokens_cleaner, # Uses the same as GPT2 +} + +model_embeddings_post_processors = { + "bert": bert_embeddings_post_processor, + "gpt": gpt_embeddings_post_processor, + "gpt2": gpt_embeddings_post_processor, + "xlnet": xlnet_embeddings_post_processor, + # "xlm": xlm_embeddings_post_processor, + "distilbert": bert_embeddings_post_processor, + "roberta": roberta_embeddings_post_processor, +} diff --git a/rasa/nlu/utils/hugging_face/transformers_pre_post_processors.py b/rasa/nlu/utils/hugging_face/transformers_pre_post_processors.py new file mode 100644 index 000000000000..2436e6c7814a --- /dev/null +++ b/rasa/nlu/utils/hugging_face/transformers_pre_post_processors.py @@ -0,0 +1,285 @@ +from typing import List, Tuple, Text +import numpy as np + + +def cleanup_tokens( + token_ids_string: List[Tuple[int, Text]], delimiter: Text +) -> Tuple[List[int], List[Text]]: + """Utility method to apply delimiter based cleanup on list of tokens. + + Args: + token_ids_string: List of tuples with each tuple containing (token id, token string). + delimiter: character/string to be cleaned from token strings. + + Returns: + Token ids and Token strings unpacked. + """ + + token_ids_string = [ + (id, string.replace(delimiter, "")) for id, string in token_ids_string + ] + + # remove empty strings + token_ids_string = [(id, string) for id, string in token_ids_string if string] + + # return as individual token ids and token strings + token_ids, token_strings = zip(*token_ids_string) + return token_ids, token_strings + + +def bert_tokens_pre_processor(token_ids: List[int]) -> List[int]: + """Add BERT style special tokens(CLS and SEP). + + Args: + token_ids: List of token ids without any special tokens. + + Returns: + List of token ids augmented with special tokens. + """ + BERT_CLS_ID = 101 + BERT_SEP_ID = 102 + + processed_tokens = token_ids + + processed_tokens.insert(0, BERT_CLS_ID) + processed_tokens.append(BERT_SEP_ID) + + return processed_tokens + + +def gpt_tokens_pre_processor(token_ids: List[int]) -> List[int]: + """Add GPT style special tokens(None). + + Args: + token_ids: List of token ids without any special tokens. + + Returns: + List of token ids augmented with special tokens. + """ + + return token_ids + + +def xlnet_tokens_pre_processor(token_ids: List[int]) -> List[int]: + """Add XLNET style special tokens. + + Args: + token_ids: List of token ids without any special tokens. + + Returns: + List of token ids augmented with special tokens. + """ + XLNET_CLS_ID = 3 + XLNET_SEP_ID = 4 + + token_ids.append(XLNET_SEP_ID) + token_ids.append(XLNET_CLS_ID) + + return token_ids + + +def roberta_tokens_pre_processor(token_ids: List[int]) -> List[int]: + """Add RoBERTa style special tokens. + + Args: + token_ids: List of token ids without any special tokens. + + Returns: + List of token ids augmented with special tokens. + """ + ROBERTA_BEG_ID = 0 + ROBERTA_END_ID = 2 + + token_ids.insert(0, ROBERTA_BEG_ID) + token_ids.append(ROBERTA_END_ID) + + return token_ids + + +def xlm_tokens_pre_processor(token_ids: List[int]) -> List[int]: + """Add XLM style special tokens. + + Args: + token_ids: List of token ids without any special tokens. + + Returns: + List of token ids augmented with special tokens. 
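# For illustration: the *_tokens_pre_processor helpers wrap a plain list of token
# ids with each architecture's special tokens. The ids 7592/2088 below are example
# values; 101/102 and 0/2 are the CLS/SEP and <s>/</s> ids used above. The helpers
# modify the list in place, hence the copies.
from rasa.nlu.utils.hugging_face.transformers_pre_post_processors import (
    bert_tokens_pre_processor,
    roberta_tokens_pre_processor,
)

ids = [7592, 2088]
assert bert_tokens_pre_processor(list(ids)) == [101, 7592, 2088, 102]
assert roberta_tokens_pre_processor(list(ids)) == [0, 7592, 2088, 2]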
+ """ + XLM_SEP_ID = 1 + + token_ids.insert(0, XLM_SEP_ID) + token_ids.append(XLM_SEP_ID) + + return token_ids + + +def bert_embeddings_post_processor( + sequence_embeddings: np.ndarray, +) -> Tuple[np.ndarray, np.ndarray]: + """Post-process embeddings from BERT. + + by removing CLS and SEP embeddings and returning CLS token embedding as + sentence representation. + + Args: + sequence_embeddings: Sequence of token level embeddings received as output from BERT. + + Returns: + sentence level embedding and post-processed sequence level embedding. + """ + sentence_embedding = sequence_embeddings[0] + post_processed_embedding = sequence_embeddings[1:-1] + + return sentence_embedding, post_processed_embedding + + +def gpt_embeddings_post_processor( + sequence_embeddings: np.ndarray, +) -> Tuple[np.ndarray, np.ndarray]: + """Post-process embeddings from GPT models. + + by taking a mean over sequence embeddings and returning that as sentence + representation. + + Args: + sequence_embeddings: Sequence of token level embeddings received as output from GPT. + + Returns: + sentence level embedding and post-processed sequence level embedding. + """ + sentence_embedding = np.mean(sequence_embeddings, axis=0) + post_processed_embedding = sequence_embeddings + + return sentence_embedding, post_processed_embedding + + +def xlnet_embeddings_post_processor( + sequence_embeddings: np.ndarray, +) -> Tuple[np.ndarray, np.ndarray]: + """Post-process embeddings from XLNet models. + + by taking a mean over sequence embeddings and returning that as sentence + representation. Remove last two time steps corresponding + to special tokens from the sequence embeddings. + + Args: + sequence_embeddings: Sequence of token level embeddings received as output from XLNet. + + Returns: + sentence level embedding and post-processed sequence level embedding. + """ + post_processed_embedding = sequence_embeddings[:-2] + sentence_embedding = np.mean(post_processed_embedding, axis=0) + + return sentence_embedding, post_processed_embedding + + +def roberta_embeddings_post_processor( + sequence_embeddings: np.ndarray, +) -> Tuple[np.ndarray, np.ndarray]: + """Post process embeddings from Roberta models. + + by taking a mean over sequence embeddings and returning that as sentence + representation. Remove first and last time steps + corresponding to special tokens from the sequence embeddings. + + Args: + sequence_embeddings: Sequence of token level embeddings received as output from Roberta + + Returns: + sentence level embedding and post-processed sequence level embedding + """ + + post_processed_embedding = sequence_embeddings[1:-1] + sentence_embedding = np.mean(post_processed_embedding, axis=0) + + return sentence_embedding, post_processed_embedding + + +def xlm_embeddings_post_processor( + sequence_embeddings: np.ndarray, +) -> Tuple[np.ndarray, np.ndarray]: + """Post process embeddings from XLM models + + by taking a mean over sequence embeddings and returning that as sentence + representation. Remove first and last time steps + corresponding to special tokens from the sequence embeddings. 
+ + Args: + sequence_embeddings: Sequence of token level embeddings received as output from XLM + + Returns: + sentence level embedding and post-processed sequence level embedding + """ + post_processed_embedding = sequence_embeddings[1:-1] + sentence_embedding = np.mean(post_processed_embedding, axis=0) + + return sentence_embedding, post_processed_embedding + + +def bert_tokens_cleaner( + token_ids: List[int], token_strings: List[Text] +) -> Tuple[List[int], List[Text]]: + """Token cleanup method for BERT. + + Clean up tokens with the extra delimiters (##) BERT adds while breaking a token into sub-tokens. + + Args: + token_ids: List of token ids received as output from BERT Tokenizer. + token_strings: List of token strings received as output from BERT Tokenizer. + + Returns: + Cleaned token ids and token strings. + """ + return cleanup_tokens(list(zip(token_ids, token_strings)), "##") + + +def openaigpt_tokens_cleaner( + token_ids: List[int], token_strings: List[Text] +) -> Tuple[List[int], List[Text]]: + """Token cleanup method for GPT. + + Clean up tokens with the extra delimiters (</w>) OpenAIGPT adds while breaking a token into sub-tokens. + + Args: + token_ids: List of token ids received as output from GPT Tokenizer. + token_strings: List of token strings received as output from GPT Tokenizer. + + Returns: + Cleaned token ids and token strings. + """ + return cleanup_tokens(list(zip(token_ids, token_strings)), "</w>") + + +def gpt2_tokens_cleaner( + token_ids: List[int], token_strings: List[Text] +) -> Tuple[List[int], List[Text]]: + """Token cleanup method for GPT2. + + Clean up tokens with the extra delimiters (Ġ) GPT2 adds while breaking a token into sub-tokens. + + Args: + token_ids: List of token ids received as output from GPT2 Tokenizer. + token_strings: List of token strings received as output from GPT2 Tokenizer. + + Returns: + Cleaned token ids and token strings. + """ + return cleanup_tokens(list(zip(token_ids, token_strings)), "Ġ") + + +def xlnet_tokens_cleaner( + token_ids: List[int], token_strings: List[Text] +) -> Tuple[List[int], List[Text]]: + """Token cleanup method for XLNet. + + Clean up tokens with the extra delimiters (▁) XLNet adds while breaking a token into sub-tokens. + + Args: + token_ids: List of token ids received as output from XLNet Tokenizer. + token_strings: List of token strings received as output from XLNet Tokenizer. + + Returns: + Cleaned token ids and token strings. 
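# For illustration: the cleaners above all delegate to cleanup_tokens, which strips
# the tokenizer's sub-word delimiter and drops tokens that end up empty. Example
# with BERT's "##" delimiter (the ids are example values only):
from rasa.nlu.utils.hugging_face.transformers_pre_post_processors import (
    bert_tokens_cleaner,
)

ids, strings = bert_tokens_cleaner([2652, 2075], ["play", "##ing"])
assert list(strings) == ["play", "ing"]  # delimiter removed
assert list(ids) == [2652, 2075]         # ids stay aligned with their strings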
+ """ + return cleanup_tokens(list(zip(token_ids, token_strings)), "▁") diff --git a/rasa/nlu/utils/mitie_utils.py b/rasa/nlu/utils/mitie_utils.py index e8573b5fae31..91d37cc392d7 100644 --- a/rasa/nlu/utils/mitie_utils.py +++ b/rasa/nlu/utils/mitie_utils.py @@ -12,8 +12,6 @@ class MitieNLP(Component): - provides = ["mitie_feature_extractor", "mitie_file"] - defaults = { # name of the language model to load - this contains # the MITIE feature extractor @@ -25,7 +23,7 @@ def __init__( ) -> None: """Construct a new language model from the MITIE framework.""" - super(MitieNLP, self).__init__(component_config) + super().__init__(component_config) self.extractor = extractor @@ -76,7 +74,7 @@ def provide_context(self) -> Dict[Text, Any]: @staticmethod def ensure_proper_language_model( - extractor: Optional["mitie.total_word_feature_extractor"] + extractor: Optional["mitie.total_word_feature_extractor"], ) -> None: if extractor is None: @@ -92,7 +90,7 @@ def load( model_dir: Optional[Text] = None, model_metadata: Optional[Metadata] = None, cached_component: Optional["MitieNLP"] = None, - **kwargs: Any + **kwargs: Any, ) -> "MitieNLP": import mitie diff --git a/rasa/nlu/utils/pattern_utils.py b/rasa/nlu/utils/pattern_utils.py new file mode 100644 index 000000000000..e0446eda249b --- /dev/null +++ b/rasa/nlu/utils/pattern_utils.py @@ -0,0 +1,139 @@ +import re +from typing import Dict, List, Text, Union + +import rasa.utils.io as io_utils +from rasa.nlu.training_data import TrainingData + + +def _convert_lookup_tables_to_regex( + training_data: TrainingData, use_only_entities: bool = False +) -> List[Dict[Text, Text]]: + """Convert the lookup tables from the training data to regex patterns. + Args: + training_data: The training data. + use_only_entities: If True only regex features with a name equal to a entity + are considered. + + Returns: + A list of regex patterns. + """ + patterns = [] + for table in training_data.lookup_tables: + if use_only_entities and table["name"] not in training_data.entities: + continue + regex_pattern = _generate_lookup_regex(table) + lookup_regex = {"name": table["name"], "pattern": regex_pattern} + patterns.append(lookup_regex) + return patterns + + +def _generate_lookup_regex(lookup_table: Dict[Text, Union[Text, List[Text]]]) -> Text: + """Creates a regex pattern from the given lookup table. + + The lookup table is either a file or a list of entries. + + Args: + lookup_table: The lookup table. + + Returns: + The regex pattern. + """ + lookup_elements = lookup_table["elements"] + + # if it's a list, it should be the elements directly + if isinstance(lookup_elements, list): + elements_to_regex = lookup_elements + # otherwise it's a file path. + else: + elements_to_regex = _read_lookup_table_file(lookup_elements) + + # sanitize the regex, escape special characters + elements_sanitized = [re.escape(e) for e in elements_to_regex] + + # regex matching elements with word boundaries on either side + return "(\\b" + "\\b|\\b".join(elements_sanitized) + "\\b)" + + +def _read_lookup_table_file(lookup_table_file: Text) -> List[Text]: + """Read the lookup table file. + + Args: + lookup_table_file: the file path to the lookup table + + Returns: + Elements listed in the lookup table file. + """ + try: + f = open(lookup_table_file, "r", encoding=io_utils.DEFAULT_ENCODING) + except OSError: + raise ValueError( + f"Could not load lookup table {lookup_table_file}. " + f"Please make sure you've provided the correct path." 
+ ) + + elements_to_regex = [] + with f: + for line in f: + new_element = line.strip() + if new_element: + elements_to_regex.append(new_element) + return elements_to_regex + + +def _collect_regex_features( + training_data: TrainingData, use_only_entities: bool = False +) -> List[Dict[Text, Text]]: + """Get regex features from training data. + + Args: + training_data: The training data + use_only_entities: If True only regex features with a name equal to a entity + are considered. + + Returns: + Regex features. + """ + if not use_only_entities: + return training_data.regex_features + + return [ + regex + for regex in training_data.regex_features + if regex["name"] in training_data.entities + ] + + +def extract_patterns( + training_data: TrainingData, + use_lookup_tables: bool = True, + use_regexes: bool = True, + use_only_entities: bool = False, +) -> List[Dict[Text, Text]]: + """Extract a list of patterns from the training data. + + The patterns are constructed using the regex features and lookup tables defined + in the training data. + + Args: + training_data: The training data. + use_only_entities: If True only lookup tables and regex features with a name + equal to a entity are considered. + use_regexes: Boolean indicating whether to use regex features or not. + use_lookup_tables: Boolean indicating whether to use lookup tables or not. + + Returns: + The list of regex patterns. + """ + if not training_data.lookup_tables and not training_data.regex_features: + return [] + + patterns = [] + + if use_regexes: + patterns.extend(_collect_regex_features(training_data, use_only_entities)) + if use_lookup_tables: + patterns.extend( + _convert_lookup_tables_to_regex(training_data, use_only_entities) + ) + + return patterns diff --git a/rasa/nlu/utils/spacy_utils.py b/rasa/nlu/utils/spacy_utils.py index 5c9a2c5e79ba..3eae015409d1 100644 --- a/rasa/nlu/utils/spacy_utils.py +++ b/rasa/nlu/utils/spacy_utils.py @@ -1,6 +1,6 @@ import logging import typing -from typing import Any, Dict, List, Optional, Text +from typing import Any, Dict, List, Optional, Text, Tuple from rasa.nlu.components import Component from rasa.nlu.config import RasaNLUModelConfig, override_defaults @@ -14,23 +14,10 @@ from spacy.tokens.doc import Doc # pytype: disable=import-error from rasa.nlu.model import Metadata -from rasa.nlu.constants import ( - MESSAGE_RESPONSE_ATTRIBUTE, - MESSAGE_INTENT_ATTRIBUTE, - MESSAGE_TEXT_ATTRIBUTE, - MESSAGE_TOKENS_NAMES, - MESSAGE_ATTRIBUTES, - MESSAGE_SPACY_FEATURES_NAMES, - MESSAGE_VECTOR_FEATURE_NAMES, - SPACY_FEATURIZABLE_ATTRIBUTES, -) +from rasa.nlu.constants import TEXT, SPACY_DOCS, DENSE_FEATURIZABLE_ATTRIBUTES class SpacyNLP(Component): - provides = ["spacy_nlp"] + [ - MESSAGE_SPACY_FEATURES_NAMES[attribute] - for attribute in SPACY_FEATURIZABLE_ATTRIBUTES - ] defaults = { # name of the language model to load - if it is not set @@ -50,7 +37,7 @@ def __init__( ) -> None: self.nlp = nlp - super(SpacyNLP, self).__init__(component_config) + super().__init__(component_config) @staticmethod def load_model(spacy_model_name: Text) -> "Language": @@ -86,9 +73,7 @@ def create( spacy_model_name = config.language component_config["model"] = config.language - logger.info( - "Trying to load spacy model with name '{}'".format(spacy_model_name) - ) + logger.info(f"Trying to load spacy model with name '{spacy_model_name}'") nlp = cls.load_model(spacy_model_name) @@ -113,7 +98,7 @@ def doc_for_text(self, text: Text) -> "Doc": return self.nlp(self.preprocess_text(text)) - def preprocess_text(self, text): 
+ def preprocess_text(self, text: Optional[Text]) -> Text: if text is None: # converted to empty string so that it can still be passed to spacy. @@ -125,22 +110,103 @@ def preprocess_text(self, text): else: return text.lower() - def get_text(self, example, attribute): + def get_text(self, example: Dict[Text, Any], attribute: Text) -> Text: return self.preprocess_text(example.get(attribute)) + @staticmethod + def merge_content_lists( + indexed_training_samples: List[Tuple[int, Text]], + doc_lists: List[Tuple[int, "Doc"]], + ) -> List[Tuple[int, "Doc"]]: + """Merge lists with processed Docs back into their original order.""" + + dct = dict(indexed_training_samples) + dct.update(dict(doc_lists)) + return sorted(dct.items()) + + @staticmethod + def filter_training_samples_by_content( + indexed_training_samples: List[Tuple[int, Text]] + ) -> Tuple[List[Tuple[int, Text]], List[Tuple[int, Text]]]: + """Separates empty training samples from content bearing ones.""" + + docs_to_pipe = list( + filter( + lambda training_sample: training_sample[1] != "", + indexed_training_samples, + ) + ) + empty_docs = list( + filter( + lambda training_sample: training_sample[1] == "", + indexed_training_samples, + ) + ) + return docs_to_pipe, empty_docs + + def process_content_bearing_samples( + self, samples_to_pipe: List[Tuple[int, Text]] + ) -> List[Tuple[int, "Doc"]]: + """Sends content bearing training samples to spaCy's pipe.""" + + docs = [ + (to_pipe_sample[0], doc) + for to_pipe_sample, doc in zip( + samples_to_pipe, + [ + doc + for doc in self.nlp.pipe( + [txt for _, txt in samples_to_pipe], batch_size=50 + ) + ], + ) + ] + return docs + + def process_non_content_bearing_samples( + self, empty_samples: List[Tuple[int, Text]] + ) -> List[Tuple[int, "Doc"]]: + """Creates empty Doc-objects from zero-lengthed training samples strings.""" + + from spacy.tokens import Doc + + n_docs = [ + (empty_sample[0], doc) + for empty_sample, doc in zip( + empty_samples, [Doc(self.nlp.vocab) for doc in empty_samples] + ) + ] + return n_docs + def docs_for_training_data( self, training_data: TrainingData ) -> Dict[Text, List[Any]]: - attribute_docs = {} - for attribute in SPACY_FEATURIZABLE_ATTRIBUTES: - + for attribute in DENSE_FEATURIZABLE_ATTRIBUTES: texts = [self.get_text(e, attribute) for e in training_data.intent_examples] + # Index and freeze indices of the training samples for preserving the order + # after processing the data. + indexed_training_samples = [(idx, text) for idx, text in enumerate(texts)] + + samples_to_pipe, empty_samples = self.filter_training_samples_by_content( + indexed_training_samples + ) - docs = [doc for doc in self.nlp.pipe(texts, batch_size=50)] + content_bearing_docs = self.process_content_bearing_samples(samples_to_pipe) - attribute_docs[attribute] = docs + non_content_bearing_docs = self.process_non_content_bearing_samples( + empty_samples + ) + + attribute_document_list = self.merge_content_lists( + indexed_training_samples, + content_bearing_docs + non_content_bearing_docs, + ) + + # Since we only need the training samples strings, we create a list to get them out + # of the tuple. 
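# For illustration: the filter/pipe/merge approach above only sends non-empty texts
# through nlp.pipe and then restores the original order by index. With toy data
# (no spaCy needed to see the bookkeeping):
indexed = [(0, "hello"), (1, ""), (2, "goodbye")]
to_pipe = [pair for pair in indexed if pair[1] != ""]   # [(0, "hello"), (2, "goodbye")]
empty = [pair for pair in indexed if pair[1] == ""]     # [(1, "")]
# After piping, the processed pairs and the empty ones are merged back into index
# order, so every training example keeps its original position.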
+ attribute_docs[attribute] = [doc for _, doc in attribute_document_list] return attribute_docs def train( @@ -149,23 +215,18 @@ def train( attribute_docs = self.docs_for_training_data(training_data) - for attribute in SPACY_FEATURIZABLE_ATTRIBUTES: + for attribute in DENSE_FEATURIZABLE_ATTRIBUTES: for idx, example in enumerate(training_data.training_examples): example_attribute_doc = attribute_docs[attribute][idx] if len(example_attribute_doc): # If length is 0, that means the initial text feature was None and was replaced by '' # in preprocess method - example.set( - MESSAGE_SPACY_FEATURES_NAMES[attribute], example_attribute_doc - ) + example.set(SPACY_DOCS[attribute], example_attribute_doc) def process(self, message: Message, **kwargs: Any) -> None: - message.set( - MESSAGE_SPACY_FEATURES_NAMES[MESSAGE_TEXT_ATTRIBUTE], - self.doc_for_text(message.text), - ) + message.set(SPACY_DOCS[TEXT], self.doc_for_text(message.text)) @classmethod def load( @@ -174,7 +235,7 @@ def load( model_dir: Text = None, model_metadata: "Metadata" = None, cached_component: Optional["SpacyNLP"] = None, - **kwargs: Any + **kwargs: Any, ) -> "SpacyNLP": if cached_component: diff --git a/rasa/run.py b/rasa/run.py index fc7d82d16703..8b0837746bb5 100644 --- a/rasa/run.py +++ b/rasa/run.py @@ -17,7 +17,7 @@ def run( endpoints: Text, connector: Text = None, credentials: Text = None, - **kwargs: Dict + **kwargs: Dict, ): """Runs a Rasa model. @@ -53,24 +53,21 @@ def run( channel=connector, credentials=credentials, endpoints=_endpoints, - **kwargs + **kwargs, ) def create_agent(model: Text, endpoints: Text = None) -> "Agent": from rasa.core.tracker_store import TrackerStore - import rasa.core.brokers.utils as broker_utils from rasa.core.utils import AvailableEndpoints from rasa.core.agent import Agent + from rasa.core.brokers.broker import EventBroker _endpoints = AvailableEndpoints.read_endpoints(endpoints) - _broker = broker_utils.from_endpoint_config(_endpoints.event_broker) - - _tracker_store = TrackerStore.find_tracker_store( - None, _endpoints.tracker_store, _broker - ) - _lock_store = LockStore.find_lock_store(_endpoints.lock_store) + _broker = EventBroker.create(_endpoints.event_broker) + _tracker_store = TrackerStore.create(_endpoints.tracker_store, event_broker=_broker) + _lock_store = LockStore.create(_endpoints.lock_store) return Agent.load( model, diff --git a/rasa/server.py b/rasa/server.py index d4a716498df8..4afdc8b1dc8c 100644 --- a/rasa/server.py +++ b/rasa/server.py @@ -1,33 +1,41 @@ +import asyncio +import functools import logging +import multiprocessing import os +from pathlib import Path import tempfile import traceback -from functools import wraps, reduce +import typing +from functools import reduce, wraps from inspect import isawaitable -from typing import Any, Callable, List, Optional, Text, Union -from ssl import SSLContext +from pathlib import Path +from typing import Any, Callable, List, Optional, Text, Union, Dict -from sanic import Sanic, response -from sanic.request import Request -from sanic_cors import CORS -from sanic_jwt import Initialize, exceptions +from sanic.exceptions import InvalidUsage +from rasa.nlu.training_data.formats import RasaYAMLReader import rasa -import rasa.core.brokers.utils as broker_utils -import rasa.utils.common +import rasa.core.utils +from rasa.utils import common as common_utils import rasa.utils.endpoints import rasa.utils.io +from rasa import model from rasa.constants import ( - MINIMUM_COMPATIBLE_VERSION, - DEFAULT_MODELS_PATH, DEFAULT_DOMAIN_PATH, + 
DEFAULT_MODELS_PATH, + DEFAULT_RESPONSE_TIMEOUT, DOCS_BASE_URL, + MINIMUM_COMPATIBLE_VERSION, + DOCS_URL_TRAINING_DATA_NLU, ) -from rasa.core.agent import load_agent, Agent +from rasa.core import agent +from rasa.core.agent import Agent +from rasa.core.brokers.broker import EventBroker from rasa.core.channels.channel import ( - UserMessage, CollectingOutputChannel, OutputChannel, + UserMessage, ) from rasa.core.domain import InvalidDomain from rasa.core.events import Event @@ -35,20 +43,38 @@ from rasa.core.test import test from rasa.core.tracker_store import TrackerStore from rasa.core.trackers import DialogueStateTracker, EventVerbosity -from rasa.core.utils import dump_obj_as_str_to_file, AvailableEndpoints -from rasa.model import get_model_subdirectories, fingerprint_from_path +from rasa.core.utils import AvailableEndpoints from rasa.nlu.emulators.no_emulator import NoEmulator from rasa.nlu.test import run_evaluation from rasa.utils.endpoints import EndpointConfig +from sanic import Sanic, response +from sanic.request import Request +from sanic.response import HTTPResponse +from sanic_cors import CORS +from sanic_jwt import Initialize, exceptions + +if typing.TYPE_CHECKING: + from ssl import SSLContext + from rasa.core.processor import MessageProcessor logger = logging.getLogger(__name__) +JSON_CONTENT_TYPE = "application/json" +YAML_CONTENT_TYPE = "application/x-yaml" + OUTPUT_CHANNEL_QUERY_KEY = "output_channel" USE_LATEST_INPUT_CHANNEL_AS_OUTPUT_CHANNEL = "latest" class ErrorResponse(Exception): - def __init__(self, status, reason, message, details=None, help_url=None): + def __init__( + self, + status: int, + reason: Text, + message: Text, + details: Any = None, + help_url: Optional[Text] = None, + ) -> None: self.error_info = { "version": rasa.__version__, "status": "failure", @@ -87,7 +113,7 @@ def decorated(*args, **kwargs): "Conflict", "No agent loaded. 
To continue processing, a " "model of a trained agent needs to be loaded.", - help_url=_docs("/user-guide/running-the-server/"), + help_url=_docs("/user-guide/configuring-http-api/"), ) return f(*args, **kwargs) @@ -102,7 +128,7 @@ def requires_auth(app: Sanic, token: Optional[Text] = None) -> Callable[[Any], A def decorator(f: Callable[[Any, Any], Any]) -> Callable[[Any, Any], Any]: def conversation_id_from_args(args: Any, kwargs: Any) -> Optional[Text]: - argnames = rasa.utils.common.arguments_of(f) + argnames = common_utils.arguments_of(f) try: sender_id_arg_idx = argnames.index("conversation_id") @@ -153,7 +179,7 @@ async def decorated(request: Request, *args: Any, **kwargs: Any) -> Any: "NotAuthorized", "User has insufficient permissions.", help_url=_docs( - "/user-guide/running-the-server/#security-considerations" + "/user-guide/configuring-http-api/#security-considerations" ), ) elif token is None and app.config.get("USE_JWT") is None: @@ -167,7 +193,7 @@ async def decorated(request: Request, *args: Any, **kwargs: Any) -> Any: "NotAuthenticated", "User is not authenticated.", help_url=_docs( - "/user-guide/running-the-server/#security-considerations" + "/user-guide/configuring-http-api/#security-considerations" ), ) @@ -179,6 +205,7 @@ async def decorated(request: Request, *args: Any, **kwargs: Any) -> Any: def event_verbosity_parameter( request: Request, default_verbosity: EventVerbosity ) -> EventVerbosity: + """Create `EventVerbosity` object using request params if present.""" event_verbosity_str = request.args.get( "include_events", default_verbosity.name ).upper() @@ -195,24 +222,37 @@ def event_verbosity_parameter( ) -def get_tracker(agent: "Agent", conversation_id: Text) -> DialogueStateTracker: - tracker = agent.tracker_store.get_or_create_tracker(conversation_id) +async def get_tracker( + processor: "MessageProcessor", conversation_id: Text +) -> DialogueStateTracker: + """Get tracker object from `MessageProcessor`.""" + tracker = await processor.get_tracker_with_session_start(conversation_id) + _validate_tracker(tracker, conversation_id) + + # `_validate_tracker` ensures we can't return `None` so `Optional` is not needed + return tracker # pytype: disable=bad-return-type + + +def _validate_tracker( + tracker: Optional[DialogueStateTracker], conversation_id: Text +) -> None: if not tracker: raise ErrorResponse( 409, "Conflict", - "Could not retrieve tracker with id '{}'. Most likely " - "because there is no domain set on the agent.".format(conversation_id), + f"Could not retrieve tracker with ID '{conversation_id}'. Most likely " + f"because there is no domain set on the agent.", ) - return tracker def validate_request_body(request: Request, error_message: Text): + """Check if `request` has a body.""" if not request.body: raise ErrorResponse(400, "BadRequest", error_message) async def authenticate(request: Request): + """Callback for authentication failed.""" raise exceptions.AuthenticationFailed( "Direct JWT authentication not supported. 
You should already have " "a valid JWT from an authentication provider, Rasa will just make " @@ -223,14 +263,28 @@ async def authenticate(request: Request): def create_ssl_context( ssl_certificate: Optional[Text], ssl_keyfile: Optional[Text], - ssl_password: Optional[Text], -) -> Optional[SSLContext]: - """Create a SSL context (for the sanic server) if a proper certificate is passed.""" + ssl_ca_file: Optional[Text] = None, + ssl_password: Optional[Text] = None, +) -> Optional["SSLContext"]: + """Create an SSL context if a proper certificate is passed. + + Args: + ssl_certificate: path to the SSL client certificate + ssl_keyfile: path to the SSL key file + ssl_ca_file: path to the SSL CA file for verification (optional) + ssl_password: SSL private key password (optional) + + Returns: + SSL context if a valid certificate chain can be loaded, `None` otherwise. + + """ if ssl_certificate: import ssl - ssl_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH) + ssl_context = ssl.create_default_context( + purpose=ssl.Purpose.CLIENT_AUTH, cafile=ssl_ca_file + ) ssl_context.load_cert_chain( ssl_certificate, keyfile=ssl_keyfile, password=ssl_password ) @@ -280,16 +334,16 @@ async def _load_agent( action_endpoint = None if endpoints: - _broker = broker_utils.from_endpoint_config(endpoints.event_broker) - tracker_store = TrackerStore.find_tracker_store( - None, endpoints.tracker_store, _broker + broker = EventBroker.create(endpoints.event_broker) + tracker_store = TrackerStore.create( + endpoints.tracker_store, event_broker=broker ) generator = endpoints.nlg action_endpoint = endpoints.action if not lock_store: - lock_store = LockStore.find_lock_store(endpoints.lock_store) + lock_store = LockStore.create(endpoints.lock_store) - loaded_agent = await load_agent( + loaded_agent = await agent.load_agent( model_path, model_server, remote_storage, @@ -301,21 +355,39 @@ async def _load_agent( except Exception as e: logger.debug(traceback.format_exc()) raise ErrorResponse( - 500, "LoadingError", "An unexpected error occurred. Error: {}".format(e) + 500, "LoadingError", f"An unexpected error occurred. Error: {e}" ) if not loaded_agent: raise ErrorResponse( 400, "BadRequest", - "Agent with name '{}' could not be loaded.".format(model_path), + f"Agent with name '{model_path}' could not be loaded.", {"parameter": "model", "in": "query"}, ) return loaded_agent +def configure_cors( + app: Sanic, cors_origins: Union[Text, List[Text], None] = "" +) -> None: + """Configure CORS origins for the given app.""" + + # Workaround so that socketio works with requests from other origins. 
+ # https://github.com/miguelgrinberg/python-socketio/issues/205#issuecomment-493769183 + app.config.CORS_AUTOMATIC_OPTIONS = True + app.config.CORS_SUPPORTS_CREDENTIALS = True + app.config.CORS_EXPOSE_HEADERS = "filename" + + CORS( + app, resources={r"/*": {"origins": cors_origins or ""}}, automatic_options=True + ) + + def add_root_route(app: Sanic): + """Add '/' route to return hello.""" + @app.get("/") async def hello(request: Request): """Check if the server is running and responds with the version.""" @@ -324,8 +396,9 @@ async def hello(request: Request): def create_app( agent: Optional["Agent"] = None, - cors_origins: Union[Text, List[Text]] = "*", + cors_origins: Union[Text, List[Text], None] = "*", auth_token: Optional[Text] = None, + response_timeout: int = DEFAULT_RESPONSE_TIMEOUT, jwt_secret: Optional[Text] = None, jwt_method: Text = "HS256", endpoints: Optional[AvailableEndpoints] = None, @@ -333,15 +406,8 @@ def create_app( """Class representing a Rasa HTTP server.""" app = Sanic(__name__) - app.config.RESPONSE_TIMEOUT = 60 * 60 - # Workaround so that socketio works with requests from other origins. - # https://github.com/miguelgrinberg/python-socketio/issues/205#issuecomment-493769183 - app.config.CORS_AUTOMATIC_OPTIONS = True - app.config.CORS_SUPPORTS_CREDENTIALS = True - - CORS( - app, resources={r"/*": {"origins": cors_origins or ""}}, automatic_options=True - ) + app.config.RESPONSE_TIMEOUT = response_timeout + configure_cors(app, cors_origins) # Setup the Sanic-JWT extension if jwt_secret and jwt_method: @@ -358,6 +424,9 @@ def create_app( ) app.agent = agent + # Initialize shared object of type unsigned int for tracking + # the number of active training processes + app.active_training_processes = multiprocessing.Value("I", 0) @app.exception(ErrorResponse) async def handle_error_response(request: Request, exception: ErrorResponse): @@ -384,8 +453,10 @@ async def status(request: Request): return response.json( { - "model_file": app.agent.model_directory, - "fingerprint": fingerprint_from_path(app.agent.model_directory), + "model_file": app.agent.path_to_model_archive + or app.agent.model_directory, + "fingerprint": model.fingerprint_from_path(app.agent.model_directory), + "num_active_training_jobs": app.active_training_processes.value, } ) @@ -398,7 +469,7 @@ async def retrieve_tracker(request: Request, conversation_id: Text): verbosity = event_verbosity_parameter(request, EventVerbosity.AFTER_RESTART) until_time = rasa.utils.endpoints.float_arg(request, "until") - tracker = get_tracker(app.agent, conversation_id) + tracker = await get_tracker(app.agent.create_processor(), conversation_id) try: if until_time is not None: @@ -409,9 +480,7 @@ async def retrieve_tracker(request: Request, conversation_id: Text): except Exception as e: logger.debug(traceback.format_exc()) raise ErrorResponse( - 500, - "ConversationError", - "An unexpected error occurred. Error: {}".format(e), + 500, "ConversationError", f"An unexpected error occurred. 
Error: {e}" ) @app.post("/conversations/<conversation_id>/tracker/events") @@ -425,7 +494,30 @@ async def append_events(request: Request, conversation_id: Text): "to the state of a conversation.", ) + verbosity = event_verbosity_parameter(request, EventVerbosity.AFTER_RESTART) + + try: + async with app.agent.lock_store.lock(conversation_id): + processor = app.agent.create_processor() + tracker = processor.get_tracker(conversation_id) + _validate_tracker(tracker, conversation_id) + + events = _get_events_from_request_body(request) + + for event in events: + tracker.update(event, app.agent.domain) + app.agent.tracker_store.save(tracker) + + return response.json(tracker.current_state(verbosity)) + except Exception as e: + logger.debug(traceback.format_exc()) + raise ErrorResponse( + 500, "ConversationError", f"An unexpected error occurred. Error: {e}" + ) + + def _get_events_from_request_body(request: Request) -> List[Event]: events = request.json + if not isinstance(events, list): events = [events] @@ -433,9 +525,9 @@ async def append_events(request: Request, conversation_id: Text): events = [event for event in events if event] if not events: - logger.warning( - "Append event called, but could not extract a valid event. " - "Request JSON: {}".format(request.json) + common_utils.raise_warning( + f"Append event called, but could not extract a valid event. " + f"Request JSON: {request.json}" ) raise ErrorResponse( 400, @@ -444,23 +536,7 @@ async def append_events(request: Request, conversation_id: Text): {"parameter": "", "in": "body"}, ) - verbosity = event_verbosity_parameter(request, EventVerbosity.AFTER_RESTART) - - try: - async with app.agent.lock_store.lock(conversation_id): - tracker = get_tracker(app.agent, conversation_id) - for event in events: - tracker.update(event, app.agent.domain) - app.agent.tracker_store.save(tracker) - - return response.json(tracker.current_state(verbosity)) - except Exception as e: - logger.debug(traceback.format_exc()) - raise ErrorResponse( - 500, - "ConversationError", - "An unexpected error occurred. Error: {}".format(e), - ) + return events @app.put("/conversations/<conversation_id>/tracker/events") @requires_auth(app, auth_token) @@ -488,9 +564,7 @@ async def replace_events(request: Request, conversation_id: Text): except Exception as e: logger.debug(traceback.format_exc()) raise ErrorResponse( - 500, - "ConversationError", - "An unexpected error occurred. Error: {}".format(e), + 500, "ConversationError", f"An unexpected error occurred. Error: {e}" ) @app.get("/conversations/<conversation_id>/story") @@ -500,7 +574,7 @@ async def retrieve_story(request: Request, conversation_id: Text): """Get an end-to-end story corresponding to this conversation.""" # retrieve tracker and set to requested state - tracker = get_tracker(app.agent, conversation_id) + tracker = await get_tracker(app.agent.create_processor(), conversation_id) until_time = rasa.utils.endpoints.float_arg(request, "until") @@ -514,9 +588,7 @@ async def retrieve_story(request: Request, conversation_id: Text): except Exception as e: logger.debug(traceback.format_exc()) raise ErrorResponse( - 500, - "ConversationError", - "An unexpected error occurred. Error: {}".format(e), + 500, "ConversationError", f"An unexpected error occurred. 
Error: {e}" ) @app.post("/conversations/<conversation_id>/execute") @@ -541,7 +613,9 @@ async def execute_action(request: Request, conversation_id: Text): try: async with app.agent.lock_store.lock(conversation_id): - tracker = get_tracker(app.agent, conversation_id) + tracker = await get_tracker( + app.agent.create_processor(), conversation_id + ) output_channel = _get_output_channel(request, tracker) await app.agent.execute_action( conversation_id, @@ -554,12 +628,64 @@ async def execute_action(request: Request, conversation_id: Text): except Exception as e: logger.debug(traceback.format_exc()) raise ErrorResponse( - 500, - "ConversationError", - "An unexpected error occurred. Error: {}".format(e), + 500, "ConversationError", f"An unexpected error occurred. Error: {e}" + ) + + tracker = await get_tracker(app.agent.create_processor(), conversation_id) + state = tracker.current_state(verbosity) + + response_body = {"tracker": state} + + if isinstance(output_channel, CollectingOutputChannel): + response_body["messages"] = output_channel.messages + + return response.json(response_body) + + @app.post("/conversations/<conversation_id>/trigger_intent") + @requires_auth(app, auth_token) + @ensure_loaded_agent(app) + async def trigger_intent(request: Request, conversation_id: Text) -> HTTPResponse: + request_params = request.json + + intent_to_trigger = request_params.get("name") + entities = request_params.get("entities", []) + + if not intent_to_trigger: + raise ErrorResponse( + 400, + "BadRequest", + "Name of the intent not provided in request body.", + {"parameter": "name", "in": "body"}, + ) + + verbosity = event_verbosity_parameter(request, EventVerbosity.AFTER_RESTART) + + try: + async with app.agent.lock_store.lock(conversation_id): + tracker = await get_tracker( + app.agent.create_processor(), conversation_id + ) + output_channel = _get_output_channel(request, tracker) + if intent_to_trigger not in app.agent.domain.intents: + raise ErrorResponse( + 404, + "NotFound", + f"The intent {trigger_intent} does not exist in the domain.", + ) + await app.agent.trigger_intent( + intent_name=intent_to_trigger, + entities=entities, + output_channel=output_channel, + tracker=tracker, + ) + except ErrorResponse: + raise + except Exception as e: + logger.debug(traceback.format_exc()) + raise ErrorResponse( + 500, "ConversationError", f"An unexpected error occurred. Error: {e}" ) - tracker = get_tracker(app.agent, conversation_id) state = tracker.current_state(verbosity) response_body = {"tracker": state} @@ -572,10 +698,10 @@ async def execute_action(request: Request, conversation_id: Text): @app.post("/conversations/<conversation_id>/predict") @requires_auth(app, auth_token) @ensure_loaded_agent(app) - async def predict(request: Request, conversation_id: Text): + async def predict(request: Request, conversation_id: Text) -> HTTPResponse: try: # Fetches the appropriate bot response in a json format - responses = app.agent.predict_next(conversation_id) + responses = await app.agent.predict_next(conversation_id) responses["scores"] = sorted( responses["scores"], key=lambda k: (-k["score"], k["action"]) ) @@ -583,9 +709,7 @@ async def predict(request: Request, conversation_id: Text): except Exception as e: logger.debug(traceback.format_exc()) raise ErrorResponse( - 500, - "ConversationError", - "An unexpected error occurred. Error: {}".format(e), + 500, "ConversationError", f"An unexpected error occurred. 
Error: {e}" ) @app.post("/conversations/<conversation_id>/messages") @@ -625,16 +749,13 @@ async def add_message(request: Request, conversation_id: Text): except Exception as e: logger.debug(traceback.format_exc()) raise ErrorResponse( - 500, - "ConversationError", - "An unexpected error occurred. Error: {}".format(e), + 500, "ConversationError", f"An unexpected error occurred. Error: {e}" ) @app.post("/model/train") @requires_auth(app, auth_token) - async def train(request: Request): + async def train(request: Request) -> HTTPResponse: """Train a Rasa Model.""" - from rasa.train import train_async validate_request_body( request, @@ -642,88 +763,61 @@ async def train(request: Request): "train your model.", ) - rjs = request.json - validate_request(rjs) - - # create a temporary directory to store config, domain and - # training data - temp_dir = tempfile.mkdtemp() - - config_path = os.path.join(temp_dir, "config.yml") - dump_obj_as_str_to_file(config_path, rjs["config"]) + if request.headers.get("Content-type") == YAML_CONTENT_TYPE: + training_payload = _training_payload_from_yaml(request) + else: + training_payload = _training_payload_from_json(request) - if "nlu" in rjs: - nlu_path = os.path.join(temp_dir, "nlu.md") - dump_obj_as_str_to_file(nlu_path, rjs["nlu"]) + try: + with app.active_training_processes.get_lock(): + app.active_training_processes.value += 1 - if "stories" in rjs: - stories_path = os.path.join(temp_dir, "stories.md") - dump_obj_as_str_to_file(stories_path, rjs["stories"]) + loop = asyncio.get_event_loop() - domain_path = DEFAULT_DOMAIN_PATH - if "domain" in rjs: - domain_path = os.path.join(temp_dir, "domain.yml") - dump_obj_as_str_to_file(domain_path, rjs["domain"]) + from rasa import train as train_model - try: - model_path = await train_async( - domain=domain_path, - config=config_path, - training_files=temp_dir, - output_path=rjs.get("out", DEFAULT_MODELS_PATH), - force_training=rjs.get("force", False), + # Declare `model_path` upfront to avoid pytype `name-error` + model_path: Optional[Text] = None + # pass `None` to run in default executor + model_path = await loop.run_in_executor( + None, functools.partial(train_model, **training_payload) ) - filename = os.path.basename(model_path) if model_path else None + if model_path: + filename = os.path.basename(model_path) - return await response.file( - model_path, filename=filename, headers={"filename": filename} - ) + return await response.file( + model_path, filename=filename, headers={"filename": filename} + ) + else: + raise ErrorResponse( + 500, + "TrainingError", + "Ran training, but it finished without a trained model.", + ) + except ErrorResponse as e: + raise e except InvalidDomain as e: raise ErrorResponse( 400, "InvalidDomainError", - "Provided domain file is invalid. Error: {}".format(e), + f"Provided domain file is invalid. Error: {e}", ) except Exception as e: - logger.debug(traceback.format_exc()) + logger.error(traceback.format_exc()) raise ErrorResponse( 500, "TrainingError", - "An unexpected error occurred during training. Error: {}".format(e), - ) - - def validate_request(rjs): - if "config" not in rjs: - raise ErrorResponse( - 400, - "BadRequest", - "The training request is missing the required key `config`.", - {"parameter": "config", "in": "body"}, - ) - - if "nlu" not in rjs and "stories" not in rjs: - raise ErrorResponse( - 400, - "BadRequest", - "To train a Rasa model you need to specify at least one type of " - "training data. 
Add `nlu` and/or `stories` to the request.", - {"parameters": ["nlu", "stories"], "in": "body"}, - ) - - if "stories" in rjs and "domain" not in rjs: - raise ErrorResponse( - 400, - "BadRequest", - "To train a Rasa model with story training data, you also need to " - "specify the `domain`.", - {"parameter": "domain", "in": "body"}, + f"An unexpected error occurred during training. Error: {e}", ) + finally: + with app.active_training_processes.get_lock(): + app.active_training_processes.value -= 1 @app.post("/model/test/stories") @requires_auth(app, auth_token) @ensure_loaded_agent(app, require_core_is_ready=True) - async def evaluate_stories(request: Request): + async def evaluate_stories(request: Request) -> HTTPResponse: """Evaluate stories against the currently loaded model.""" validate_request_body( request, @@ -731,23 +825,24 @@ async def evaluate_stories(request: Request): "evaluate your model.", ) - stories = rasa.utils.io.create_temporary_file(request.body, mode="w+b") + test_data = _test_data_file_from_payload(request) + use_e2e = rasa.utils.endpoints.bool_arg(request, "e2e", default=False) try: - evaluation = await test(stories, app.agent, e2e=use_e2e) + evaluation = await test(test_data, app.agent, e2e=use_e2e) return response.json(evaluation) except Exception as e: - logger.debug(traceback.format_exc()) + logger.error(traceback.format_exc()) raise ErrorResponse( 500, "TestingError", - "An unexpected error occurred during evaluation. Error: {}".format(e), + f"An unexpected error occurred during evaluation. Error: {e}", ) @app.post("/model/test/intents") @requires_auth(app, auth_token) - async def evaluate_intents(request: Request): + async def evaluate_intents(request: Request) -> HTTPResponse: """Evaluate intents against a Rasa model.""" validate_request_body( request, @@ -755,6 +850,8 @@ async def evaluate_intents(request: Request): "evaluate your model.", ) + test_data = _test_data_file_from_payload(request) + eval_agent = app.agent model_path = request.args.get("model", None) @@ -766,30 +863,29 @@ async def evaluate_intents(request: Request): model_path, model_server, app.agent.remote_storage ) - nlu_data = rasa.utils.io.create_temporary_file(request.body, mode="w+b") - data_path = os.path.abspath(nlu_data) + data_path = os.path.abspath(test_data) if not os.path.exists(eval_agent.model_directory): raise ErrorResponse(409, "Conflict", "Loaded model file not found.") model_directory = eval_agent.model_directory - _, nlu_model = get_model_subdirectories(model_directory) + _, nlu_model = model.get_model_subdirectories(model_directory) try: evaluation = run_evaluation(data_path, nlu_model) return response.json(evaluation) except Exception as e: - logger.debug(traceback.format_exc()) + logger.error(traceback.format_exc()) raise ErrorResponse( 500, "TestingError", - "An unexpected error occurred during evaluation. Error: {}".format(e), + f"An unexpected error occurred during evaluation. Error: {e}", ) @app.post("/model/predict") @requires_auth(app, auth_token) @ensure_loaded_agent(app, require_core_is_ready=True) - async def tracker_predict(request: Request): + async def tracker_predict(request: Request) -> HTTPResponse: """ Given a list of events, predicts the next action""" validate_request_body( request, @@ -809,14 +905,14 @@ async def tracker_predict(request: Request): raise ErrorResponse( 400, "BadRequest", - "Supplied events are not valid. {}".format(e), + f"Supplied events are not valid. 
{e}", {"parameter": "", "in": "body"}, ) try: policy_ensemble = app.agent.policy_ensemble probabilities, policy = policy_ensemble.probabilities_using_best_policy( - tracker, app.agent.domain + tracker, app.agent.domain, app.agent.interpreter ) scores = [ @@ -834,15 +930,13 @@ async def tracker_predict(request: Request): except Exception as e: logger.debug(traceback.format_exc()) raise ErrorResponse( - 500, - "PredictionError", - "An unexpected error occurred. Error: {}".format(e), + 500, "PredictionError", f"An unexpected error occurred. Error: {e}" ) @app.post("/model/parse") @requires_auth(app, auth_token) @ensure_loaded_agent(app) - async def parse(request: Request): + async def parse(request: Request) -> HTTPResponse: validate_request_body( request, "No text message defined in request_body. Add text message to request body " @@ -860,9 +954,7 @@ async def parse(request: Request): except Exception as e: logger.debug(traceback.format_exc()) raise ErrorResponse( - 400, - "ParsingError", - "An unexpected error occurred. Error: {}".format(e), + 400, "ParsingError", f"An unexpected error occurred. Error: {e}" ) response_data = emulator.normalise_response_json(parsed_data) @@ -871,12 +963,12 @@ async def parse(request: Request): except Exception as e: logger.debug(traceback.format_exc()) raise ErrorResponse( - 500, "ParsingError", "An unexpected error occurred. Error: {}".format(e) + 500, "ParsingError", f"An unexpected error occurred. Error: {e}" ) @app.put("/model") @requires_auth(app, auth_token) - async def load_model(request: Request): + async def load_model(request: Request) -> HTTPResponse: validate_request_body(request, "No path to model file defined in request_body.") model_path = request.json.get("model_file", None) @@ -891,7 +983,7 @@ async def load_model(request: Request): raise ErrorResponse( 400, "BadRequest", - "Supplied 'model_server' is not valid. Error: {}".format(e), + f"Supplied 'model_server' is not valid. Error: {e}", {"parameter": "model_server", "in": "body"}, ) @@ -899,44 +991,44 @@ async def load_model(request: Request): model_path, model_server, remote_storage, endpoints, app.agent.lock_store ) - logger.debug("Successfully loaded model '{}'.".format(model_path)) + logger.debug(f"Successfully loaded model '{model_path}'.") return response.json(None, status=204) @app.delete("/model") @requires_auth(app, auth_token) - async def unload_model(request: Request): + async def unload_model(request: Request) -> HTTPResponse: model_file = app.agent.model_directory app.agent = Agent(lock_store=app.agent.lock_store) - logger.debug("Successfully unloaded model '{}'.".format(model_file)) + logger.debug(f"Successfully unloaded model '{model_file}'.") return response.json(None, status=204) @app.get("/domain") @requires_auth(app, auth_token) @ensure_loaded_agent(app) - async def get_domain(request: Request): + async def get_domain(request: Request) -> HTTPResponse: """Get current domain in yaml or json format.""" - accepts = request.headers.get("Accept", default="application/json") + accepts = request.headers.get("Accept", default=JSON_CONTENT_TYPE) if accepts.endswith("json"): domain = app.agent.domain.as_dict() return response.json(domain) elif accepts.endswith("yml") or accepts.endswith("yaml"): domain_yaml = app.agent.domain.as_yaml() return response.text( - domain_yaml, status=200, content_type="application/x-yml" + domain_yaml, status=200, content_type=YAML_CONTENT_TYPE ) else: raise ErrorResponse( 406, "NotAcceptable", - "Invalid Accept header. 
Domain can be " - "provided as " - 'json ("Accept: application/json") or' - 'yml ("Accept: application/x-yml"). ' - "Make sure you've set the appropriate Accept " - "header.", + f"Invalid Accept header. Domain can be " + f"provided as " + f'json ("Accept: {JSON_CONTENT_TYPE}") or' + f'yml ("Accept: {YAML_CONTENT_TYPE}"). ' + f"Make sure you've set the appropriate Accept " + f"header.", ) return app @@ -980,3 +1072,142 @@ def _get_output_channel( matching_channels, CollectingOutputChannel(), ) + + +def _test_data_file_from_payload(request: Request) -> Text: + if request.headers.get("Content-type") == YAML_CONTENT_TYPE: + return str(_training_payload_from_yaml(request)["training_files"]) + else: + return rasa.utils.io.create_temporary_file( + request.body, mode="w+b", suffix=".md" + ) + + +def _training_payload_from_json(request: Request) -> Dict[Text, Union[Text, bool]]: + logger.debug( + "Extracting JSON payload with Markdown training data from request body." + ) + + request_payload = request.json + _validate_json_training_payload(request_payload) + + # create a temporary directory to store config, domain and + # training data + temp_dir = tempfile.mkdtemp() + + config_path = os.path.join(temp_dir, "config.yml") + + rasa.utils.io.write_text_file(request_payload["config"], config_path) + + if "nlu" in request_payload: + nlu_path = os.path.join(temp_dir, "nlu.md") + rasa.utils.io.write_text_file(request_payload["nlu"], nlu_path) + + if "stories" in request_payload: + stories_path = os.path.join(temp_dir, "stories.md") + rasa.utils.io.write_text_file(request_payload["stories"], stories_path) + + if "responses" in request_payload: + responses_path = os.path.join(temp_dir, "responses.md") + rasa.utils.io.write_text_file(request_payload["responses"], responses_path) + + domain_path = DEFAULT_DOMAIN_PATH + if "domain" in request_payload: + domain_path = os.path.join(temp_dir, "domain.yml") + rasa.utils.io.write_text_file(request_payload["domain"], domain_path) + + model_output_directory = _model_output_directory( + request_payload.get( + "save_to_default_model_directory", + request.args.get("save_to_default_model_directory", True), + ) + ) + + return dict( + domain=domain_path, + config=config_path, + training_files=temp_dir, + output=model_output_directory, + force_training=request_payload.get( + "force", request.args.get("force_training", False) + ), + ) + + +def _validate_json_training_payload(rjs: Dict): + if "config" not in rjs: + raise ErrorResponse( + 400, + "BadRequest", + "The training request is missing the required key `config`.", + {"parameter": "config", "in": "body"}, + ) + + if "nlu" not in rjs and "stories" not in rjs: + raise ErrorResponse( + 400, + "BadRequest", + "To train a Rasa model you need to specify at least one type of " + "training data. Add `nlu` and/or `stories` to the request.", + {"parameters": ["nlu", "stories"], "in": "body"}, + ) + + if "stories" in rjs and "domain" not in rjs: + raise ErrorResponse( + 400, + "BadRequest", + "To train a Rasa model with story training data, you also need to " + "specify the `domain`.", + {"parameter": "domain", "in": "body"}, + ) + + if "force" in rjs or "save_to_default_model_directory" in rjs: + common_utils.raise_warning( + "Specifying 'force' and 'save_to_default_model_directory' as part of the " + "JSON payload is deprecated. 
Please use the header arguments " + "'force_training' and 'save_to_default_model_directory'.", + category=FutureWarning, + docs=_docs("/api/http-api"), + ) + + +def _training_payload_from_yaml(request: Request,) -> Dict[Text, Union[Text, bool]]: + logger.debug("Extracting YAML training data from request body.") + + decoded = request.body.decode(rasa.utils.io.DEFAULT_ENCODING) + _validate_yaml_training_payload(decoded) + + temp_dir = tempfile.mkdtemp() + training_data = Path(temp_dir) / "data.yml" + rasa.utils.io.write_text_file(decoded, training_data) + + model_output_directory = _model_output_directory( + request.args.get("save_to_default_model_directory", True) + ) + + return dict( + domain=str(training_data), + config=str(training_data), + training_files=temp_dir, + output=model_output_directory, + force_training=request.args.get("force_training", False), + ) + + +def _model_output_directory(save_to_default_model_directory: bool) -> Text: + if save_to_default_model_directory: + return DEFAULT_MODELS_PATH + + return tempfile.gettempdir() + + +def _validate_yaml_training_payload(yaml_text: Text) -> None: + try: + RasaYAMLReader.validate(yaml_text) + except Exception as e: + raise ErrorResponse( + 400, + "BadRequest", + f"The request body does not contain valid YAML. Error: {e}", + help_url=DOCS_URL_TRAINING_DATA_NLU, + ) diff --git a/rasa/test.py b/rasa/test.py index 2ea20d093f07..275f5646bdef 100644 --- a/rasa/test.py +++ b/rasa/test.py @@ -1,7 +1,9 @@ import asyncio import logging import os -from typing import Text, Dict, Optional, List, Any +import typing +from typing import Text, Dict, Optional, List, Any, Iterable, Tuple, Union +from pathlib import Path import rasa.utils.io as io_utils from rasa.constants import ( @@ -9,15 +11,22 @@ RESULTS_FILE, NUMBER_OF_TRAINING_STORIES_FILE, ) -from rasa.cli.utils import print_error, print_warning +import rasa.cli.utils as cli_utils import rasa.utils.common as utils from rasa.exceptions import ModelNotFound +if typing.TYPE_CHECKING: + from rasa.core.agent import Agent + logger = logging.getLogger(__name__) -def test_core_models_in_directory(model_directory: Text, stories: Text, output: Text): - from rasa.core.test import compare_models_in_dir, plot_core_results +def test_core_models_in_directory( + model_directory: Text, stories: Text, output: Text +) -> None: + from rasa.core.test import compare_models_in_dir + + model_directory = _get_sanitized_model_directory(model_directory) loop = asyncio.get_event_loop() loop.run_until_complete(compare_models_in_dir(model_directory, stories, output)) @@ -27,6 +36,55 @@ def test_core_models_in_directory(model_directory: Text, stories: Text, output: plot_core_results(output, number_of_stories) +def plot_core_results(output_directory: Text, number_of_examples: List[int]) -> None: + """Plot core model comparison graph. + + Args: + output_directory: path to the output directory + number_of_examples: number of examples per run + """ + import rasa.utils.plotting as plotting_utils + + graph_path = os.path.join(output_directory, "core_model_comparison_graph.pdf") + + plotting_utils.plot_curve( + output_directory, + number_of_examples, + x_label_text="Number of stories present during training", + y_label_text="Number of correct test stories", + graph_path=graph_path, + ) + + +def _get_sanitized_model_directory(model_directory: Text) -> Text: + """Adjusts the `--model` argument of `rasa test core` when called with + `--evaluate-model-directory`. + + By default rasa uses the latest model for the `--model` parameter. 
However, for + `--evaluate-model-directory` we need a directory. This function checks if the + passed parameter is a model or an individual model file. + + Args: + model_directory: The model_directory argument that was given to + `test_core_models_in_directory`. + + Returns: The adjusted model_directory that should be used in + `test_core_models_in_directory`. + """ + import rasa.model + + p = Path(model_directory) + if p.is_file(): + if model_directory != rasa.model.get_latest_model(): + cli_utils.print_warning( + "You passed a file as '--model'. Will use the directory containing " + "this file instead." + ) + model_directory = str(p.parent) + + return model_directory + + def test_core_models(models: List[Text], stories: Text, output: Text): from rasa.core.test import compare_models @@ -40,13 +98,13 @@ def test( nlu_data: Text, endpoints: Optional[Text] = None, output: Text = DEFAULT_RESULTS_PATH, - kwargs: Optional[Dict] = None, + additional_arguments: Optional[Dict] = None, ): - if kwargs is None: - kwargs = {} + if additional_arguments is None: + additional_arguments = {} - test_core(model, stories, endpoints, output, **kwargs) - test_nlu(model, nlu_data, output, kwargs) + test_core(model, stories, endpoints, output, additional_arguments) + test_nlu(model, nlu_data, output, additional_arguments) def test_core( @@ -54,9 +112,8 @@ def test_core( stories: Optional[Text] = None, endpoints: Optional[Text] = None, output: Text = DEFAULT_RESULTS_PATH, - kwargs: Optional[Dict] = None, + additional_arguments: Optional[Dict] = None, ): - import rasa.core.test import rasa.core.utils as core_utils import rasa.model from rasa.core.interpreter import RegexInterpreter, NaturalLanguageInterpreter @@ -64,8 +121,8 @@ def test_core( _endpoints = core_utils.AvailableEndpoints.read_endpoints(endpoints) - if kwargs is None: - kwargs = {} + if additional_arguments is None: + additional_arguments = {} if output: io_utils.create_directory(output) @@ -73,7 +130,7 @@ def test_core( try: unpacked_model = rasa.model.get_model(model) except ModelNotFound: - print_error( + cli_utils.print_error( "Unable to test: could not find a model. Use 'rasa train' to train a " "Rasa model and provide it via the '--model' argument." ) @@ -82,30 +139,39 @@ def test_core( core_path, nlu_path = rasa.model.get_model_subdirectories(unpacked_model) if not core_path: - print_error( + cli_utils.print_error( "Unable to test: could not find a Core model. Use 'rasa train' to train a " "Rasa model and provide it via the '--model' argument." ) - use_e2e = kwargs["e2e"] if "e2e" in kwargs else False + use_e2e = additional_arguments.get("e2e", False) _interpreter = RegexInterpreter() - if use_e2e: - if nlu_path: - _interpreter = NaturalLanguageInterpreter.create(nlu_path, _endpoints.nlu) - else: - print_warning( - "No NLU model found. Using default 'RegexInterpreter' for end-to-end " - "evaluation." - ) + if nlu_path: + _interpreter = NaturalLanguageInterpreter.create(_endpoints.nlu or nlu_path) + elif use_e2e: + cli_utils.print_warning( + "No NLU model found. Using default 'RegexInterpreter' for end-to-end " + "evaluation." 
+ ) _agent = Agent.load(unpacked_model, interpreter=_interpreter) - kwargs = utils.minimal_kwargs(kwargs, rasa.core.test, ["stories", "agent"]) + from rasa.core.test import test + + kwargs = utils.minimal_kwargs(additional_arguments, test, ["stories", "agent"]) + + _test_core(stories, _agent, output, **kwargs) + + +def _test_core( + stories: Optional[Text], agent: "Agent", output_directory: Text, **kwargs: Any +) -> None: + from rasa.core.test import test loop = asyncio.get_event_loop() loop.run_until_complete( - rasa.core.test(stories, _agent, out_directory=output, **kwargs) + test(stories, agent, out_directory=output_directory, **kwargs) ) @@ -113,7 +179,7 @@ def test_nlu( model: Optional[Text], nlu_data: Optional[Text], output_directory: Text = DEFAULT_RESULTS_PATH, - kwargs: Optional[Dict] = None, + additional_arguments: Optional[Dict] = None, ): from rasa.nlu.test import run_evaluation from rasa.model import get_model @@ -121,7 +187,7 @@ def test_nlu( try: unpacked_model = get_model(model) except ModelNotFound: - print_error( + cli_utils.print_error( "Could not find any model. Use 'rasa train nlu' to train a " "Rasa model and provide it via the '--model' argument." ) @@ -132,10 +198,12 @@ def test_nlu( nlu_model = os.path.join(unpacked_model, "nlu") if os.path.exists(nlu_model): - kwargs = utils.minimal_kwargs(kwargs, run_evaluation, ["data_path", "model"]) + kwargs = utils.minimal_kwargs( + additional_arguments, run_evaluation, ["data_path", "model"] + ) run_evaluation(nlu_data, nlu_model, output_directory=output_directory, **kwargs) else: - print_error( + cli_utils.print_error( "Could not find any model. Use 'rasa train nlu' to train a " "Rasa model and provide it via the '--model' argument." ) @@ -155,7 +223,6 @@ def compare_nlu_models( from rasa.nlu.utils import write_json_to_file from rasa.utils.io import create_path from rasa.nlu.test import compare_nlu - from rasa.core.test import plot_nlu_results data = load_data(nlu) data = drop_intents_below_freq(data, cutoff=5) @@ -185,31 +252,130 @@ def compare_nlu_models( plot_nlu_results(output, training_examples_per_run) +def plot_nlu_results(output_directory: Text, number_of_examples: List[int]) -> None: + """Plot NLU model comparison graph. 
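    The resulting curve plots the label-weighted average F1 score on the test set
    against the number of intent examples present during training, with one line
    per configuration found in the results file (see `plot_curve` in
    `rasa/utils/plotting.py` further below).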
+ + Args: + output_directory: path to the output directory + number_of_examples: number of examples per run + """ + import rasa.utils.plotting as plotting_utils + + graph_path = os.path.join(output_directory, "nlu_model_comparison_graph.pdf") + + plotting_utils.plot_curve( + output_directory, + number_of_examples, + x_label_text="Number of intent examples present during training", + y_label_text="Label-weighted average F1 score on test set", + graph_path=graph_path, + ) + + def perform_nlu_cross_validation( - config: Text, nlu: Text, output: Text, kwargs: Optional[Dict[Text, Any]] + config: Text, + nlu: Text, + output: Text, + additional_arguments: Optional[Dict[Text, Any]], ): import rasa.nlu.config from rasa.nlu.test import ( drop_intents_below_freq, cross_validate, - return_results, - return_entity_results, + log_results, + log_entity_results, ) - kwargs = kwargs or {} - folds = int(kwargs.get("folds", 3)) + additional_arguments = additional_arguments or {} + folds = int(additional_arguments.get("folds", 3)) nlu_config = rasa.nlu.config.load(config) data = rasa.nlu.training_data.load_data(nlu) data = drop_intents_below_freq(data, cutoff=folds) - kwargs = utils.minimal_kwargs(kwargs, cross_validate) - results, entity_results = cross_validate(data, folds, nlu_config, output, **kwargs) - logger.info("CV evaluation (n={})".format(folds)) + kwargs = utils.minimal_kwargs(additional_arguments, cross_validate) + results, entity_results, response_selection_results = cross_validate( + data, folds, nlu_config, output, **kwargs + ) + logger.info(f"CV evaluation (n={folds})") if any(results): logger.info("Intent evaluation results") - return_results(results.train, "train") - return_results(results.test, "test") + log_results(results.train, "train") + log_results(results.test, "test") if any(entity_results): logger.info("Entity evaluation results") - return_entity_results(entity_results.train, "train") - return_entity_results(entity_results.test, "test") + log_entity_results(entity_results.train, "train") + log_entity_results(entity_results.test, "test") + if any(response_selection_results): + logger.info("Response Selection evaluation results") + log_results(response_selection_results.train, "train") + log_results(response_selection_results.test, "test") + + +def get_evaluation_metrics( + targets: Iterable[Any], + predictions: Iterable[Any], + output_dict: bool = False, + exclude_label: Optional[Text] = None, +) -> Tuple[Union[Text, Dict[Text, Dict[Text, float]]], float, float, float]: + """Compute the f1, precision, accuracy and summary report from sklearn. + + Args: + targets: target labels + predictions: predicted labels + output_dict: if True sklearn returns a summary report as dict, if False the + report is in string format + exclude_label: labels to exclude from evaluation + + Returns: + Report from sklearn, precision, f1, and accuracy values. + """ + from sklearn import metrics + + targets = clean_labels(targets) + predictions = clean_labels(predictions) + + labels = get_unique_labels(targets, exclude_label) + if not labels: + logger.warning("No labels to evaluate. 
Skip evaluation.") + return {}, 0.0, 0.0, 0.0 + + report = metrics.classification_report( + targets, predictions, labels=labels, output_dict=output_dict + ) + precision = metrics.precision_score( + targets, predictions, labels=labels, average="weighted" + ) + f1 = metrics.f1_score(targets, predictions, labels=labels, average="weighted") + accuracy = metrics.accuracy_score(targets, predictions) + + return report, precision, f1, accuracy + + +def clean_labels(labels: Iterable[Text]) -> List[Text]: + """Remove `None` labels. sklearn metrics do not support them. + + Args: + labels: list of labels + + Returns: + Cleaned labels. + """ + return [label if label is not None else "" for label in labels] + + +def get_unique_labels( + targets: Iterable[Text], exclude_label: Optional[Text] +) -> List[Text]: + """Get unique labels. Exclude 'exclude_label' if specified. + + Args: + targets: labels + exclude_label: label to exclude + + Returns: + Unique labels. + """ + labels = set(targets) + if exclude_label and exclude_label in labels: + labels.remove(exclude_label) + return list(labels) diff --git a/rasa/train.py b/rasa/train.py index a520b38fb108..bef2f7818ce8 100644 --- a/rasa/train.py +++ b/rasa/train.py @@ -4,9 +4,12 @@ from contextlib import ExitStack from typing import Text, Optional, List, Union, Dict +from rasa.core.interpreter import NaturalLanguageInterpreter from rasa.importers.importer import TrainingDataImporter from rasa import model +from rasa.model import FingerprintComparisonResult from rasa.core.domain import Domain +from rasa.nlu.model import Interpreter from rasa.utils.common import TempDirectoryPath from rasa.cli.utils import ( @@ -16,7 +19,11 @@ bcolors, print_color, ) -from rasa.constants import DEFAULT_MODELS_PATH +from rasa.constants import ( + DEFAULT_MODELS_PATH, + DEFAULT_CORE_SUBDIRECTORY_NAME, + DEFAULT_NLU_SUBDIRECTORY_NAME, +) def train( @@ -26,9 +33,18 @@ def train( output: Text = DEFAULT_MODELS_PATH, force_training: bool = False, fixed_model_name: Optional[Text] = None, - kwargs: Optional[Dict] = None, + persist_nlu_training_data: bool = False, + core_additional_arguments: Optional[Dict] = None, + nlu_additional_arguments: Optional[Dict] = None, + loop: Optional[asyncio.AbstractEventLoop] = None, ) -> Optional[Text]: - loop = asyncio.get_event_loop() + if loop is None: + try: + loop = asyncio.get_event_loop() + except RuntimeError: + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + return loop.run_until_complete( train_async( domain=domain, @@ -37,7 +53,9 @@ def train( output_path=output, force_training=force_training, fixed_model_name=fixed_model_name, - kwargs=kwargs, + persist_nlu_training_data=persist_nlu_training_data, + core_additional_arguments=core_additional_arguments, + nlu_additional_arguments=nlu_additional_arguments, ) ) @@ -50,7 +68,8 @@ async def train_async( force_training: bool = False, fixed_model_name: Optional[Text] = None, persist_nlu_training_data: bool = False, - kwargs: Optional[Dict] = None, + core_additional_arguments: Optional[Dict] = None, + nlu_additional_arguments: Optional[Dict] = None, ) -> Optional[Text]: """Trains a Rasa model (Core and NLU). @@ -61,7 +80,11 @@ async def train_async( output_path: Output path. force_training: If `True` retrain model even if data has not changed. fixed_model_name: Name of model to be stored. - kwargs: Additional training parameters. + persist_nlu_training_data: `True` if the NLU training data should be persisted + with the model. 
+ core_additional_arguments: Additional training parameters for core training. + nlu_additional_arguments: Additional training parameters forwarded to training + method of each NLU component. Returns: Path of the trained model archive. @@ -74,6 +97,7 @@ async def train_async( train_path = stack.enter_context(TempDirectoryPath(tempfile.mkdtemp())) domain = await file_importer.get_domain() + if domain.is_empty(): return await handle_domain_if_not_exists( file_importer, output_path, fixed_model_name @@ -86,7 +110,8 @@ async def train_async( force_training, fixed_model_name, persist_nlu_training_data, - kwargs, + core_additional_arguments=core_additional_arguments, + nlu_additional_arguments=nlu_additional_arguments, ) @@ -110,28 +135,30 @@ async def _train_async_internal( force_training: bool, fixed_model_name: Optional[Text], persist_nlu_training_data: bool, - kwargs: Optional[Dict], + core_additional_arguments: Optional[Dict] = None, + nlu_additional_arguments: Optional[Dict] = None, ) -> Optional[Text]: """Trains a Rasa model (Core and NLU). Use only from `train_async`. Args: - domain: Path to the domain file. - config: Path to the config for Core and NLU. + file_importer: `TrainingDataImporter` which supplies the training data. train_path: Directory in which to train the model. - nlu_data_directory: Path to NLU training files. - story_directory: Path to Core training files. output_path: Output path. force_training: If `True` retrain model even if data has not changed. fixed_model_name: Name of model to be stored. - kwargs: Additional training parameters. + persist_nlu_training_data: `True` if the NLU training data should be persisted + with the model. + core_additional_arguments: Additional training parameters for core training. + nlu_additional_arguments: Additional training parameters forwarded to training + method of each NLU component. Returns: Path of the trained model archive. 
""" - new_fingerprint = await model.model_fingerprint(file_importer) - stories = await file_importer.get_stories() - nlu_data = await file_importer.get_nlu_data() + stories, nlu_data = await asyncio.gather( + file_importer.get_stories(), file_importer.get_nlu_data() + ) if stories.is_empty() and nlu_data.is_empty(): print_error( @@ -147,6 +174,7 @@ async def _train_async_internal( output=output_path, fixed_model_name=fixed_model_name, persist_nlu_training_data=persist_nlu_training_data, + additional_arguments=nlu_additional_arguments, ) if nlu_data.is_empty(): @@ -155,25 +183,28 @@ async def _train_async_internal( file_importer, output=output_path, fixed_model_name=fixed_model_name, - kwargs=kwargs, + additional_arguments=core_additional_arguments, ) + new_fingerprint = await model.model_fingerprint(file_importer) old_model = model.get_latest_model(output_path) - retrain_core, retrain_nlu = model.should_retrain( - new_fingerprint, old_model, train_path - ) + fingerprint_comparison = FingerprintComparisonResult(force_training=force_training) + if not force_training: + fingerprint_comparison = model.should_retrain( + new_fingerprint, old_model, train_path + ) - if force_training or retrain_core or retrain_nlu: + if fingerprint_comparison.is_training_required(): await _do_training( file_importer, output_path=output_path, train_path=train_path, - force_training=force_training, - retrain_core=retrain_core, - retrain_nlu=retrain_nlu, + fingerprint_comparison_result=fingerprint_comparison, fixed_model_name=fixed_model_name, persist_nlu_training_data=persist_nlu_training_data, - kwargs=kwargs, + core_additional_arguments=core_additional_arguments, + nlu_additional_arguments=nlu_additional_arguments, + old_model_zip_path=old_model, ) return model.package_model( @@ -194,43 +225,78 @@ async def _do_training( file_importer: TrainingDataImporter, output_path: Text, train_path: Text, - force_training: bool = False, - retrain_core: bool = True, - retrain_nlu: bool = True, + fingerprint_comparison_result: Optional[FingerprintComparisonResult] = None, fixed_model_name: Optional[Text] = None, persist_nlu_training_data: bool = False, - kwargs: Optional[Dict] = None, + core_additional_arguments: Optional[Dict] = None, + nlu_additional_arguments: Optional[Dict] = None, + old_model_zip_path: Optional[Text] = None, ): + if not fingerprint_comparison_result: + fingerprint_comparison_result = FingerprintComparisonResult() - if force_training or retrain_core: - await _train_core_with_validated_data( + interpreter_path = None + if fingerprint_comparison_result.should_retrain_nlu(): + model_path = await _train_nlu_with_validated_data( file_importer, output=output_path, train_path=train_path, fixed_model_name=fixed_model_name, - kwargs=kwargs, + persist_nlu_training_data=persist_nlu_training_data, + additional_arguments=nlu_additional_arguments, ) + interpreter_path = os.path.join(model_path, DEFAULT_NLU_SUBDIRECTORY_NAME) else: print_color( - "Core stories/configuration did not change. No need to retrain Core model.", + "NLU data/configuration did not change. 
No need to retrain NLU model.", color=bcolors.OKBLUE, ) - if force_training or retrain_nlu: - await _train_nlu_with_validated_data( + if fingerprint_comparison_result.should_retrain_core(): + await _train_core_with_validated_data( file_importer, output=output_path, train_path=train_path, fixed_model_name=fixed_model_name, - persist_nlu_training_data=persist_nlu_training_data, + additional_arguments=core_additional_arguments, + interpreter=_load_interpreter(interpreter_path) + or _interpreter_from_previous_model(old_model_zip_path), ) + elif fingerprint_comparison_result.should_retrain_nlg(): + print_color( + "Core stories/configuration did not change. " + "Only the templates section has been changed. A new model with " + "the updated templates will be created.", + color=bcolors.OKBLUE, + ) + await model.update_model_with_new_domain(file_importer, train_path) else: print_color( - "NLU data/configuration did not change. No need to retrain NLU model.", + "Core stories/configuration did not change. No need to retrain Core model.", color=bcolors.OKBLUE, ) +def _load_interpreter( + interpreter_path: Optional[Text], +) -> Optional[NaturalLanguageInterpreter]: + if interpreter_path: + return NaturalLanguageInterpreter.create(interpreter_path) + + return None + + +def _interpreter_from_previous_model( + old_model_zip_path: Optional[Text], +) -> Optional[NaturalLanguageInterpreter]: + if not old_model_zip_path: + return None + + with model.unpack_model(old_model_zip_path) as unpacked: + _, old_nlu = model.get_model_subdirectories(unpacked) + return NaturalLanguageInterpreter.create(old_nlu) + + def train_core( domain: Union[Domain, Text], config: Text, @@ -238,7 +304,7 @@ def train_core( output: Text, train_path: Optional[Text] = None, fixed_model_name: Optional[Text] = None, - kwargs: Optional[Dict] = None, + additional_arguments: Optional[Dict] = None, ) -> Optional[Text]: loop = asyncio.get_event_loop() return loop.run_until_complete( @@ -249,7 +315,7 @@ def train_core( output=output, train_path=train_path, fixed_model_name=fixed_model_name, - kwargs=kwargs, + additional_arguments=additional_arguments, ) ) @@ -261,7 +327,7 @@ async def train_core_async( output: Text, train_path: Optional[Text] = None, fixed_model_name: Optional[Text] = None, - kwargs: Optional[Dict] = None, + additional_arguments: Optional[Dict] = None, ) -> Optional[Text]: """Trains a Core model. @@ -274,7 +340,7 @@ async def train_core_async( directory, otherwise in the provided directory. fixed_model_name: Name of model to be stored. uncompress: If `True` the model will not be compressed. - kwargs: Additional training parameters. + additional_arguments: Additional training parameters. 
Returns: If `train_path` is given it returns the path to the model archive, @@ -305,7 +371,7 @@ async def train_core_async( output=output, train_path=train_path, fixed_model_name=fixed_model_name, - kwargs=kwargs, + additional_arguments=additional_arguments, ) @@ -314,7 +380,8 @@ async def _train_core_with_validated_data( output: Text, train_path: Optional[Text] = None, fixed_model_name: Optional[Text] = None, - kwargs: Optional[Dict] = None, + additional_arguments: Optional[Dict] = None, + interpreter: Optional[Interpreter] = None, ) -> Optional[Text]: """Train Core with validated training and config data.""" @@ -330,14 +397,16 @@ async def _train_core_with_validated_data( # normal (not compare) training print_color("Training Core model...", color=bcolors.OKBLUE) - domain = await file_importer.get_domain() - config = await file_importer.get_config() + domain, config = await asyncio.gather( + file_importer.get_domain(), file_importer.get_config() + ) await rasa.core.train( domain_file=domain, training_resource=file_importer, - output_path=os.path.join(_train_path, "core"), + output_path=os.path.join(_train_path, DEFAULT_CORE_SUBDIRECTORY_NAME), policy_config=config, - kwargs=kwargs, + additional_arguments=additional_arguments, + interpreter=interpreter, ) print_color("Core model training completed.", color=bcolors.OKBLUE) @@ -361,6 +430,8 @@ def train_nlu( output: Text, train_path: Optional[Text] = None, fixed_model_name: Optional[Text] = None, + persist_nlu_training_data: bool = False, + additional_arguments: Optional[Dict] = None, ) -> Optional[Text]: """Trains an NLU model. @@ -371,7 +442,11 @@ def train_nlu( train_path: If `None` the model will be trained in a temporary directory, otherwise in the provided directory. fixed_model_name: Name of the model to be stored. - uncompress: If `True` the model will not be compressed. + persist_nlu_training_data: `True` if the NLU training data should be persisted + with the model. + additional_arguments: Additional training parameters which will be passed to + the `train` method of each component. + Returns: If `train_path` is given it returns the path to the model archive, @@ -381,7 +456,15 @@ def train_nlu( loop = asyncio.get_event_loop() return loop.run_until_complete( - _train_nlu_async(config, nlu_data, output, train_path, fixed_model_name) + _train_nlu_async( + config, + nlu_data, + output, + train_path, + fixed_model_name, + persist_nlu_training_data, + additional_arguments, + ) ) @@ -392,7 +475,15 @@ async def _train_nlu_async( train_path: Optional[Text] = None, fixed_model_name: Optional[Text] = None, persist_nlu_training_data: bool = False, + additional_arguments: Optional[Dict] = None, ): + if not nlu_data: + print_error( + "No NLU data given. Please provide NLU data in order to train " + "a Rasa NLU model using the '--nlu' argument." + ) + return + # training NLU only hence the training files still have to be selected file_importer = TrainingDataImporter.load_nlu_importer_from_config( config, training_data_paths=[nlu_data] @@ -401,8 +492,9 @@ async def _train_nlu_async( training_datas = await file_importer.get_nlu_data() if training_datas.is_empty(): print_error( - "No NLU data given. Please provide NLU data in order to train " - "a Rasa NLU model using the '--nlu' argument." + f"Path '{nlu_data}' doesn't contain valid NLU data in it. " + f"Please verify the data format. " + f"The NLU model training will be skipped now." 
) return @@ -412,6 +504,7 @@ async def _train_nlu_async( train_path=train_path, fixed_model_name=fixed_model_name, persist_nlu_training_data=persist_nlu_training_data, + additional_arguments=additional_arguments, ) @@ -421,11 +514,15 @@ async def _train_nlu_with_validated_data( train_path: Optional[Text] = None, fixed_model_name: Optional[Text] = None, persist_nlu_training_data: bool = False, + additional_arguments: Optional[Dict] = None, ) -> Optional[Text]: """Train NLU with validated training and config data.""" import rasa.nlu.train + if additional_arguments is None: + additional_arguments = {} + with ExitStack() as stack: if train_path: # If the train path was provided, do nothing on exit. @@ -441,6 +538,7 @@ async def _train_nlu_with_validated_data( _train_path, fixed_model_name="nlu", persist_nlu_training_data=persist_nlu_training_data, + **additional_arguments, ) print_color("NLU model training completed.", color=bcolors.OKBLUE) diff --git a/rasa/utils/common.py b/rasa/utils/common.py index becbaec725d3..cbea0e731453 100644 --- a/rasa/utils/common.py +++ b/rasa/utils/common.py @@ -1,17 +1,20 @@ import logging import os import shutil +import warnings from types import TracebackType -from typing import Any, Callable, Dict, List, Text, Optional, Type +from typing import Any, Callable, Dict, List, Optional, Text, Type, Collection import rasa.core.utils import rasa.utils.io +from rasa.cli import utils +from rasa.cli.utils import bcolors from rasa.constants import ( - GLOBAL_USER_CONFIG_PATH, DEFAULT_LOG_LEVEL, - ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL_LIBRARIES, + ENV_LOG_LEVEL, ENV_LOG_LEVEL_LIBRARIES, + GLOBAL_USER_CONFIG_PATH, ) logger = logging.getLogger(__name__) @@ -68,27 +71,48 @@ def set_log_level(log_level: Optional[int] = None): update_tensorflow_log_level() update_asyncio_log_level() update_apscheduler_log_level() + update_socketio_log_level() os.environ[ENV_LOG_LEVEL] = logging.getLevelName(log_level) -def update_apscheduler_log_level(): +def update_apscheduler_log_level() -> None: + log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES) + + apscheduler_loggers = [ + "apscheduler", + "apscheduler.scheduler", + "apscheduler.executors", + "apscheduler.executors.default", + ] + + for logger_name in apscheduler_loggers: + logging.getLogger(logger_name).setLevel(log_level) + logging.getLogger(logger_name).propagate = False + + +def update_socketio_log_level() -> None: log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES) - logging.getLogger("apscheduler.scheduler").setLevel(log_level) - logging.getLogger("apscheduler.scheduler").propagate = False - logging.getLogger("apscheduler.executors.default").setLevel(log_level) - logging.getLogger("apscheduler.executors.default").propagate = False + socketio_loggers = ["websockets.protocol", "engineio.server", "socketio.server"] + for logger_name in socketio_loggers: + logging.getLogger(logger_name).setLevel(log_level) + logging.getLogger(logger_name).propagate = False -def update_tensorflow_log_level(): + +def update_tensorflow_log_level() -> None: """Set the log level of Tensorflow to the log level specified in the environment variable 'LOG_LEVEL_LIBRARIES'.""" + + # Disables libvinfer, tensorRT, cuda, AVX2 and FMA warnings (CPU support). This variable needs to be set before the + # first import since some warnings are raised on the first import. 
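    # For reference, TensorFlow interprets TF_CPP_MIN_LOG_LEVEL as follows: "0" logs
    # everything, "1" filters out INFO, "2" filters out INFO and WARNING, and "3"
    # filters out INFO, WARNING and ERROR. "2" therefore hides the noisy startup
    # messages while still surfacing errors.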
+ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" + import tensorflow as tf log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES) - os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # disables AVX2 FMA warnings (CPU support) if log_level == "DEBUG": tf_log_level = tf.compat.v1.logging.DEBUG elif log_level == "INFO": @@ -127,13 +151,25 @@ def update_sanic_log_level(log_file: Optional[Text] = None): access_logger.addHandler(file_handler) -def update_asyncio_log_level(): +def update_asyncio_log_level() -> None: """Set the log level of asyncio to the log level specified in the environment variable 'LOG_LEVEL_LIBRARIES'.""" log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES) logging.getLogger("asyncio").setLevel(log_level) +def set_log_and_warnings_filters() -> None: + """ + Set log filters on the root logger, and duplicate filters for warnings. + + Filters only propagate on handlers, not loggers. + """ + for handler in logging.getLogger().handlers: + handler.addFilter(RepeatedLogFilter()) + + warnings.filterwarnings("once", category=UserWarning) + + def obtain_verbosity() -> int: """Returns a verbosity level according to the set log level.""" log_level = os.environ.get(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL) @@ -159,6 +195,14 @@ def sort_list_of_dicts_by_first_key(dicts: List[Dict]) -> List[Dict]: return sorted(dicts, key=lambda d: list(d.keys())[0]) +def transform_collection_to_sentence(collection: Collection[Text]) -> Text: + """Transforms e.g. a list like ['A', 'B', 'C'] into a sentence 'A, B and C'.""" + x = list(collection) + if len(x) >= 2: + return ", ".join(map(str, x[:-1])) + " and " + x[-1] + return "".join(collection) + + # noinspection PyUnresolvedReferences def class_from_module_path( module_path: Text, lookup_path: Optional[Text] = None @@ -184,7 +228,7 @@ def class_from_module_path( m = importlib.import_module(lookup_path) return getattr(m, module_path) else: - raise ImportError("Cannot retrieve class from path {}.".format(module_path)) + raise ImportError(f"Cannot retrieve class from path {module_path}.") def minimal_kwargs( @@ -224,9 +268,7 @@ def write_global_config_value(name: Text, value: Any) -> None: c[name] = value rasa.core.utils.dump_obj_as_yaml_to_file(GLOBAL_USER_CONFIG_PATH, c) except Exception as e: - logger.warning( - "Failed to write global config. Error: {}. Skipping." "".format(e) - ) + logger.warning(f"Failed to write global config. Error: {e}. Skipping.") def read_global_config_value(name: Text, unavailable_ok: bool = True) -> Any: @@ -236,7 +278,7 @@ def not_found(): if unavailable_ok: return None else: - raise ValueError("Configuration '{}' key not found.".format(name)) + raise ValueError(f"Configuration '{name}' key not found.") if not os.path.exists(GLOBAL_USER_CONFIG_PATH): return not_found() @@ -253,13 +295,28 @@ def mark_as_experimental_feature(feature_name: Text) -> None: """Warns users that they are using an experimental feature.""" logger.warning( - "The {} is currently experimental and might change or be " + f"The {feature_name} is currently experimental and might change or be " "removed in the future 🔬 Please share your feedback on it in the " "forum (https://forum.rasa.com) to help us make this feature " - "ready for production.".format(feature_name) + "ready for production." ) +def update_existing_keys( + original: Dict[Any, Any], updates: Dict[Any, Any] +) -> Dict[Any, Any]: + """Iterate through all the updates and update a value in the original dictionary. 
+ + If the updates contain a key that is not present in the original dict, it will + be ignored.""" + + updated = original.copy() + for k, v in updates.items(): + if k in updated: + updated[k] = v + return updated + + def lazy_property(function: Callable) -> Any: """Allows to avoid recomputing a property over and over. @@ -276,3 +333,72 @@ def _lazyprop(self): return getattr(self, attr_name) return _lazyprop + + +def raise_warning( + message: Text, + category: Optional[Type[Warning]] = None, + docs: Optional[Text] = None, + **kwargs: Any, +) -> None: + """Emit a `warnings.warn` with sensible defaults and a colored warning msg.""" + + original_formatter = warnings.formatwarning + + def should_show_source_line() -> bool: + if "stacklevel" not in kwargs: + if category == UserWarning or category is None: + return False + if category == FutureWarning: + return False + return True + + def formatwarning( + message: Text, + category: Optional[Type[Warning]], + filename: Text, + lineno: Optional[int], + line: Optional[Text] = None, + ): + """Function to format a warning the standard way.""" + + if not should_show_source_line(): + if docs: + line = f"More info at {docs}" + else: + line = "" + + formatted_message = original_formatter( + message, category, filename, lineno, line + ) + return utils.wrap_with_color(formatted_message, color=bcolors.WARNING) + + if "stacklevel" not in kwargs: + # try to set useful defaults for the most common warning categories + if category == DeprecationWarning: + kwargs["stacklevel"] = 3 + elif category in (UserWarning, FutureWarning): + kwargs["stacklevel"] = 2 + + warnings.formatwarning = formatwarning + warnings.warn(message, category=category, **kwargs) + warnings.formatwarning = original_formatter + + +class RepeatedLogFilter(logging.Filter): + """Filter repeated log records.""" + + last_log = None + + def filter(self, record): + current_log = ( + record.levelno, + record.pathname, + record.lineno, + record.msg, + record.args, + ) + if current_log != self.last_log: + self.last_log = current_log + return True + return False diff --git a/rasa/utils/endpoints.py b/rasa/utils/endpoints.py index 36c419483511..033b3ff34da3 100644 --- a/rasa/utils/endpoints.py +++ b/rasa/utils/endpoints.py @@ -1,10 +1,9 @@ +import aiohttp import logging import os - -import aiohttp -from typing import Any, Optional, Text, Dict - +from aiohttp.client_exceptions import ContentTypeError from sanic.request import Request +from typing import Any, Optional, Text, Dict import rasa.utils.io from rasa.constants import DEFAULT_REQUEST_TIMEOUT @@ -43,14 +42,20 @@ def concat_url(base: Text, subpath: Optional[Text]) -> Text: Strips leading slashes from the subpath if necessary. This behaves differently than `urlparse.urljoin` and will not treat the subpath as a base url if it starts with `/` but will always append it to the - `base`.""" + `base`. + Args: + base: Base URL. + subpath: Optional path to append to the base URL. + + Returns: + Concatenated URL with base and subpath. + """ if not subpath: if base.endswith("/"): logger.debug( - "The URL '{}' has a trailing slash. Please make sure the " - "target server supports trailing slashes for this " - "endpoint.".format(base) + f"The URL '{base}' has a trailing slash. Please make sure the " + f"target server supports trailing slashes for this endpoint." 
) return base @@ -62,7 +67,7 @@ def concat_url(base: Text, subpath: Optional[Text]) -> Text: return url + subpath -class EndpointConfig(object): +class EndpointConfig: """Configuration for an external HTTP endpoint.""" def __init__( @@ -73,7 +78,7 @@ def __init__( basic_auth: Dict[Text, Text] = None, token: Optional[Text] = None, token_name: Text = "token", - **kwargs + **kwargs, ): self.url = url self.params = params if params else {} @@ -84,7 +89,7 @@ def __init__( self.type = kwargs.pop("store_type", kwargs.pop("type", None)) self.kwargs = kwargs - def session(self): + def session(self) -> aiohttp.ClientSession: # create authentication parameters if self.basic_auth: auth = aiohttp.BasicAuth( @@ -99,7 +104,9 @@ def session(self): timeout=aiohttp.ClientTimeout(total=DEFAULT_REQUEST_TIMEOUT), ) - def combine_parameters(self, kwargs=None): + def combine_parameters( + self, kwargs: Optional[Dict[Text, Any]] = None + ) -> Dict[Text, Any]: # construct GET parameters params = self.params.copy() @@ -117,10 +124,9 @@ async def request( method: Text = "post", subpath: Optional[Text] = None, content_type: Optional[Text] = "application/json", - return_method: Text = "json", - **kwargs: Any - ): - """Send a HTTP request to the endpoint. + **kwargs: Any, + ) -> Optional[Any]: + """Send a HTTP request to the endpoint. Return json response, if available. All additional arguments will get passed through to aiohttp's `session.request`.""" @@ -141,19 +147,22 @@ async def request( url, headers=headers, params=self.combine_parameters(kwargs), - **kwargs - ) as resp: - if resp.status >= 400: + **kwargs, + ) as response: + if response.status >= 400: raise ClientResponseError( - resp.status, resp.reason, await resp.content.read() + response.status, response.reason, await response.content.read() ) - return await getattr(resp, return_method)() + try: + return await response.json() + except ContentTypeError: + return None @classmethod - def from_dict(cls, data): + def from_dict(cls, data) -> "EndpointConfig": return EndpointConfig(**data) - def copy(self): + def copy(self) -> "EndpointConfig": return EndpointConfig( self.url, self.params, @@ -161,10 +170,10 @@ def copy(self): self.basic_auth, self.token, self.token_name, - **self.kwargs + **self.kwargs, ) - def __eq__(self, other): + def __eq__(self, other) -> bool: if isinstance(self, type(other)): return ( other.url == self.url @@ -177,16 +186,16 @@ def __eq__(self, other): else: return False - def __ne__(self, other): + def __ne__(self, other) -> bool: return not self.__eq__(other) class ClientResponseError(aiohttp.ClientError): - def __init__(self, status, message, text): + def __init__(self, status: int, message: Text, text: Text) -> None: self.status = status self.message = message self.text = text - super().__init__("{}, {}, body='{}'".format(status, message, text)) + super().__init__(f"{status}, {message}, body='{text}'") def bool_arg(request: Request, name: Text, default: bool = True) -> bool: @@ -214,5 +223,5 @@ def float_arg( try: return float(str(arg)) except (ValueError, TypeError): - logger.warning("Failed to convert '{}' to float.".format(arg)) + logger.warning(f"Failed to convert '{arg}' to float.") return default diff --git a/rasa/utils/io.py b/rasa/utils/io.py index 990af4b17a23..d1a7743d1d3d 100644 --- a/rasa/utils/io.py +++ b/rasa/utils/io.py @@ -1,27 +1,40 @@ import asyncio import errno +import json import logging import os +import pickle +import re import tarfile import tempfile -import typing import warnings import zipfile import glob from 
asyncio import AbstractEventLoop -from io import BytesIO as IOReader -from typing import Text, Any, Dict, Union, List, Type, Callable +from collections import OrderedDict +from io import BytesIO as IOReader, StringIO +from pathlib import Path +from typing import Text, Any, Dict, Union, List, Type, Callable, TYPE_CHECKING, Match import ruamel.yaml as yaml -import simplejson +from ruamel.yaml import RoundTripRepresenter -from rasa.constants import ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL +from rasa.constants import ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL, YAML_VERSION -if typing.TYPE_CHECKING: +if TYPE_CHECKING: from prompt_toolkit.validation import Validator +DEFAULT_ENCODING = "utf-8" +ESCAPE_DCT = {"\b": "\\b", "\f": "\\f", "\n": "\\n", "\r": "\\r", "\t": "\\t"} +ESCAPE = re.compile(f'[{"".join(ESCAPE_DCT.values())}]') +UNESCAPE_DCT = {espaced_char: char for char, espaced_char in ESCAPE_DCT.items()} +UNESCAPE = re.compile(f'[{"".join(UNESCAPE_DCT.values())}]') +GROUP_COMPLETE_MATCH = 0 -def configure_colored_logging(loglevel): +YAML_LINE_MAX_WIDTH = 4096 + + +def configure_colored_logging(loglevel: Text) -> None: import coloredlogs loglevel = loglevel or os.environ.get(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL) @@ -39,7 +52,9 @@ def configure_colored_logging(loglevel): ) -def enable_async_loop_debugging(event_loop: AbstractEventLoop) -> AbstractEventLoop: +def enable_async_loop_debugging( + event_loop: AbstractEventLoop, slow_callback_duration: float = 0.1 +) -> AbstractEventLoop: logging.info( "Enabling coroutine debugging. Loop id {}.".format(id(asyncio.get_event_loop())) ) @@ -49,7 +64,7 @@ def enable_async_loop_debugging(event_loop: AbstractEventLoop) -> AbstractEventL # Make the threshold for "slow" tasks very very small for # illustration. The default is 0.1 (= 100 milliseconds). - event_loop.slow_callback_duration = 0.001 + event_loop.slow_callback_duration = slow_callback_duration # Report all mistakes managing asynchronous resources. warnings.simplefilter("always", ResourceWarning) @@ -68,11 +83,8 @@ def construct_yaml_str(self, node): yaml.SafeLoader.add_constructor("tag:yaml.org,2002:str", construct_yaml_str) -def replace_environment_variables(): +def replace_environment_variables() -> None: """Enable yaml loader to process the environment variables in the yaml.""" - import re - import os - # eg. ${USER_NAME}, ${PASSWORD} env_var_pattern = re.compile(r"^(.*)\$\{(.*)\}(.*)$") yaml.add_implicit_resolver("!env_var", env_var_pattern) @@ -93,52 +105,54 @@ def env_var_constructor(loader, node): yaml.SafeConstructor.add_constructor("!env_var", env_var_constructor) -def read_yaml(content: Text) -> Union[List[Any], Dict[Text, Any]]: +def read_yaml(content: Text) -> Any: """Parses yaml from a text. - Args: + Args: content: A text containing yaml content. + + Raises: + ruamel.yaml.parser.ParserError: If there was an error when parsing the YAML. """ fix_yaml_loader() replace_environment_variables() yaml_parser = yaml.YAML(typ="safe") - yaml_parser.version = "1.2" - yaml_parser.unicode_supplementary = True + yaml_parser.version = YAML_VERSION + yaml_parser.preserve_quotes = True - # noinspection PyUnresolvedReferences - try: - return yaml_parser.load(content) or {} - except yaml.scanner.ScannerError: - # A `ruamel.yaml.scanner.ScannerError` might happen due to escaped - # unicode sequences that form surrogate pairs. Try converting the input - # to a parsable format based on - # https://stackoverflow.com/a/52187065/3429596. 
+ if _is_ascii(content): + # Required to make sure emojis are correctly parsed content = ( content.encode("utf-8") .decode("raw_unicode_escape") .encode("utf-16", "surrogatepass") .decode("utf-16") ) - return yaml_parser.load(content) or {} + return yaml_parser.load(content) or {} -def read_file(filename: Text, encoding: Text = "utf-8") -> Any: + +def _is_ascii(text: Text) -> bool: + return all(ord(character) < 128 for character in text) + + +def read_file(filename: Union[Text, Path], encoding: Text = DEFAULT_ENCODING) -> Any: """Read text from a file.""" try: with open(filename, encoding=encoding) as f: return f.read() except FileNotFoundError: - raise ValueError("File '{}' does not exist.".format(filename)) + raise ValueError(f"File '{filename}' does not exist.") -def read_json_file(filename: Text) -> Any: +def read_json_file(filename: Union[Text, Path]) -> Any: """Read json from a file.""" content = read_file(filename) try: - return simplejson.loads(content) + return json.loads(content) except ValueError as e: raise ValueError( "Failed to read json from '{}'. Error: " @@ -146,13 +160,42 @@ def read_json_file(filename: Text) -> Any: ) +def dump_obj_as_json_to_file(filename: Union[Text, Path], obj: Any) -> None: + """Dump an object as a json string to a file.""" + + write_text_file(json.dumps(obj, indent=2), filename) + + +def pickle_dump(filename: Union[Text, Path], obj: Any) -> None: + """Saves object to file. + + Args: + filename: the filename to save the object to + obj: the object to store + """ + with open(filename, "wb") as f: + pickle.dump(obj, f) + + +def pickle_load(filename: Union[Text, Path]) -> Any: + """Loads an object from a file. + + Args: + filename: the filename to load the object from + + Returns: the loaded object + """ + with open(filename, "rb") as f: + return pickle.load(f) + + def read_config_file(filename: Text) -> Dict[Text, Any]: """Parses a yaml configuration file. Content needs to be a dictionary - Args: + Args: filename: The path to the file which should be read. """ - content = read_yaml(read_file(filename, "utf-8")) + content = read_yaml(read_file(filename)) if content is None: return {} @@ -166,13 +209,13 @@ def read_config_file(filename: Text) -> Dict[Text, Any]: ) -def read_yaml_file(filename: Text) -> Union[List[Any], Dict[Text, Any]]: +def read_yaml_file(filename: Union[Text, Path]) -> Union[List[Any], Dict[Text, Any]]: """Parses a yaml file. - Args: + Args: filename: The path to the file which should be read. """ - return read_yaml(read_file(filename, "utf-8")) + return read_yaml(read_file(filename, DEFAULT_ENCODING)) def unarchive(byte_array: bytes, directory: Text) -> Text: @@ -192,15 +235,88 @@ def unarchive(byte_array: bytes, directory: Text) -> Text: return directory -def write_yaml_file(data: Dict, filename: Text): - """Writes a yaml file. +def convert_to_ordered_dict(obj: Any) -> Any: + """Convert object to an `OrderedDict`. + + Args: + obj: Object to convert. + + Returns: + An `OrderedDict` with all nested dictionaries converted if `obj` is a + dictionary, otherwise the object itself. 
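    Example (illustrative; insertion order of the input keys is preserved):

        >>> convert_to_ordered_dict({"b": 1, "a": {"c": 2}})
        OrderedDict([('b', 1), ('a', OrderedDict([('c', 2)]))])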
+ """ + # use recursion on lists + if isinstance(obj, list): + return [convert_to_ordered_dict(element) for element in obj] + + if isinstance(obj, dict): + out = OrderedDict() + # use recursion on dictionaries + for k, v in obj.items(): + out[k] = convert_to_ordered_dict(v) + + return out + + # return all other objects + return obj + - Args: +def _enable_ordered_dict_yaml_dumping() -> None: + """Ensure that `OrderedDict`s are dumped so that the order of keys is respected.""" + yaml.add_representer( + OrderedDict, + RoundTripRepresenter.represent_dict, + representer=RoundTripRepresenter, + ) + + +def write_yaml( + data: Any, + target: Union[Text, Path, StringIO], + should_preserve_key_order: bool = False, +) -> None: + """Writes a yaml to the file or to the stream + + Args: data: The data to write. - filename: The path to the file which should be written. + target: The path to the file which should be written or a stream object + should_preserve_key_order: Whether to force preserve key order in `data`. """ - with open(filename, "w", encoding="utf-8") as outfile: - yaml.dump(data, outfile, default_flow_style=False) + _enable_ordered_dict_yaml_dumping() + + if should_preserve_key_order: + data = convert_to_ordered_dict(data) + + dumper = yaml.YAML() + # no wrap lines + dumper.width = YAML_LINE_MAX_WIDTH + + if isinstance(target, StringIO): + dumper.dump(data, target) + return + + with Path(target).open("w", encoding=DEFAULT_ENCODING) as outfile: + dumper.dump(data, outfile) + + +def write_text_file( + content: Text, + file_path: Union[Text, Path], + encoding: Text = DEFAULT_ENCODING, + append: bool = False, +) -> None: + """Writes text to a file. + + Args: + content: The content to write. + file_path: The path to which the content should be written. + encoding: The encoding which should be used. + append: Whether to append to the file or to truncate the file. 
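    Example (illustrative; `notes.txt` is a hypothetical path). The first call
    truncates the file, the second appends to it:

        >>> write_text_file("first line\n", "notes.txt")
        >>> write_text_file("second line\n", "notes.txt", append=True)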
+ + """ + mode = "a" if append else "w" + with open(file_path, mode, encoding=encoding) as file: + file.write(content) def is_subdirectory(path: Text, potential_parent_directory: Text) -> bool: @@ -218,7 +334,7 @@ def create_temporary_file(data: Any, suffix: Text = "", mode: Text = "w+") -> Te mode defines NamedTemporaryFile's mode parameter in py3.""" - encoding = None if "b" in mode else "utf-8" + encoding = None if "b" in mode else DEFAULT_ENCODING f = tempfile.NamedTemporaryFile( mode=mode, suffix=suffix, delete=False, encoding=encoding ) @@ -228,7 +344,13 @@ def create_temporary_file(data: Any, suffix: Text = "", mode: Text = "w+") -> Te return f.name -def create_path(file_path: Text): +def create_temporary_directory() -> Text: + """Creates a tempfile.TemporaryDirectory.""" + f = tempfile.TemporaryDirectory() + return f.name + + +def create_path(file_path: Text) -> None: """Makes sure all directories in the 'file_path' exists.""" parent_dir = os.path.dirname(os.path.abspath(file_path)) @@ -236,15 +358,10 @@ def create_path(file_path: Text): os.makedirs(parent_dir) -def create_directory_for_file(file_path: Text) -> None: +def create_directory_for_file(file_path: Union[Text, Path]) -> None: """Creates any missing parent directories of this file path.""" - try: - os.makedirs(os.path.dirname(file_path)) - except OSError as e: - # be happy if someone already created the path - if e.errno != errno.EEXIST: - raise + create_directory(os.path.dirname(file_path)) def file_type_validator( @@ -329,7 +446,7 @@ def list_directory(path: Text) -> List[Text]: return [path] elif os.path.isdir(path): results = [] - for base, dirs, files in os.walk(path): + for base, dirs, files in os.walk(path, followlinks=True): # sort files for same order across runs files = sorted(files, key=_filename_without_prefix) # add not hidden files @@ -360,11 +477,60 @@ def create_directory(directory_path: Text) -> None: def zip_folder(folder: Text) -> Text: """Create an archive from a folder.""" - import tempfile import shutil zipped_path = tempfile.NamedTemporaryFile(delete=False) zipped_path.close() # WARN: not thread-safe! - return shutil.make_archive(zipped_path.name, str("zip"), folder) + return shutil.make_archive(zipped_path.name, "zip", folder) + + +def json_unpickle(file_name: Union[Text, Path]) -> Any: + """Unpickle an object from file using json. + + Args: + file_name: the file to load the object from + + Returns: the object + """ + import jsonpickle.ext.numpy as jsonpickle_numpy + import jsonpickle + + jsonpickle_numpy.register_handlers() + + file_content = read_file(file_name) + return jsonpickle.loads(file_content) + + +def json_pickle(file_name: Union[Text, Path], obj: Any) -> None: + """Pickle an object to a file using json. 
+ + Args: + file_name: the file to store the object to + obj: the object to store + """ + import jsonpickle.ext.numpy as jsonpickle_numpy + import jsonpickle + + jsonpickle_numpy.register_handlers() + + write_text_file(jsonpickle.dumps(obj), file_name) + + +def encode_string(s: Text) -> Text: + """Return an encoded python string.""" + + def replace(match: Match) -> Text: + return ESCAPE_DCT[match.group(GROUP_COMPLETE_MATCH)] + + return ESCAPE.sub(replace, s) + + +def decode_string(s: Text) -> Text: + """Return a decoded python string.""" + + def replace(match: Match) -> Text: + return UNESCAPE_DCT[match.group(GROUP_COMPLETE_MATCH)] + + return UNESCAPE.sub(replace, s) diff --git a/rasa/utils/plotting.py b/rasa/utils/plotting.py new file mode 100644 index 000000000000..6035464e1c55 --- /dev/null +++ b/rasa/utils/plotting.py @@ -0,0 +1,181 @@ +import logging +import itertools +import os + +import numpy as np +from typing import List, Text, Optional, Union, Any +import matplotlib + +from rasa.constants import RESULTS_FILE +import rasa.utils.io as io_utils + +logger = logging.getLogger(__name__) + + +# At first, matplotlib will be initialized with default OS-specific available backend +# if that didn't happen, we'll try to set it up manually +if matplotlib.get_backend() is not None: + pass +else: # pragma: no cover + try: + # If the `tkinter` package is available, we can use the `TkAgg` backend + import tkinter + + matplotlib.use("TkAgg") + except ImportError: + matplotlib.use("agg") + + +def plot_confusion_matrix( + confusion_matrix: np.ndarray, + classes: Union[np.ndarray, List[Text]], + normalize: bool = False, + title: Text = "Confusion matrix", + color_map: Any = None, + zmin: int = 1, + output_file: Optional[Text] = None, +) -> None: + """ + Print and plot the provided confusion matrix. + Normalization can be applied by setting `normalize=True`. + + Args: + confusion_matrix: confusion matrix to plot + classes: class labels + normalize: If set to true, normalization will be applied. 
+ title: title of the plot + color_map: color mapping + zmin: + output_file: output file to save plot to + + """ + import matplotlib.pyplot as plt + from matplotlib.colors import LogNorm + + zmax = confusion_matrix.max() + plt.clf() + if not color_map: + color_map = plt.cm.Blues + plt.imshow( + confusion_matrix, + interpolation="nearest", + cmap=color_map, + aspect="auto", + norm=LogNorm(vmin=zmin, vmax=zmax), + ) + plt.title(title) + plt.colorbar() + tick_marks = np.arange(len(classes)) + plt.xticks(tick_marks, classes, rotation=90) + plt.yticks(tick_marks, classes) + + if normalize: + confusion_matrix = ( + confusion_matrix.astype("float") + / confusion_matrix.sum(axis=1)[:, np.newaxis] + ) + logger.info(f"Normalized confusion matrix: \n{confusion_matrix}") + else: + logger.info(f"Confusion matrix, without normalization: \n{confusion_matrix}") + + thresh = confusion_matrix.max() / 2.0 + for i, j in itertools.product( + range(confusion_matrix.shape[0]), range(confusion_matrix.shape[1]) + ): + plt.text( + j, + i, + confusion_matrix[i, j], + horizontalalignment="center", + color="white" if confusion_matrix[i, j] > thresh else "black", + ) + + plt.ylabel("True label") + plt.xlabel("Predicted label") + + # save confusion matrix to file before showing it + if output_file: + fig = plt.gcf() + fig.set_size_inches(20, 20) + fig.savefig(output_file, bbox_inches="tight") + + +def plot_histogram( + hist_data: List[List[float]], title: Text, output_file: Optional[Text] = None +) -> None: + """ + Plot a histogram of the confidence distribution of the predictions in two columns. + + Args: + hist_data: histogram data + output_file: output file to save the plot ot + """ + import matplotlib.pyplot as plt + + plt.gcf().clear() + + # Wine-ish colour for the confidences of hits. + # Blue-ish colour for the confidences of misses. + colors = ["#009292", "#920000"] + bins = [0.05 * i for i in range(1, 21)] + + plt.xlim([0, 1]) + plt.hist(hist_data, bins=bins, color=colors) + plt.xticks(bins) + plt.title(title) + plt.xlabel("Confidence") + plt.ylabel("Number of Samples") + plt.legend(["hits", "misses"]) + + if output_file: + fig = plt.gcf() + fig.set_size_inches(10, 10) + fig.savefig(output_file, bbox_inches="tight") + + +def plot_curve( + output_directory: Text, + number_of_examples: List[int], + x_label_text: Text, + y_label_text: Text, + graph_path: Text, +) -> None: + """Plot the results from a model comparison. 
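    For each configuration in the results file the curve shows the mean over all
    runs, surrounded by a shaded band of one standard deviation.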
+ + Args: + output_directory: Output directory to save resulting plots to + number_of_examples: Number of examples per run + x_label_text: text for the x axis + y_label_text: text for the y axis + graph_path: output path of the plot + """ + import matplotlib.pyplot as plt + + ax = plt.gca() + + # load results from file + data = io_utils.read_json_file(os.path.join(output_directory, RESULTS_FILE)) + x = number_of_examples + + # compute mean of all the runs for different configs + for label in data.keys(): + if len(data[label]) == 0: + continue + mean = np.mean(data[label], axis=0) + std = np.std(data[label], axis=0) + ax.plot(x, mean, label=label, marker=".") + ax.fill_between( + x, + [m - s for m, s in zip(mean, std)], + [m + s for m, s in zip(mean, std)], + color="#6b2def", + alpha=0.2, + ) + ax.legend(loc=4) + + ax.set_xlabel(x_label_text) + ax.set_ylabel(y_label_text) + + plt.savefig(graph_path, format="pdf") + + logger.info(f"Comparison graph saved to '{graph_path}'.") diff --git a/rasa/utils/tensorflow/__init__.py b/rasa/utils/tensorflow/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/rasa/utils/tensorflow/constants.py b/rasa/utils/tensorflow/constants.py new file mode 100644 index 000000000000..2943f01c3907 --- /dev/null +++ b/rasa/utils/tensorflow/constants.py @@ -0,0 +1,77 @@ +# constants for configuration parameters of our tensorflow models + +LABEL = "label" +HIDDEN_LAYERS_SIZES = "hidden_layers_sizes" +SHARE_HIDDEN_LAYERS = "share_hidden_layers" + +TRANSFORMER_SIZE = "transformer_size" +NUM_TRANSFORMER_LAYERS = "number_of_transformer_layers" +NUM_HEADS = "number_of_attention_heads" +UNIDIRECTIONAL_ENCODER = "unidirectional_encoder" +KEY_RELATIVE_ATTENTION = "use_key_relative_attention" +VALUE_RELATIVE_ATTENTION = "use_value_relative_attention" +MAX_RELATIVE_POSITION = "max_relative_position" + +BATCH_SIZES = "batch_size" +BATCH_STRATEGY = "batch_strategy" +EPOCHS = "epochs" +RANDOM_SEED = "random_seed" +LEARNING_RATE = "learning_rate" + +DENSE_DIMENSION = "dense_dimension" +CONCAT_DIMENSION = "concat_dimension" +EMBEDDING_DIMENSION = "embedding_dimension" + +SIMILARITY_TYPE = "similarity_type" +LOSS_TYPE = "loss_type" +NUM_NEG = "number_of_negative_examples" +MAX_POS_SIM = "maximum_positive_similarity" +MAX_NEG_SIM = "maximum_negative_similarity" +USE_MAX_NEG_SIM = "use_maximum_negative_similarity" + +SCALE_LOSS = "scale_loss" +REGULARIZATION_CONSTANT = "regularization_constant" +NEGATIVE_MARGIN_SCALE = "negative_margin_scale" +DROP_RATE = "drop_rate" +DROP_RATE_ATTENTION = "drop_rate_attention" +DROP_RATE_DIALOGUE = "drop_rate_dialogue" +DROP_RATE_LABEL = "drop_rate_label" + +WEIGHT_SPARSITY = "weight_sparsity" + +EVAL_NUM_EPOCHS = "evaluate_every_number_of_epochs" +EVAL_NUM_EXAMPLES = "evaluate_on_number_of_examples" + +INTENT_CLASSIFICATION = "intent_classification" +ENTITY_RECOGNITION = "entity_recognition" +MASKED_LM = "use_masked_language_model" + +SPARSE_INPUT_DROPOUT = "use_sparse_input_dropout" +DENSE_INPUT_DROPOUT = "use_dense_input_dropout" + +RANKING_LENGTH = "ranking_length" + +BILOU_FLAG = "BILOU_flag" + +RETRIEVAL_INTENT = "retrieval_intent" + +SOFTMAX = "softmax" +MARGIN = "margin" +AUTO = "auto" +INNER = "inner" +COSINE = "cosine" + +BALANCED = "balanced" +SEQUENCE = "sequence" + +POOLING = "pooling" +MAX_POOLING = "max" +MEAN_POOLING = "mean" + +TENSORBOARD_LOG_DIR = "tensorboard_log_directory" +TENSORBOARD_LOG_LEVEL = "tensorboard_log_level" + +SEQUENCE_FEATURES = "sequence_features" +SENTENCE_FEATURES = "sentence_features" + 
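# Usage sketch (hypothetical values; which component consumes which key is an
# assumption and not defined in this module): these names are intended to serve as
# the keys of a TensorFlow-based component's configuration dictionary, e.g.
#
#     from rasa.utils.tensorflow.constants import (
#         EPOCHS,
#         HIDDEN_LAYERS_SIZES,
#         NUM_TRANSFORMER_LAYERS,
#     )
#
#     component_config = {
#         EPOCHS: 100,                                # "epochs"
#         HIDDEN_LAYERS_SIZES: {"text": [256, 128]},  # "hidden_layers_sizes"
#         NUM_TRANSFORMER_LAYERS: 2,                  # "number_of_transformer_layers"
#     }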
+FEATURIZERS = "featurizers" diff --git a/rasa/utils/tensorflow/crf.py b/rasa/utils/tensorflow/crf.py new file mode 100644 index 000000000000..e4e33511a851 --- /dev/null +++ b/rasa/utils/tensorflow/crf.py @@ -0,0 +1,217 @@ +import tensorflow as tf + +from tensorflow_addons.utils.types import TensorLike +from typeguard import typechecked +from typing import Tuple + + +# original code taken from +# https://github.com/tensorflow/addons/blob/master/tensorflow_addons/text/crf.py +# (modified to our neeeds) + + +class CrfDecodeForwardRnnCell(tf.keras.layers.AbstractRNNCell): + """Computes the forward decoding in a linear-chain CRF.""" + + @typechecked + def __init__(self, transition_params: TensorLike, **kwargs) -> None: + """Initialize the CrfDecodeForwardRnnCell. + + Args: + transition_params: A [num_tags, num_tags] matrix of binary + potentials. This matrix is expanded into a + [1, num_tags, num_tags] in preparation for the broadcast + summation occurring within the cell. + """ + super().__init__(**kwargs) + self._transition_params = tf.expand_dims(transition_params, 0) + self._num_tags = transition_params.shape[0] + + @property + def state_size(self) -> int: + return self._num_tags + + @property + def output_size(self) -> int: + return self._num_tags + + def build(self, input_shape): + super().build(input_shape) + + def call( + self, inputs: TensorLike, state: TensorLike + ) -> Tuple[tf.Tensor, tf.Tensor]: + """Build the CrfDecodeForwardRnnCell. + + Args: + inputs: A [batch_size, num_tags] matrix of unary potentials. + state: A [batch_size, num_tags] matrix containing the previous step's + score values. + + Returns: + output: A [batch_size, num_tags * 2] matrix of backpointers and scores. + new_state: A [batch_size, num_tags] matrix of new score values. + """ + state = tf.expand_dims(state[0], 2) + transition_scores = state + self._transition_params + new_state = inputs + tf.reduce_max(transition_scores, [1]) + + backpointers = tf.argmax(transition_scores, 1) + backpointers = tf.cast(backpointers, tf.float32) + + # apply softmax to transition_scores to get scores in range from 0 to 1 + scores = tf.reduce_max(tf.nn.softmax(transition_scores, axis=1), [1]) + + # In the RNN implementation only the first value that is returned from a cell + # is kept throughout the RNN, so that you will have the values from each time + # step in the final output. As we need the backpointers as well as the scores + # for each time step, we concatenate them. + return tf.concat([backpointers, scores], axis=1), new_state + + +def crf_decode_forward( + inputs: TensorLike, + state: TensorLike, + transition_params: TensorLike, + sequence_lengths: TensorLike, +) -> Tuple[tf.Tensor, tf.Tensor]: + """Computes forward decoding in a linear-chain CRF. + + Args: + inputs: A [batch_size, num_tags] matrix of unary potentials. + state: A [batch_size, num_tags] matrix containing the previous step's + score values. + transition_params: A [num_tags, num_tags] matrix of binary potentials. + sequence_lengths: A [batch_size] vector of true sequence lengths. + + Returns: + output: A [batch_size, num_tags * 2] matrix of backpointers and scores. + new_state: A [batch_size, num_tags] matrix of new score values. 
+ """ + sequence_lengths = tf.cast(sequence_lengths, dtype=tf.int32) + mask = tf.sequence_mask(sequence_lengths, tf.shape(inputs)[1]) + crf_fwd_cell = CrfDecodeForwardRnnCell(transition_params) + crf_fwd_layer = tf.keras.layers.RNN( + crf_fwd_cell, return_sequences=True, return_state=True + ) + return crf_fwd_layer(inputs, state, mask=mask) + + +def crf_decode_backward( + backpointers: TensorLike, scores: TensorLike, state: TensorLike +) -> Tuple[tf.Tensor, tf.Tensor]: + """Computes backward decoding in a linear-chain CRF. + + Args: + backpointers: A [batch_size, num_tags] matrix of backpointer of next step + (in time order). + scores: A [batch_size, num_tags] matrix of scores of next step (in time order). + state: A [batch_size, 1] matrix of tag index of next step. + + Returns: + new_tags: A [batch_size, num_tags] tensor containing the new tag indices. + new_scores: A [batch_size, num_tags] tensor containing the new score values. + """ + backpointers = tf.transpose(backpointers, [1, 0, 2]) + scores = tf.transpose(scores, [1, 0, 2]) + + def _scan_fn(_state: TensorLike, _inputs: TensorLike) -> tf.Tensor: + _state = tf.cast(tf.squeeze(_state, axis=[1]), dtype=tf.int32) + idxs = tf.stack([tf.range(tf.shape(_inputs)[0]), _state], axis=1) + return tf.expand_dims(tf.gather_nd(_inputs, idxs), axis=-1) + + output_tags = tf.scan(_scan_fn, backpointers, state) + # the dtype of the input parameters of tf.scan need to match + # convert state to float32 to match the type of scores + state = tf.cast(state, dtype=tf.float32) + output_scores = tf.scan(_scan_fn, scores, state) + + return tf.transpose(output_tags, [1, 0, 2]), tf.transpose(output_scores, [1, 0, 2]) + + +def crf_decode( + potentials: TensorLike, transition_params: TensorLike, sequence_length: TensorLike +) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: + """Decode the highest scoring sequence of tags. + + Args: + potentials: A [batch_size, max_seq_len, num_tags] tensor of + unary potentials. + transition_params: A [num_tags, num_tags] matrix of + binary potentials. + sequence_length: A [batch_size] vector of true sequence lengths. + + Returns: + decode_tags: A [batch_size, max_seq_len] matrix, with dtype `tf.int32`. + Contains the highest scoring tag indices. + decode_scores: A [batch_size, max_seq_len] matrix, containing the score of + `decode_tags`. + best_score: A [batch_size] vector, containing the best score of `decode_tags`. + """ + sequence_length = tf.cast(sequence_length, dtype=tf.int32) + + # If max_seq_len is 1, we skip the algorithm and simply return the + # argmax tag and the max activation. + def _single_seq_fn(): + decode_tags = tf.cast(tf.argmax(potentials, axis=2), dtype=tf.int32) + decode_scores = tf.reduce_max(tf.nn.softmax(potentials, axis=2), axis=2) + best_score = tf.reshape(tf.reduce_max(potentials, axis=2), shape=[-1]) + return decode_tags, decode_scores, best_score + + def _multi_seq_fn(): + # Computes forward decoding. Get last score and backpointers. 
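+        # The unary potentials of the first time step serve as the initial
+        # state of the forward (Viterbi) pass; the remaining time steps are
+        # fed as inputs to the forward RNN, so the pass runs over
+        # `sequence_length - 1` steps.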
+ initial_state = tf.slice(potentials, [0, 0, 0], [-1, 1, -1]) + initial_state = tf.squeeze(initial_state, axis=[1]) + inputs = tf.slice(potentials, [0, 1, 0], [-1, -1, -1]) + + sequence_length_less_one = tf.maximum( + tf.constant(0, dtype=tf.int32), sequence_length - 1 + ) + + output, last_score = crf_decode_forward( + inputs, initial_state, transition_params, sequence_length_less_one + ) + + # output is a matrix of size [batch-size, max-seq-length, num-tags * 2] + # split the matrix on axis 2 to get the backpointers and scores, which are + # both of size [batch-size, max-seq-length, num-tags] + backpointers, scores = tf.split(output, 2, axis=2) + + backpointers = tf.cast(backpointers, dtype=tf.int32) + backpointers = tf.reverse_sequence( + backpointers, sequence_length_less_one, seq_axis=1 + ) + + scores = tf.reverse_sequence(scores, sequence_length_less_one, seq_axis=1) + + initial_state = tf.cast(tf.argmax(last_score, axis=1), dtype=tf.int32) + initial_state = tf.expand_dims(initial_state, axis=-1) + + initial_score = tf.reduce_max(tf.nn.softmax(last_score, axis=1), axis=[1]) + initial_score = tf.expand_dims(initial_score, axis=-1) + + decode_tags, decode_scores = crf_decode_backward( + backpointers, scores, initial_state + ) + + decode_tags = tf.squeeze(decode_tags, axis=[2]) + decode_tags = tf.concat([initial_state, decode_tags], axis=1) + decode_tags = tf.reverse_sequence(decode_tags, sequence_length, seq_axis=1) + + decode_scores = tf.squeeze(decode_scores, axis=[2]) + decode_scores = tf.concat([initial_score, decode_scores], axis=1) + decode_scores = tf.reverse_sequence(decode_scores, sequence_length, seq_axis=1) + + best_score = tf.reduce_max(last_score, axis=1) + + return decode_tags, decode_scores, best_score + + if potentials.shape[1] is not None: + # shape is statically know, so we just execute + # the appropriate code path + if potentials.shape[1] == 1: + return _single_seq_fn() + + return _multi_seq_fn() + + return tf.cond(tf.equal(tf.shape(potentials)[1], 1), _single_seq_fn, _multi_seq_fn) diff --git a/rasa/utils/tensorflow/environment.py b/rasa/utils/tensorflow/environment.py new file mode 100644 index 000000000000..cc8977f38e1b --- /dev/null +++ b/rasa/utils/tensorflow/environment.py @@ -0,0 +1,142 @@ +import logging +import os +from typing import Text, Dict +import typing +import rasa.utils.common as rasa_utils +from rasa.constants import ( + ENV_GPU_CONFIG, + ENV_CPU_INTER_OP_CONFIG, + ENV_CPU_INTRA_OP_CONFIG, +) + +if typing.TYPE_CHECKING: + from tensorflow import config as tf_config + +logger = logging.getLogger(__name__) + + +def _setup_gpu_environment() -> None: + """Set configuration for TensorFlow GPU environment based on the environment variable set.""" + + gpu_memory_config = os.getenv(ENV_GPU_CONFIG) + + if not gpu_memory_config: + return + + # Import from tensorflow only if necessary (environment variable was set) + from tensorflow import config as tf_config + + parsed_gpu_config = _parse_gpu_config(gpu_memory_config) + physical_gpus = tf_config.list_physical_devices("GPU") + + # Logic taken from https://www.tensorflow.org/guide/gpu + if physical_gpus: + for gpu_id, gpu_id_memory in parsed_gpu_config.items(): + _allocate_gpu_memory(physical_gpus[gpu_id], gpu_id_memory) + + else: + rasa_utils.raise_warning( + f"You have an environment variable '{ENV_GPU_CONFIG}' set but no GPUs were detected to configure." 
+ ) + + +def _allocate_gpu_memory( + gpu_instance: "tf_config.PhysicalDevice", logical_memory: int +) -> None: + """Create a new logical device for the requested amount of memory. + + Args: + gpu_instance: PhysicalDevice instance of a GPU device. + logical_memory: Absolute amount of memory to be allocated to the new logical device. + """ + + from tensorflow import config as tf_config + + try: + tf_config.experimental.set_virtual_device_configuration( + gpu_instance, + [ + tf_config.experimental.VirtualDeviceConfiguration( + memory_limit=logical_memory + ) + ], + ) + + except RuntimeError: + # Helper explanation of where the error comes from + raise RuntimeError( + "Error while setting up tensorflow environment. " + "Virtual devices must be set before GPUs have been initialized." + ) + + +def _parse_gpu_config(gpu_memory_config: Text) -> Dict[int, int]: + """Parse GPU configuration variable from a string to a dict. + + Args: + gpu_memory_config: String containing the configuration for GPU usage. + + Returns: + Parsed configuration as a dictionary with GPU IDs as keys and requested memory as the value. + """ + + # gpu_config is of format "gpu_id_1:gpu_id_1_memory, gpu_id_2: gpu_id_2_memory" + # Parse it and store in a dictionary + parsed_gpu_config = {} + + try: + for instance in gpu_memory_config.split(","): + instance_gpu_id, instance_gpu_mem = instance.split(":") + instance_gpu_id = int(instance_gpu_id) + instance_gpu_mem = int(instance_gpu_mem) + + parsed_gpu_config[instance_gpu_id] = instance_gpu_mem + except ValueError: + # Helper explanation of where the error comes from + raise ValueError( + f"Error parsing GPU configuration. Please cross-check the format of '{ENV_GPU_CONFIG}' " + f"at https://rasa.com/docs/rasa/api/tensorflow_usage.html#restricting-absolute-gpu-memory-available ." + ) + + return parsed_gpu_config + + +def _setup_cpu_environment() -> None: + """Set configuration for the CPU environment based on the environment variable set.""" + + inter_op_parallel_threads = os.getenv(ENV_CPU_INTER_OP_CONFIG) + intra_op_parallel_threads = os.getenv(ENV_CPU_INTRA_OP_CONFIG) + + if not inter_op_parallel_threads and not intra_op_parallel_threads: + return + + from tensorflow import config as tf_config + + if inter_op_parallel_threads: + try: + inter_op_parallel_threads = int(inter_op_parallel_threads.strip()) + except ValueError: + raise ValueError( + f"Error parsing the environment variable '{ENV_CPU_INTER_OP_CONFIG}'. Please " + f"cross-check the value." + ) + + tf_config.threading.set_inter_op_parallelism_threads(inter_op_parallel_threads) + + if intra_op_parallel_threads: + try: + intra_op_parallel_threads = int(intra_op_parallel_threads.strip()) + except ValueError: + raise ValueError( + f"Error parsing the environment variable '{ENV_CPU_INTRA_OP_CONFIG}'. Please " + f"cross-check the value." 
+ ) + + tf_config.threading.set_intra_op_parallelism_threads(intra_op_parallel_threads) + + +def setup_tf_environment() -> None: + """Setup CPU and GPU related environment settings for TensorFlow.""" + + _setup_cpu_environment() + _setup_gpu_environment() diff --git a/rasa/utils/tensorflow/layers.py b/rasa/utils/tensorflow/layers.py new file mode 100644 index 000000000000..c6d051191e1e --- /dev/null +++ b/rasa/utils/tensorflow/layers.py @@ -0,0 +1,946 @@ +import logging +from typing import List, Optional, Text, Tuple, Callable, Union, Any +import tensorflow as tf +import tensorflow_addons as tfa +import rasa.utils.tensorflow.crf +from tensorflow.python.keras.utils import tf_utils +from tensorflow.python.keras import backend as K +from rasa.utils.tensorflow.constants import SOFTMAX, MARGIN, COSINE, INNER + +logger = logging.getLogger(__name__) + + +class SparseDropout(tf.keras.layers.Dropout): + """Applies Dropout to the input. + + Dropout consists in randomly setting + a fraction `rate` of input units to 0 at each update during training time, + which helps prevent overfitting. + + Arguments: + rate: Float between 0 and 1; fraction of the input units to drop. + """ + + def call( + self, inputs: tf.SparseTensor, training: Optional[Union[tf.Tensor, bool]] = None + ) -> tf.SparseTensor: + """Apply dropout to sparse inputs. + + Arguments: + inputs: Input sparse tensor (of any rank). + training: Python boolean indicating whether the layer should behave in + training mode (adding dropout) or in inference mode (doing nothing). + + Returns: + Output of dropout layer. + + Raises: + A ValueError if inputs is not a sparse tensor + """ + + if not isinstance(inputs, tf.SparseTensor): + raise ValueError("Input tensor should be sparse.") + + if training is None: + training = K.learning_phase() + + def dropped_inputs() -> tf.SparseTensor: + to_retain_prob = tf.random.uniform( + tf.shape(inputs.values), 0, 1, inputs.values.dtype + ) + to_retain = tf.greater_equal(to_retain_prob, self.rate) + return tf.sparse.retain(inputs, to_retain) + + outputs = tf_utils.smart_cond( + training, dropped_inputs, lambda: tf.identity(inputs) + ) + # need to explicitly recreate sparse tensor, because otherwise the shape + # information will be lost after `retain` + # noinspection PyProtectedMember + return tf.SparseTensor(outputs.indices, outputs.values, inputs._dense_shape) + + +class DenseForSparse(tf.keras.layers.Dense): + """Dense layer for sparse input tensor. + + Just your regular densely-connected NN layer but for sparse tensors. + + `Dense` implements the operation: + `output = activation(dot(input, kernel) + bias)` + where `activation` is the element-wise activation function + passed as the `activation` argument, `kernel` is a weights matrix + created by the layer, and `bias` is a bias vector created by the layer + (only applicable if `use_bias` is `True`). + + Note: If the input to the layer has a rank greater than 2, then + it is flattened prior to the initial dot product with `kernel`. + + Arguments: + units: Positive integer, dimensionality of the output space. + activation: Activation function to use. + If you don't specify anything, no activation is applied + (ie. "linear" activation: `a(x) = x`). + use_bias: Boolean, whether the layer uses a bias vector. + kernel_initializer: Initializer for the `kernel` weights matrix. + bias_initializer: Initializer for the bias vector. + reg_lambda: Float, regularization factor. + bias_regularizer: Regularizer function applied to the bias vector. 
+ activity_regularizer: Regularizer function applied to + the output of the layer (its "activation").. + kernel_constraint: Constraint function applied to + the `kernel` weights matrix. + bias_constraint: Constraint function applied to the bias vector. + + Input shape: + N-D tensor with shape: `(batch_size, ..., input_dim)`. + The most common situation would be + a 2D input with shape `(batch_size, input_dim)`. + + Output shape: + N-D tensor with shape: `(batch_size, ..., units)`. + For instance, for a 2D input with shape `(batch_size, input_dim)`, + the output would have shape `(batch_size, units)`. + """ + + def __init__(self, reg_lambda: float = 0, **kwargs: Any) -> None: + if reg_lambda > 0: + regularizer = tf.keras.regularizers.l2(reg_lambda) + else: + regularizer = None + + super().__init__(kernel_regularizer=regularizer, **kwargs) + + def call(self, inputs: tf.SparseTensor) -> tf.Tensor: + """Apply dense layer to sparse inputs. + + Arguments: + inputs: Input sparse tensor (of any rank). + + Returns: + Output of dense layer. + + Raises: + A ValueError if inputs is not a sparse tensor + """ + if not isinstance(inputs, tf.SparseTensor): + raise ValueError("Input tensor should be sparse.") + + # outputs will be 2D + outputs = tf.sparse.sparse_dense_matmul( + tf.sparse.reshape(inputs, [-1, tf.shape(inputs)[-1]]), self.kernel + ) + + if len(inputs.shape) == 3: + # reshape back + outputs = tf.reshape( + outputs, (tf.shape(inputs)[0], tf.shape(inputs)[1], -1) + ) + + if self.use_bias: + outputs = tf.nn.bias_add(outputs, self.bias) + if self.activation is not None: + return self.activation(outputs) + return outputs + + +class DenseWithSparseWeights(tf.keras.layers.Dense): + """Just your regular densely-connected NN layer but with sparse weights. + + `Dense` implements the operation: + `output = activation(dot(input, kernel) + bias)` + where `activation` is the element-wise activation function + passed as the `activation` argument, `kernel` is a weights matrix + created by the layer, and `bias` is a bias vector created by the layer + (only applicable if `use_bias` is `True`). + It creates `kernel_mask` to set fraction of the `kernel` weights to zero. + + Note: If the input to the layer has a rank greater than 2, then + it is flattened prior to the initial dot product with `kernel`. + + Arguments: + sparsity: Float between 0 and 1. Fraction of the `kernel` + weights to set to zero. + units: Positive integer, dimensionality of the output space. + activation: Activation function to use. + If you don't specify anything, no activation is applied + (ie. "linear" activation: `a(x) = x`). + use_bias: Boolean, whether the layer uses a bias vector. + kernel_initializer: Initializer for the `kernel` weights matrix. + bias_initializer: Initializer for the bias vector. + kernel_regularizer: Regularizer function applied to + the `kernel` weights matrix. + bias_regularizer: Regularizer function applied to the bias vector. + activity_regularizer: Regularizer function applied to + the output of the layer (its "activation").. + kernel_constraint: Constraint function applied to + the `kernel` weights matrix. + bias_constraint: Constraint function applied to the bias vector. + + Input shape: + N-D tensor with shape: `(batch_size, ..., input_dim)`. + The most common situation would be + a 2D input with shape `(batch_size, input_dim)`. + + Output shape: + N-D tensor with shape: `(batch_size, ..., units)`. 
+ For instance, for a 2D input with shape `(batch_size, input_dim)`, + the output would have shape `(batch_size, units)`. + """ + + def __init__(self, sparsity: float = 0.8, **kwargs: Any) -> None: + super().__init__(**kwargs) + self.sparsity = sparsity + + def build(self, input_shape: tf.TensorShape) -> None: + super().build(input_shape) + # create random mask to set fraction of the `kernel` weights to zero + kernel_mask = tf.random.uniform(tf.shape(self.kernel), 0, 1) + kernel_mask = tf.cast( + tf.greater_equal(kernel_mask, self.sparsity), self.kernel.dtype + ) + self.kernel_mask = tf.Variable( + initial_value=kernel_mask, trainable=False, name="kernel_mask" + ) + + def call(self, inputs: tf.Tensor) -> tf.Tensor: + # set fraction of the `kernel` weights to zero according to precomputed mask + self.kernel.assign(self.kernel * self.kernel_mask) + return super().call(inputs) + + +class Ffnn(tf.keras.layers.Layer): + """Feed-forward network layer. + + Arguments: + layer_sizes: List of integers with dimensionality of the layers. + dropout_rate: Float between 0 and 1; fraction of the input units to drop. + reg_lambda: Float, regularization factor. + sparsity: Float between 0 and 1. Fraction of the `kernel` + weights to set to zero. + layer_name_suffix: Text added to the name of the layers. + + Input shape: + N-D tensor with shape: `(batch_size, ..., input_dim)`. + The most common situation would be + a 2D input with shape `(batch_size, input_dim)`. + + Output shape: + N-D tensor with shape: `(batch_size, ..., layer_sizes[-1])`. + For instance, for a 2D input with shape `(batch_size, input_dim)`, + the output would have shape `(batch_size, layer_sizes[-1])`. + """ + + def __init__( + self, + layer_sizes: List[int], + dropout_rate: float, + reg_lambda: float, + sparsity: float, + layer_name_suffix: Text, + ) -> None: + super().__init__(name=f"ffnn_{layer_name_suffix}") + + l2_regularizer = tf.keras.regularizers.l2(reg_lambda) + self._ffn_layers = [] + for i, layer_size in enumerate(layer_sizes): + self._ffn_layers.append( + DenseWithSparseWeights( + units=layer_size, + sparsity=sparsity, + activation=tfa.activations.gelu, + kernel_regularizer=l2_regularizer, + name=f"hidden_layer_{layer_name_suffix}_{i}", + ) + ) + self._ffn_layers.append(tf.keras.layers.Dropout(dropout_rate)) + + def call( + self, x: tf.Tensor, training: Optional[Union[tf.Tensor, bool]] = None + ) -> tf.Tensor: + for layer in self._ffn_layers: + x = layer(x, training=training) + + return x + + +class Embed(tf.keras.layers.Layer): + """Dense embedding layer. + + Arguments: + embed_dim: Positive integer, dimensionality of the output space. + reg_lambda: Float; regularization factor. + layer_name_suffix: Text added to the name of the layers. + similarity_type: Optional type of similarity measure to use, + either 'cosine' or 'inner'. + + Input shape: + N-D tensor with shape: `(batch_size, ..., input_dim)`. + The most common situation would be + a 2D input with shape `(batch_size, input_dim)`. + + Output shape: + N-D tensor with shape: `(batch_size, ..., embed_dim)`. + For instance, for a 2D input with shape `(batch_size, input_dim)`, + the output would have shape `(batch_size, embed_dim)`. 
+ """ + + def __init__( + self, + embed_dim: int, + reg_lambda: float, + layer_name_suffix: Text, + similarity_type: Optional[Text] = None, + ) -> None: + super().__init__(name=f"embed_{layer_name_suffix}") + + self.similarity_type = similarity_type + if self.similarity_type and self.similarity_type not in {COSINE, INNER}: + raise ValueError( + f"Wrong similarity type '{self.similarity_type}', " + f"should be '{COSINE}' or '{INNER}'." + ) + + regularizer = tf.keras.regularizers.l2(reg_lambda) + self._dense = tf.keras.layers.Dense( + units=embed_dim, + activation=None, + kernel_regularizer=regularizer, + name=f"embed_layer_{layer_name_suffix}", + ) + + # noinspection PyMethodOverriding + def call(self, x: tf.Tensor) -> tf.Tensor: + x = self._dense(x) + if self.similarity_type == COSINE: + x = tf.nn.l2_normalize(x, axis=-1) + + return x + + +class InputMask(tf.keras.layers.Layer): + """The layer that masks 15% of the input. + + Input shape: + N-D tensor with shape: `(batch_size, ..., input_dim)`. + The most common situation would be + a 2D input with shape `(batch_size, input_dim)`. + + Output shape: + N-D tensor with shape: `(batch_size, ..., input_dim)`. + For instance, for a 2D input with shape `(batch_size, input_dim)`, + the output would have shape `(batch_size, input_dim)`. + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + self._masking_prob = 0.85 + self._mask_vector_prob = 0.7 + self._random_vector_prob = 0.1 + + def build(self, input_shape: tf.TensorShape) -> None: + self.mask_vector = self.add_weight( + shape=(1, 1, input_shape[-1]), name="mask_vector" + ) + self.built = True + + # noinspection PyMethodOverriding + def call( + self, + x: tf.Tensor, + mask: tf.Tensor, + training: Optional[Union[tf.Tensor, bool]] = None, + ) -> Tuple[tf.Tensor, tf.Tensor]: + """Randomly mask input sequences. + + Arguments: + x: Input sequence tensor of rank 3. + mask: A tensor representing sequence mask, + contains `1` for inputs and `0` for padding. + training: Python boolean indicating whether the layer should behave in + training mode (mask inputs) or in inference mode (doing nothing). + + Returns: + A tuple of masked inputs and boolean mask. 
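+
+        Roughly 15% of the non-padded positions are selected for masking; of
+        those, about 70% are replaced by the learned mask vector, about 10% by
+        another (randomly shuffled) input from the batch, and the remaining
+        ~20% are left unchanged (BERT-style masking).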
+ """ + + if training is None: + training = K.learning_phase() + + lm_mask_prob = tf.random.uniform(tf.shape(mask), 0, 1, mask.dtype) * mask + lm_mask_bool = tf.greater_equal(lm_mask_prob, self._masking_prob) + + def x_masked() -> tf.Tensor: + x_random_pad = tf.random.uniform( + tf.shape(x), tf.reduce_min(x), tf.reduce_max(x), x.dtype + ) * (1 - mask) + # shuffle over batch dim + x_shuffle = tf.random.shuffle(x * mask + x_random_pad) + + # shuffle over sequence dim + x_shuffle = tf.transpose(x_shuffle, [1, 0, 2]) + x_shuffle = tf.random.shuffle(x_shuffle) + x_shuffle = tf.transpose(x_shuffle, [1, 0, 2]) + + # shuffle doesn't support backprop + x_shuffle = tf.stop_gradient(x_shuffle) + + mask_vector = tf.tile(self.mask_vector, (tf.shape(x)[0], tf.shape(x)[1], 1)) + + other_prob = tf.random.uniform(tf.shape(mask), 0, 1, mask.dtype) + other_prob = tf.tile(other_prob, (1, 1, x.shape[-1])) + x_other = tf.where( + other_prob < self._mask_vector_prob, + mask_vector, + tf.where( + other_prob < self._mask_vector_prob + self._random_vector_prob, + x_shuffle, + x, + ), + ) + + return tf.where(tf.tile(lm_mask_bool, (1, 1, x.shape[-1])), x_other, x) + + return ( + tf_utils.smart_cond(training, x_masked, lambda: tf.identity(x)), + lm_mask_bool, + ) + + +def _scale_loss(log_likelihood: tf.Tensor) -> tf.Tensor: + """Creates scaling loss coefficient depending on the prediction probability. + + Arguments: + log_likelihood: a tensor, log-likelihood of prediction + + Returns: + Scaling tensor. + """ + + p = tf.math.exp(log_likelihood) + # only scale loss if some examples are already learned + return tf.cond( + tf.reduce_max(p) > 0.5, + lambda: tf.stop_gradient(tf.pow((1 - p) / 0.5, 4)), + lambda: tf.ones_like(p), + ) + + +class CRF(tf.keras.layers.Layer): + """CRF layer. + + Arguments: + num_tags: Positive integer, number of tags. + reg_lambda: Float; regularization factor. + name: Optional name of the layer. + """ + + def __init__( + self, + num_tags: int, + reg_lambda: float, + scale_loss: bool, + name: Optional[Text] = None, + ) -> None: + super().__init__(name=name) + self.num_tags = num_tags + self.scale_loss = scale_loss + self.transition_regularizer = tf.keras.regularizers.l2(reg_lambda) + self.f1_score_metric = tfa.metrics.F1Score( + num_classes=num_tags - 1, # `0` prediction is not a prediction + average="micro", + ) + + def build(self, input_shape: tf.TensorShape) -> None: + # the weights should be created in `build` to apply random_seed + self.transition_params = self.add_weight( + shape=(self.num_tags, self.num_tags), + regularizer=self.transition_regularizer, + name="transitions", + ) + self.built = True + + # noinspection PyMethodOverriding + def call( + self, logits: tf.Tensor, sequence_lengths: tf.Tensor + ) -> Tuple[tf.Tensor, tf.Tensor]: + """Decodes the highest scoring sequence of tags. + + Arguments: + logits: A [batch_size, max_seq_len, num_tags] tensor of + unary potentials. + sequence_lengths: A [batch_size] vector of true sequence lengths. + + Returns: + A [batch_size, max_seq_len] matrix, with dtype `tf.int32`. + Contains the highest scoring tag indices. + A [batch_size, max_seq_len] matrix, with dtype `tf.float32`. + Contains the confidence values of the highest scoring tag indices. 
+ """ + predicted_ids, scores, _ = rasa.utils.tensorflow.crf.crf_decode( + logits, self.transition_params, sequence_lengths + ) + # set prediction index for padding to `0` + mask = tf.sequence_mask( + sequence_lengths, + maxlen=tf.shape(predicted_ids)[1], + dtype=predicted_ids.dtype, + ) + + confidence_values = scores * tf.cast(mask, tf.float32) + predicted_ids = predicted_ids * mask + + return predicted_ids, confidence_values + + def loss( + self, logits: tf.Tensor, tag_indices: tf.Tensor, sequence_lengths: tf.Tensor + ) -> tf.Tensor: + """Computes the log-likelihood of tag sequences in a CRF. + + Arguments: + logits: A [batch_size, max_seq_len, num_tags] tensor of unary potentials + to use as input to the CRF layer. + tag_indices: A [batch_size, max_seq_len] matrix of tag indices for which + we compute the log-likelihood. + sequence_lengths: A [batch_size] vector of true sequence lengths. + + Returns: + Negative mean log-likelihood of all examples, + given the sequence of tag indices. + """ + + log_likelihood, _ = tfa.text.crf.crf_log_likelihood( + logits, tag_indices, sequence_lengths, self.transition_params + ) + loss = -log_likelihood + if self.scale_loss: + loss *= _scale_loss(log_likelihood) + + return tf.reduce_mean(loss) + + def f1_score( + self, tag_ids: tf.Tensor, pred_ids: tf.Tensor, mask: tf.Tensor + ) -> tf.Tensor: + """Calculates f1 score for train predictions""" + + mask_bool = tf.cast(mask[:, :, 0], tf.bool) + + # pick only non padding values and flatten sequences + tag_ids_flat = tf.boolean_mask(tag_ids, mask_bool) + pred_ids_flat = tf.boolean_mask(pred_ids, mask_bool) + + # set `0` prediction to not a prediction + num_tags = self.num_tags - 1 + + tag_ids_flat_one_hot = tf.one_hot(tag_ids_flat - 1, num_tags) + pred_ids_flat_one_hot = tf.one_hot(pred_ids_flat - 1, num_tags) + + return self.f1_score_metric(tag_ids_flat_one_hot, pred_ids_flat_one_hot) + + +class DotProductLoss(tf.keras.layers.Layer): + """Dot-product loss layer. + + Arguments: + num_neg: Positive integer, the number of incorrect labels; + the algorithm will minimize their similarity to the input. + loss_type: The type of the loss function, either 'softmax' or 'margin'. + mu_pos: Float, indicates how similar the algorithm should + try to make embedding vectors for correct labels; + should be 0.0 < ... < 1.0 for 'cosine' similarity type. + mu_neg: Float, maximum negative similarity for incorrect labels, + should be -1.0 < ... < 1.0 for 'cosine' similarity type. + use_max_sim_neg: Boolean, if 'True' the algorithm only minimizes + maximum similarity over incorrect intent labels, + used only if 'loss_type' is set to 'margin'. + neg_lambda: Float, the scale of how important is to minimize + the maximum similarity between embeddings of different labels, + used only if 'loss_type' is set to 'margin'. + scale_loss: Boolean, if 'True' scale loss inverse proportionally to + the confidence of the correct prediction. + name: Optional name of the layer. + parallel_iterations: Positive integer, the number of iterations allowed + to run in parallel. + same_sampling: Boolean, if 'True' sample same negative labels + for the whole batch. 
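+
+    With 'softmax' the positive and sampled negative similarities are used as
+    logits of a cross-entropy loss over the candidates; with 'margin' a
+    max-margin (hinge) loss pushes the similarity with the correct label above
+    `mu_pos` and penalizes too high similarities with incorrect labels
+    (controlled by `mu_neg` and `neg_lambda`).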
+ """ + + def __init__( + self, + num_neg: int, + loss_type: Text, + mu_pos: float, + mu_neg: float, + use_max_sim_neg: bool, + neg_lambda: float, + scale_loss: bool, + name: Optional[Text] = None, + parallel_iterations: int = 1000, + same_sampling: bool = False, + ) -> None: + super().__init__(name=name) + self.num_neg = num_neg + self.loss_type = loss_type + self.mu_pos = mu_pos + self.mu_neg = mu_neg + self.use_max_sim_neg = use_max_sim_neg + self.neg_lambda = neg_lambda + self.scale_loss = scale_loss + self.parallel_iterations = parallel_iterations + self.same_sampling = same_sampling + + @staticmethod + def _make_flat(x: tf.Tensor) -> tf.Tensor: + """Make tensor 2D.""" + + return tf.reshape(x, (-1, x.shape[-1])) + + def _random_indices( + self, batch_size: tf.Tensor, total_candidates: tf.Tensor + ) -> tf.Tensor: + def rand_idxs() -> tf.Tensor: + """Create random tensor of indices""" + + # (1, num_neg) + return tf.expand_dims( + tf.random.shuffle(tf.range(total_candidates))[: self.num_neg], 0 + ) + + if self.same_sampling: + return tf.tile(rand_idxs(), (batch_size, 1)) + + def cond(idx: tf.Tensor, out: tf.Tensor) -> tf.Tensor: + """Condition for while loop""" + return idx < batch_size + + def body(idx: tf.Tensor, out: tf.Tensor) -> List[tf.Tensor]: + """Body of the while loop""" + return [ + # increment counter + idx + 1, + # add random indices + tf.concat([out, rand_idxs()], 0), + ] + + # first tensor already created + idx1 = tf.constant(1) + # create first random array of indices + out1 = rand_idxs() # (1, num_neg) + + return tf.while_loop( + cond, + body, + loop_vars=[idx1, out1], + shape_invariants=[idx1.shape, tf.TensorShape([None, self.num_neg])], + parallel_iterations=self.parallel_iterations, + back_prop=False, + )[1] + + @staticmethod + def _sample_idxs(batch_size: tf.Tensor, x: tf.Tensor, idxs: tf.Tensor) -> tf.Tensor: + """Sample negative examples for given indices""" + + tiled = tf.tile(tf.expand_dims(x, 0), (batch_size, 1, 1)) + + return tf.gather(tiled, idxs, batch_dims=1) + + def _get_bad_mask( + self, labels: tf.Tensor, target_labels: tf.Tensor, idxs: tf.Tensor + ) -> tf.Tensor: + """Calculate bad mask for given indices. + + Checks that input features are different for positive negative samples. 
+ """ + + pos_labels = tf.expand_dims(target_labels, axis=-2) + neg_labels = self._sample_idxs(tf.shape(target_labels)[0], labels, idxs) + + return tf.cast( + tf.reduce_all(tf.equal(neg_labels, pos_labels), axis=-1), pos_labels.dtype + ) + + def _get_negs( + self, embeds: tf.Tensor, labels: tf.Tensor, target_labels: tf.Tensor + ) -> Tuple[tf.Tensor, tf.Tensor]: + """Get negative examples from given tensor.""" + + embeds_flat = self._make_flat(embeds) + labels_flat = self._make_flat(labels) + target_labels_flat = self._make_flat(target_labels) + + total_candidates = tf.shape(embeds_flat)[0] + target_size = tf.shape(target_labels_flat)[0] + + neg_ids = self._random_indices(target_size, total_candidates) + + neg_embeds = self._sample_idxs(target_size, embeds_flat, neg_ids) + bad_negs = self._get_bad_mask(labels_flat, target_labels_flat, neg_ids) + + # check if inputs have sequence dimension + if len(target_labels.shape) == 3: + # tensors were flattened for sampling, reshape back + # add sequence dimension if it was present in the inputs + target_shape = tf.shape(target_labels) + neg_embeds = tf.reshape( + neg_embeds, (target_shape[0], target_shape[1], -1, embeds.shape[-1]) + ) + bad_negs = tf.reshape(bad_negs, (target_shape[0], target_shape[1], -1)) + + return neg_embeds, bad_negs + + def _sample_negatives( + self, + inputs_embed: tf.Tensor, + labels_embed: tf.Tensor, + labels: tf.Tensor, + all_labels_embed: tf.Tensor, + all_labels: tf.Tensor, + ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]: + """Sample negative examples.""" + + pos_inputs_embed = tf.expand_dims(inputs_embed, axis=-2) + pos_labels_embed = tf.expand_dims(labels_embed, axis=-2) + + # sample negative inputs + neg_inputs_embed, inputs_bad_negs = self._get_negs(inputs_embed, labels, labels) + # sample negative labels + neg_labels_embed, labels_bad_negs = self._get_negs( + all_labels_embed, all_labels, labels + ) + return ( + pos_inputs_embed, + pos_labels_embed, + neg_inputs_embed, + neg_labels_embed, + inputs_bad_negs, + labels_bad_negs, + ) + + @staticmethod + def sim(a: tf.Tensor, b: tf.Tensor, mask: Optional[tf.Tensor] = None) -> tf.Tensor: + """Calculate similarity between given tensors.""" + + sim = tf.reduce_sum(a * b, axis=-1) + if mask is not None: + sim *= tf.expand_dims(mask, 2) + + return sim + + @staticmethod + def confidence_from_sim(sim: tf.Tensor, similarity_type: Text) -> tf.Tensor: + if similarity_type == COSINE: + # clip negative values to zero + return tf.nn.relu(sim) + else: + # normalize result to [0, 1] with softmax + return tf.nn.softmax(sim) + + def _train_sim( + self, + pos_inputs_embed: tf.Tensor, + pos_labels_embed: tf.Tensor, + neg_inputs_embed: tf.Tensor, + neg_labels_embed: tf.Tensor, + inputs_bad_negs: tf.Tensor, + labels_bad_negs: tf.Tensor, + mask: Optional[tf.Tensor], + ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]: + """Define similarity.""" + + # calculate similarity with several + # embedded actions for the loss + neg_inf = tf.constant(-1e9) + + sim_pos = self.sim(pos_inputs_embed, pos_labels_embed, mask) + sim_neg_il = ( + self.sim(pos_inputs_embed, neg_labels_embed, mask) + + neg_inf * labels_bad_negs + ) + sim_neg_ll = ( + self.sim(pos_labels_embed, neg_labels_embed, mask) + + neg_inf * labels_bad_negs + ) + sim_neg_ii = ( + self.sim(pos_inputs_embed, neg_inputs_embed, mask) + + neg_inf * inputs_bad_negs + ) + sim_neg_li = ( + self.sim(pos_labels_embed, neg_inputs_embed, mask) + + neg_inf * inputs_bad_negs + ) + + # output similarities 
between user input and bot actions + # and similarities between bot actions and similarities between user inputs + return sim_pos, sim_neg_il, sim_neg_ll, sim_neg_ii, sim_neg_li + + @staticmethod + def _calc_accuracy(sim_pos: tf.Tensor, sim_neg: tf.Tensor) -> tf.Tensor: + """Calculate accuracy.""" + + max_all_sim = tf.reduce_max(tf.concat([sim_pos, sim_neg], axis=-1), axis=-1) + return tf.reduce_mean( + tf.cast( + tf.math.equal(max_all_sim, tf.squeeze(sim_pos, axis=-1)), tf.float32 + ) + ) + + def _loss_margin( + self, + sim_pos: tf.Tensor, + sim_neg_il: tf.Tensor, + sim_neg_ll: tf.Tensor, + sim_neg_ii: tf.Tensor, + sim_neg_li: tf.Tensor, + mask: Optional[tf.Tensor], + ) -> tf.Tensor: + """Define max margin loss.""" + + # loss for maximizing similarity with correct action + loss = tf.maximum(0.0, self.mu_pos - tf.squeeze(sim_pos, axis=-1)) + + # loss for minimizing similarity with `num_neg` incorrect actions + if self.use_max_sim_neg: + # minimize only maximum similarity over incorrect actions + max_sim_neg_il = tf.reduce_max(sim_neg_il, axis=-1) + loss += tf.maximum(0.0, self.mu_neg + max_sim_neg_il) + else: + # minimize all similarities with incorrect actions + max_margin = tf.maximum(0.0, self.mu_neg + sim_neg_il) + loss += tf.reduce_sum(max_margin, axis=-1) + + # penalize max similarity between pos bot and neg bot embeddings + max_sim_neg_ll = tf.maximum( + 0.0, self.mu_neg + tf.reduce_max(sim_neg_ll, axis=-1) + ) + loss += max_sim_neg_ll * self.neg_lambda + + # penalize max similarity between pos dial and neg dial embeddings + max_sim_neg_ii = tf.maximum( + 0.0, self.mu_neg + tf.reduce_max(sim_neg_ii, axis=-1) + ) + loss += max_sim_neg_ii * self.neg_lambda + + # penalize max similarity between pos bot and neg dial embeddings + max_sim_neg_li = tf.maximum( + 0.0, self.mu_neg + tf.reduce_max(sim_neg_li, axis=-1) + ) + loss += max_sim_neg_li * self.neg_lambda + + if mask is not None: + # mask loss for different length sequences + loss *= mask + # average the loss over sequence length + loss = tf.reduce_sum(loss, axis=-1) / tf.reduce_sum(mask, axis=1) + + # average the loss over the batch + loss = tf.reduce_mean(loss) + + return loss + + def _loss_softmax( + self, + sim_pos: tf.Tensor, + sim_neg_il: tf.Tensor, + sim_neg_ll: tf.Tensor, + sim_neg_ii: tf.Tensor, + sim_neg_li: tf.Tensor, + mask: Optional[tf.Tensor], + ) -> tf.Tensor: + """Define softmax loss.""" + + logits = tf.concat( + [sim_pos, sim_neg_il, sim_neg_ll, sim_neg_ii, sim_neg_li], axis=-1 + ) + + # create label_ids for softmax + label_ids = tf.zeros_like(logits[..., 0], tf.int32) + + loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + labels=label_ids, logits=logits + ) + + if self.scale_loss: + # in case of cross entropy log_likelihood = -loss + loss *= _scale_loss(-loss) + + if mask is not None: + loss *= mask + + if len(loss.shape) == 2: + # average over the sequence + if mask is not None: + loss = tf.reduce_sum(loss, axis=-1) / tf.reduce_sum(mask, axis=-1) + else: + loss = tf.reduce_mean(loss, axis=-1) + + # average the loss over the batch + return tf.reduce_mean(loss) + + @property + def _chosen_loss(self) -> Callable: + """Use loss depending on given option.""" + + if self.loss_type == MARGIN: + return self._loss_margin + elif self.loss_type == SOFTMAX: + return self._loss_softmax + else: + raise ValueError( + f"Wrong loss type '{self.loss_type}', " + f"should be '{MARGIN}' or '{SOFTMAX}'" + ) + + # noinspection PyMethodOverriding + def call( + self, + inputs_embed: tf.Tensor, + labels_embed: tf.Tensor, + labels: 
tf.Tensor, + all_labels_embed: tf.Tensor, + all_labels: tf.Tensor, + mask: Optional[tf.Tensor] = None, + ) -> Tuple[tf.Tensor, tf.Tensor]: + """Calculate loss and accuracy. + + Arguments: + inputs_embed: Embedding tensor for the batch inputs. + labels_embed: Embedding tensor for the batch labels. + labels: Tensor representing batch labels. + all_labels_embed: Embedding tensor for the all labels. + all_labels: Tensor representing all labels. + mask: Optional tensor representing sequence mask, + contains `1` for inputs and `0` for padding. + + Returns: + loss: Total loss. + accuracy: Training accuracy. + """ + ( + pos_inputs_embed, + pos_labels_embed, + neg_inputs_embed, + neg_labels_embed, + inputs_bad_negs, + labels_bad_negs, + ) = self._sample_negatives( + inputs_embed, labels_embed, labels, all_labels_embed, all_labels + ) + + # calculate similarities + sim_pos, sim_neg_il, sim_neg_ll, sim_neg_ii, sim_neg_li = self._train_sim( + pos_inputs_embed, + pos_labels_embed, + neg_inputs_embed, + neg_labels_embed, + inputs_bad_negs, + labels_bad_negs, + mask, + ) + + accuracy = self._calc_accuracy(sim_pos, sim_neg_il) + + loss = self._chosen_loss( + sim_pos, sim_neg_il, sim_neg_ll, sim_neg_ii, sim_neg_li, mask + ) + + return loss, accuracy diff --git a/rasa/utils/tensorflow/model_data.py b/rasa/utils/tensorflow/model_data.py new file mode 100644 index 000000000000..1106531c4a6b --- /dev/null +++ b/rasa/utils/tensorflow/model_data.py @@ -0,0 +1,591 @@ +import logging + +import numpy as np +import scipy.sparse +import tensorflow as tf + +from sklearn.model_selection import train_test_split +from typing import Optional, Dict, Text, List, Tuple, Any, Union, Generator, NamedTuple +from collections import defaultdict +from rasa.utils.tensorflow.constants import BALANCED, SEQUENCE + +logger = logging.getLogger(__name__) + + +# Mapping of feature name to a list of numpy arrays representing the actual features +# For example: +# "text_features" -> [ +# "numpy array containing dense features for every training example", +# "numpy array containing sparse features for every training example" +# ] +Data = Dict[Text, List[np.ndarray]] + + +class FeatureSignature(NamedTuple): + """Stores the shape and the type (sparse vs dense) of features.""" + + is_sparse: bool + feature_dimension: Optional[int] + + +class RasaModelData: + """Data object used for all RasaModels. + + It contains all features needed to train the models. + """ + + def __init__( + self, label_key: Optional[Text] = None, data: Optional[Data] = None + ) -> None: + """ + Initializes the RasaModelData object. + + Args: + label_key: the label_key used for balancing, etc. 
+ data: the data holding the features + """ + + self.data = data or {} + self.label_key = label_key + # should be updated when features are added + self.num_examples = self.number_of_examples() + + def get_only(self, key: Text) -> Optional[np.ndarray]: + if key in self.data: + return self.data[key][0] + else: + return None + + def get(self, key: Text) -> List[np.ndarray]: + if key in self.data: + return self.data[key] + else: + return [] + + def items(self): + return self.data.items() + + def values(self): + return self.data.values() + + def keys(self): + return self.data.keys() + + def first_data_example(self) -> Data: + return { + feature_name: [feature[:1] for feature in features] + for feature_name, features in self.data.items() + } + + def feature_not_exist(self, key: Text) -> bool: + """Check if feature key is present and features are available.""" + + return key not in self.data or not self.data[key] + + def is_empty(self) -> bool: + """Checks if data is set.""" + + return not self.data + + def number_of_examples(self, data: Optional[Data] = None) -> int: + """Obtain number of examples in data. + + Raises: A ValueError if number of examples differ for different features. + """ + + if not data: + data = self.data + + if not data: + return 0 + + example_lengths = [v.shape[0] for values in data.values() for v in values] + + # check if number of examples is the same for all values + if not all(length == example_lengths[0] for length in example_lengths): + raise ValueError( + f"Number of examples differs for keys '{data.keys()}'. Number of " + f"examples should be the same for all data." + ) + + return example_lengths[0] + + def feature_dimension(self, key: Text) -> int: + """Get the feature dimension of the given key.""" + + if key not in self.data: + return 0 + + number_of_features = 0 + for data in self.data[key]: + if data.size > 0: + number_of_features += data[0].shape[-1] + + return number_of_features + + def add_features(self, key: Text, features: List[np.ndarray]): + """Add list of features to data under specified key. + + Should update number of examples. 
+        """
+
+        if not features:
+            return
+
+        if key in self.data:
+            raise ValueError(f"Key '{key}' already exists in RasaModelData.")
+
+        self.data[key] = []
+
+        for data in features:
+            if data.size > 0:
+                self.data[key].append(data)
+
+        if not self.data[key]:
+            del self.data[key]
+
+        # update number of examples
+        self.num_examples = self.number_of_examples()
+
+    def add_lengths(self, key: Text, from_key: Text) -> None:
+        """Adds np.array of lengths of sequences to data under given key."""
+        if not self.data.get(from_key):
+            return
+
+        self.data[key] = []
+
+        for data in self.data[from_key]:
+            if data.size > 0:
+                lengths = np.array([x.shape[0] for x in data])
+                self.data[key].append(lengths)
+                break
+
+    def split(
+        self, number_of_test_examples: int, random_seed: int
+    ) -> Tuple["RasaModelData", "RasaModelData"]:
+        """Create random hold out test set using stratified split."""
+
+        self._check_label_key()
+
+        if self.label_key is None:
+            # randomly split data as no label key is set
+            multi_values = [v for values in self.data.values() for v in values]
+            solo_values = [[] for values in self.data.values() for v in values]
+            stratify = None
+        else:
+            # make sure that examples for each label value are in both split sets
+            label_ids = self._create_label_ids(self.data[self.label_key][0])
+            label_counts = dict(zip(*np.unique(label_ids, return_counts=True, axis=0)))
+
+            self._check_train_test_sizes(number_of_test_examples, label_counts)
+
+            counts = np.array([label_counts[label] for label in label_ids])
+            # we perform stratified train test split,
+            # which ensures every label is present in the train and test data
+            # this operation can be performed only for labels
+            # that contain several data points
+            multi_values = [
+                v[counts > 1] for values in self.data.values() for v in values
+            ]
+            # collect data points that are unique for their label
+            solo_values = [
+                v[counts == 1] for values in self.data.values() for v in values
+            ]
+
+            stratify = label_ids[counts > 1]
+
+        output_values = train_test_split(
+            *multi_values,
+            test_size=number_of_test_examples,
+            random_state=random_seed,
+            stratify=stratify,
+        )
+
+        return self._convert_train_test_split(output_values, solo_values)
+
+    def get_signature(self) -> Dict[Text, List[FeatureSignature]]:
+        """Get signature of RasaModelData.
+
+        Signature stores the shape and whether features are sparse or not for every key.
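+
+        For example (the dimensions are purely illustrative), a signature
+        could look like:
+
+            {
+                "text_features": [
+                    FeatureSignature(is_sparse=True, feature_dimension=1878),
+                    FeatureSignature(is_sparse=False, feature_dimension=300),
+                ]
+            }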
+ """ + + return { + key: [ + FeatureSignature( + True if isinstance(v[0], scipy.sparse.spmatrix) else False, + v[0].shape[-1] if v[0].shape else None, + ) + for v in values + ] + for key, values in self.data.items() + } + + def as_tf_dataset( + self, batch_size: int, batch_strategy: Text = SEQUENCE, shuffle: bool = False + ) -> tf.data.Dataset: + """Create tf dataset.""" + + shapes, types = self._get_shapes_types() + + return tf.data.Dataset.from_generator( + lambda batch_size_: self._gen_batch(batch_size_, batch_strategy, shuffle), + output_types=types, + output_shapes=shapes, + args=([batch_size]), + ) + + def prepare_batch( + self, + data: Optional[Data] = None, + start: Optional[int] = None, + end: Optional[int] = None, + tuple_sizes: Optional[Dict[Text, int]] = None, + ) -> Tuple[Optional[np.ndarray]]: + """Slices model data into batch using given start and end value.""" + + if not data: + data = self.data + + batch_data = [] + + for key, values in data.items(): + # add None for not present values during processing + if not values: + if tuple_sizes: + batch_data += [None] * tuple_sizes[key] + else: + batch_data.append(None) + continue + + for v in values: + if start is not None and end is not None: + _data = v[start:end] + elif start is not None: + _data = v[start:] + elif end is not None: + _data = v[:end] + else: + _data = v[:] + + if isinstance(_data[0], scipy.sparse.spmatrix): + batch_data.extend(self._scipy_matrix_to_values(_data)) + else: + batch_data.append(self._pad_dense_data(_data)) + + # len of batch_data is equal to the number of keys in model data + return tuple(batch_data) + + def _get_shapes_types(self) -> Tuple: + """Extract shapes and types from model data.""" + + types = [] + shapes = [] + + def append_shape(features: np.ndarray) -> None: + if isinstance(features[0], scipy.sparse.spmatrix): + # scipy matrix is converted into indices, data, shape + shapes.append((None, features[0].ndim + 1)) + shapes.append((None,)) + shapes.append((features[0].ndim + 1)) + elif features[0].ndim == 0: + shapes.append((None,)) + elif features[0].ndim == 1: + shapes.append((None, features[0].shape[-1])) + else: + shapes.append((None, None, features[0].shape[-1])) + + def append_type(features: np.ndarray) -> None: + if isinstance(features[0], scipy.sparse.spmatrix): + # scipy matrix is converted into indices, data, shape + types.append(tf.int64) + types.append(tf.float32) + types.append(tf.int64) + else: + types.append(tf.float32) + + for values in self.data.values(): + for v in values: + append_shape(v) + append_type(v) + + return tuple(shapes), tuple(types) + + def _shuffled_data(self, data: Data) -> Data: + """Shuffle model data.""" + + ids = np.random.permutation(self.num_examples) + return self._data_for_ids(data, ids) + + def _balanced_data(self, data: Data, batch_size: int, shuffle: bool) -> Data: + """Mix model data to account for class imbalance. + + This batching strategy puts rare classes approximately in every other batch, + by repeating them. Mimics stratified batching, but also takes into account + that more populated classes should appear more often. 
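+
+        In effect, frequent labels contribute proportionally more examples to
+        each batch, while a label whose examples have already been used up is
+        only re-included in roughly every other pass until every label has
+        been cycled through at least once.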
+ """ + + self._check_label_key() + + # skip balancing if labels are token based + if self.label_key is None or data[self.label_key][0][0].size > 1: + return data + + label_ids = self._create_label_ids(data[self.label_key][0]) + + unique_label_ids, counts_label_ids = np.unique( + label_ids, return_counts=True, axis=0 + ) + num_label_ids = len(unique_label_ids) + + # group data points by their label + # need to call every time, so that the data is shuffled inside each class + data_by_label = self._split_by_label_ids(data, label_ids, unique_label_ids) + + # running index inside each data grouped by labels + data_idx = [0] * num_label_ids + # number of cycles each label was passed + num_data_cycles = [0] * num_label_ids + # if a label was skipped in current batch + skipped = [False] * num_label_ids + + new_data = defaultdict(list) + + while min(num_data_cycles) == 0: + if shuffle: + indices_of_labels = np.random.permutation(num_label_ids) + else: + indices_of_labels = range(num_label_ids) + + for index in indices_of_labels: + if num_data_cycles[index] > 0 and not skipped[index]: + skipped[index] = True + continue + + skipped[index] = False + + index_batch_size = ( + int(counts_label_ids[index] / self.num_examples * batch_size) + 1 + ) + + for k, values in data_by_label[index].items(): + for i, v in enumerate(values): + if len(new_data[k]) < i + 1: + new_data[k].append([]) + new_data[k][i].append( + v[data_idx[index] : data_idx[index] + index_batch_size] + ) + + data_idx[index] += index_batch_size + if data_idx[index] >= counts_label_ids[index]: + num_data_cycles[index] += 1 + data_idx[index] = 0 + + if min(num_data_cycles) > 0: + break + + final_data = defaultdict(list) + for k, values in new_data.items(): + for v in values: + final_data[k].append(np.concatenate(v)) + + return final_data + + def _gen_batch( + self, batch_size: int, batch_strategy: Text = SEQUENCE, shuffle: bool = False + ) -> Generator[Tuple[Optional[np.ndarray]], None, None]: + """Generate batches.""" + + data = self.data + num_examples = self.num_examples + + if shuffle: + data = self._shuffled_data(data) + + if batch_strategy == BALANCED: + data = self._balanced_data(data, batch_size, shuffle) + # after balancing, number of examples increased + num_examples = self.number_of_examples(data) + + num_batches = num_examples // batch_size + int(num_examples % batch_size > 0) + + for batch_num in range(num_batches): + start = batch_num * batch_size + end = start + batch_size + + yield self.prepare_batch(data, start, end) + + def _check_train_test_sizes( + self, number_of_test_examples: int, label_counts: Dict[Any, int] + ): + """Check whether the test data set is too large or too small.""" + + if number_of_test_examples >= self.num_examples - len(label_counts): + raise ValueError( + f"Test set of {number_of_test_examples} is too large. Remaining " + f"train set should be at least equal to number of classes " + f"{len(label_counts)}." + ) + elif number_of_test_examples < len(label_counts): + raise ValueError( + f"Test set of {number_of_test_examples} is too small. It should " + f"be at least equal to number of classes {label_counts}." 
+ ) + + @staticmethod + def _data_for_ids(data: Optional[Data], ids: np.ndarray) -> Data: + """Filter model data by ids.""" + + new_data = defaultdict(list) + + if data is None: + return new_data + + for k, values in data.items(): + for v in values: + new_data[k].append(v[ids]) + return new_data + + def _split_by_label_ids( + self, data: Optional[Data], label_ids: np.ndarray, unique_label_ids: np.ndarray + ) -> List["RasaModelData"]: + """Reorganize model data into a list of model data with the same labels.""" + + label_data = [] + for label_id in unique_label_ids: + matching_ids = label_ids == label_id + label_data.append( + RasaModelData(self.label_key, self._data_for_ids(data, matching_ids)) + ) + return label_data + + def _check_label_key(self): + if self.label_key is not None and ( + self.label_key not in self.data or len(self.data[self.label_key]) > 1 + ): + raise ValueError(f"Key '{self.label_key}' not in RasaModelData.") + + def _convert_train_test_split( + self, output_values: List[Any], solo_values: List[Any] + ) -> Tuple["RasaModelData", "RasaModelData"]: + """Converts the output of sklearn's train_test_split into model data.""" + + data_train = defaultdict(list) + data_val = defaultdict(list) + + # output_values = x_train, x_val, y_train, y_val, z_train, z_val, etc. + # order is kept, e.g. same order as model data keys + + # train datasets have an even index + index = 0 + for key, values in self.data.items(): + for _ in values: + data_train[key].append( + self._combine_features(output_values[index * 2], solo_values[index]) + ) + index += 1 + + # val datasets have an odd index + index = 0 + for key, values in self.data.items(): + for _ in range(len(values)): + data_val[key].append(output_values[(index * 2) + 1]) + index += 1 + + return ( + RasaModelData(self.label_key, data_train), + RasaModelData(self.label_key, data_val), + ) + + @staticmethod + def _combine_features( + feature_1: Union[np.ndarray, scipy.sparse.spmatrix], + feature_2: Union[np.ndarray, scipy.sparse.spmatrix], + ) -> Union[np.ndarray, scipy.sparse.spmatrix]: + """Concatenate features.""" + + if isinstance(feature_1, scipy.sparse.spmatrix) and isinstance( + feature_2, scipy.sparse.spmatrix + ): + if feature_2.shape[0] == 0: + return feature_1 + if feature_1.shape[0] == 0: + return feature_2 + return scipy.sparse.vstack([feature_1, feature_2]) + + return np.concatenate([feature_1, feature_2]) + + @staticmethod + def _create_label_ids(label_ids: np.ndarray) -> np.ndarray: + """Convert various size label_ids into single dim array. + + For multi-label y, map each distinct row to a string representation + using join because str(row) uses an ellipsis if len(row) > 1000. + Idea taken from sklearn's stratify split. + """ + + if label_ids.ndim == 1: + return label_ids + + if label_ids.ndim == 2 and label_ids.shape[-1] == 1: + return label_ids[:, 0] + + if label_ids.ndim == 2: + return np.array([" ".join(row.astype("str")) for row in label_ids]) + + if label_ids.ndim == 3 and label_ids.shape[-1] == 1: + return np.array([" ".join(row.astype("str")) for row in label_ids[:, :, 0]]) + + raise ValueError("Unsupported label_ids dimensions") + + @staticmethod + def _pad_dense_data(array_of_dense: np.ndarray) -> np.ndarray: + """Pad data of different lengths. + + Sequential data is padded with zeros. Zeros are added to the end of data. 
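+
+        For example, two sequences with 2 and 4 time steps and a feature
+        dimension of 3 are returned as a single float32 array of shape
+        (2, 4, 3), with the shorter sequence padded with trailing zeros.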
+ """ + + if array_of_dense[0].ndim < 2: + # data doesn't contain a sequence + return array_of_dense.astype(np.float32) + + data_size = len(array_of_dense) + max_seq_len = max([x.shape[0] for x in array_of_dense]) + + data_padded = np.zeros( + [data_size, max_seq_len, array_of_dense[0].shape[-1]], + dtype=array_of_dense[0].dtype, + ) + for i in range(data_size): + data_padded[i, : array_of_dense[i].shape[0], :] = array_of_dense[i] + + return data_padded.astype(np.float32) + + @staticmethod + def _scipy_matrix_to_values(array_of_sparse: np.ndarray) -> List[np.ndarray]: + """Convert a scipy matrix into indices, data, and shape.""" + + # we need to make sure that the matrices are coo_matrices otherwise the + # transformation does not work (e.g. you cannot access x.row, x.col) + if not isinstance(array_of_sparse[0], scipy.sparse.coo_matrix): + array_of_sparse = [x.tocoo() for x in array_of_sparse] + + max_seq_len = max([x.shape[0] for x in array_of_sparse]) + + # get the indices of values + indices = np.hstack( + [ + np.vstack([i * np.ones_like(x.row), x.row, x.col]) + for i, x in enumerate(array_of_sparse) + ] + ).T + + data = np.hstack([x.data for x in array_of_sparse]) + + number_of_features = array_of_sparse[0].shape[-1] + shape = np.array((len(array_of_sparse), max_seq_len, number_of_features)) + + return [ + indices.astype(np.int64), + data.astype(np.float32), + shape.astype(np.int64), + ] diff --git a/rasa/utils/tensorflow/models.py b/rasa/utils/tensorflow/models.py new file mode 100644 index 000000000000..982485a0cbbd --- /dev/null +++ b/rasa/utils/tensorflow/models.py @@ -0,0 +1,513 @@ +import datetime + +import tensorflow as tf +import numpy as np +import logging +from collections import defaultdict +from typing import List, Text, Dict, Tuple, Union, Optional, Callable, TYPE_CHECKING + +from tqdm import tqdm +from rasa.utils.common import is_logging_disabled +from rasa.utils.tensorflow.model_data import RasaModelData, FeatureSignature +from rasa.utils.tensorflow.constants import SEQUENCE, TENSORBOARD_LOG_LEVEL + +if TYPE_CHECKING: + from tensorflow.python.ops.summary_ops_v2 import ResourceSummaryWriter + +logger = logging.getLogger(__name__) + + +TENSORBOARD_LOG_LEVELS = ["epoch", "minibatch"] + + +# noinspection PyMethodOverriding +class RasaModel(tf.keras.models.Model): + """Completely override all public methods of keras Model. + + Cannot be used as tf.keras.Model + """ + + def __init__( + self, + random_seed: Optional[int] = None, + tensorboard_log_dir: Optional[Text] = None, + tensorboard_log_level: Optional[Text] = "epoch", + **kwargs, + ) -> None: + """Initialize the RasaModel. + + Args: + random_seed: set the random seed to get reproducible results + """ + super().__init__(**kwargs) + + self.total_loss = tf.keras.metrics.Mean(name="t_loss") + self.metrics_to_log = ["t_loss"] + + self._training = None # training phase should be defined when building a graph + + self._predict_function = None + + self.random_seed = random_seed + + self.tensorboard_log_dir = tensorboard_log_dir + self.tensorboard_log_level = tensorboard_log_level + + self.train_summary_writer = None + self.test_summary_writer = None + self.model_summary_file = None + self.tensorboard_log_on_epochs = True + + def _set_up_tensorboard_writer(self) -> None: + if self.tensorboard_log_dir is not None: + if self.tensorboard_log_level not in TENSORBOARD_LOG_LEVELS: + raise ValueError( + f"Provided '{TENSORBOARD_LOG_LEVEL}' ('{self.tensorboard_log_level}') " + f"is invalid! 
Valid values are: {TENSORBOARD_LOG_LEVELS}" + ) + self.tensorboard_log_on_epochs = self.tensorboard_log_level == "epoch" + + current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + class_name = self.__class__.__name__ + + train_log_dir = ( + f"{self.tensorboard_log_dir}/{class_name}/{current_time}/train" + ) + test_log_dir = ( + f"{self.tensorboard_log_dir}/{class_name}/{current_time}/test" + ) + + self.train_summary_writer = tf.summary.create_file_writer(train_log_dir) + self.test_summary_writer = tf.summary.create_file_writer(test_log_dir) + + self.model_summary_file = f"{self.tensorboard_log_dir}/{class_name}/{current_time}/model_summary.txt" + + def batch_loss( + self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]] + ) -> tf.Tensor: + raise NotImplementedError + + def batch_predict( + self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]] + ) -> Dict[Text, tf.Tensor]: + raise NotImplementedError + + def fit( + self, + model_data: RasaModelData, + epochs: int, + batch_size: Union[List[int], int], + evaluate_on_num_examples: int, + evaluate_every_num_epochs: int, + batch_strategy: Text, + silent: bool = False, + loading: bool = False, + eager: bool = False, + ) -> None: + """Fit model data""" + + # don't setup tensorboard writers when training during loading + if not loading: + self._set_up_tensorboard_writer() + + tf.random.set_seed(self.random_seed) + np.random.seed(self.random_seed) + + disable = silent or is_logging_disabled() + + evaluation_model_data = None + if evaluate_on_num_examples > 0: + if not disable: + logger.info( + f"Validation accuracy is calculated every " + f"{evaluate_every_num_epochs} epochs." + ) + + model_data, evaluation_model_data = model_data.split( + evaluate_on_num_examples, self.random_seed + ) + + ( + train_dataset_function, + tf_train_on_batch_function, + ) = self._get_tf_train_functions(eager, model_data, batch_strategy) + ( + evaluation_dataset_function, + tf_evaluation_on_batch_function, + ) = self._get_tf_evaluation_functions(eager, evaluation_model_data) + + val_results = {} # validation is not performed every epoch + progress_bar = tqdm(range(epochs), desc="Epochs", disable=disable) + + training_steps = 0 + + for epoch in progress_bar: + epoch_batch_size = self.linearly_increasing_batch_size( + epoch, batch_size, epochs + ) + + training_steps = self._batch_loop( + train_dataset_function, + tf_train_on_batch_function, + epoch_batch_size, + True, + training_steps, + self.train_summary_writer, + ) + + if self.tensorboard_log_on_epochs: + self._log_metrics_for_tensorboard(epoch, self.train_summary_writer) + + postfix_dict = self._get_metric_results() + + if evaluate_on_num_examples > 0: + if self._should_evaluate(evaluate_every_num_epochs, epochs, epoch): + self._batch_loop( + evaluation_dataset_function, + tf_evaluation_on_batch_function, + epoch_batch_size, + False, + training_steps, + self.test_summary_writer, + ) + + if self.tensorboard_log_on_epochs: + self._log_metrics_for_tensorboard( + epoch, self.test_summary_writer + ) + + val_results = self._get_metric_results(prefix="val_") + + postfix_dict.update(val_results) + + progress_bar.set_postfix(postfix_dict) + + if self.model_summary_file is not None: + self._write_model_summary() + + self._training = None # training phase should be defined when building a graph + if not disable: + logger.info("Finished training.") + + def train_on_batch( + self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]] + ) -> None: + """Train on batch""" + + # calculate supervision and 
regularization losses separately + with tf.GradientTape(persistent=True) as tape: + prediction_loss = self.batch_loss(batch_in) + regularization_loss = tf.math.add_n(self.losses) + total_loss = prediction_loss + regularization_loss + + self.total_loss.update_state(total_loss) + + # calculate the gradients that come from supervision signal + prediction_gradients = tape.gradient(prediction_loss, self.trainable_variables) + # calculate the gradients that come from regularization + regularization_gradients = tape.gradient( + regularization_loss, self.trainable_variables + ) + # delete gradient tape manually + # since it was created with `persistent=True` option + del tape + + gradients = [] + for pred_grad, reg_grad in zip(prediction_gradients, regularization_gradients): + if pred_grad is not None and reg_grad is not None: + # remove regularization gradient for variables + # that don't have prediction gradient + gradients.append( + pred_grad + + tf.where(pred_grad > 0, reg_grad, tf.zeros_like(reg_grad)) + ) + else: + gradients.append(pred_grad) + + self.optimizer.apply_gradients(zip(gradients, self.trainable_variables)) + + def build_for_predict( + self, predict_data: RasaModelData, eager: bool = False + ) -> None: + self._training = False # needed for tf graph mode + self._predict_function = self._get_tf_call_model_function( + predict_data.as_tf_dataset, self.batch_predict, eager, "prediction" + ) + + def predict(self, predict_data: RasaModelData) -> Dict[Text, tf.Tensor]: + if self._predict_function is None: + logger.debug("There is no tensorflow prediction graph.") + self.build_for_predict(predict_data) + + # Prepare a single batch of size 1 + batch_in = predict_data.prepare_batch(start=0, end=1) + + self._training = False # needed for eager mode + return self._predict_function(batch_in) + + def save(self, model_file_name: Text) -> None: + self.save_weights(model_file_name, save_format="tf") + + @classmethod + def load( + cls, model_file_name: Text, model_data_example: RasaModelData, *args, **kwargs + ) -> "RasaModel": + logger.debug("Loading the model ...") + # create empty model + model = cls(*args, **kwargs) + # need to train on 1 example to build weights of the correct size + model.fit( + model_data_example, + epochs=1, + batch_size=1, + evaluate_every_num_epochs=0, + evaluate_on_num_examples=0, + batch_strategy=SEQUENCE, + silent=True, # don't confuse users with training output + loading=True, # don't use tensorboard while loading + eager=True, # no need to build tf graph, eager is faster here + ) + # load trained weights + model.load_weights(model_file_name) + + logger.debug("Finished loading the model.") + return model + + def _total_batch_loss( + self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]] + ) -> tf.Tensor: + """Calculate total loss""" + + prediction_loss = self.batch_loss(batch_in) + regularization_loss = tf.math.add_n(self.losses) + total_loss = prediction_loss + regularization_loss + self.total_loss.update_state(total_loss) + + return total_loss + + def _batch_loop( + self, + dataset_function: Callable, + call_model_function: Callable, + batch_size: int, + training: bool, + offset: int, + writer: Optional["ResourceSummaryWriter"] = None, + ) -> int: + """Run on batches""" + self.reset_metrics() + + step = offset + + self._training = training # needed for eager mode + for batch_in in dataset_function(batch_size): + call_model_function(batch_in) + + if not self.tensorboard_log_on_epochs: + self._log_metrics_for_tensorboard(step, writer) + + step += 1 + + return step 
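# A minimal sketch of how a concrete model is expected to plug into RasaModel:
# subclasses only implement `batch_loss` and `batch_predict`, while the `fit`,
# `predict`, `save` and `load` methods above drive the training and inference
# loops. The layer, the optimizer choice and the toy mean-squared-error loss
# below are illustrative assumptions, not part of this diff.
from typing import Dict, Text, Tuple, Union

import numpy as np
import tensorflow as tf

from rasa.utils.tensorflow.models import RasaModel


class ToyRasaModel(RasaModel):
    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # the training loop expects `self.optimizer` to be set and at least one
        # regularization loss to be present in `self.losses`
        self.optimizer = tf.keras.optimizers.Adam()
        self._dense = tf.keras.layers.Dense(
            1, kernel_regularizer=tf.keras.regularizers.l2(0.002)
        )

    def batch_loss(
        self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
    ) -> tf.Tensor:
        # assumes the first element of the batch tuple holds dense features
        # and the second one holds the targets
        features, targets = batch_in[0], batch_in[1]
        predictions = self._dense(features)
        return tf.reduce_mean(tf.math.square(predictions - targets))

    def batch_predict(
        self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
    ) -> Dict[Text, tf.Tensor]:
        return {"prediction": self._dense(batch_in[0])}


# Training would then presumably go through `ToyRasaModel(...).fit(model_data, ...)`
# with a `RasaModelData` object holding matching feature and target arrays.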
+ + @staticmethod + def _get_tf_call_model_function( + dataset_function: Callable, + call_model_function: Callable, + eager: bool, + phase: Text, + ) -> Callable: + """Convert functions to tensorflow functions""" + + if eager: + return call_model_function + + logger.debug(f"Building tensorflow {phase} graph...") + + init_dataset = dataset_function(1) + tf_call_model_function = tf.function( + call_model_function, input_signature=[init_dataset.element_spec] + ) + tf_call_model_function(next(iter(init_dataset))) + + logger.debug(f"Finished building tensorflow {phase} graph.") + + return tf_call_model_function + + def _get_tf_train_functions( + self, eager: bool, model_data: RasaModelData, batch_strategy: Text + ) -> Tuple[Callable, Callable]: + """Create train tensorflow functions""" + + def train_dataset_function(_batch_size: int) -> tf.data.Dataset: + return model_data.as_tf_dataset(_batch_size, batch_strategy, shuffle=True) + + self._training = True # needed for tf graph mode + return ( + train_dataset_function, + self._get_tf_call_model_function( + train_dataset_function, self.train_on_batch, eager, "train" + ), + ) + + def _get_tf_evaluation_functions( + self, eager: bool, evaluation_model_data: Optional[RasaModelData] + ) -> Tuple[Optional[Callable], Optional[Callable]]: + """Create evaluation tensorflow functions""" + + if evaluation_model_data is None: + return None, None + + def evaluation_dataset_function(_batch_size: int) -> tf.data.Dataset: + return evaluation_model_data.as_tf_dataset( + _batch_size, SEQUENCE, shuffle=False + ) + + self._training = False # needed for tf graph mode + return ( + evaluation_dataset_function, + self._get_tf_call_model_function( + evaluation_dataset_function, self._total_batch_loss, eager, "evaluation" + ), + ) + + def _get_metric_results(self, prefix: Optional[Text] = None) -> Dict[Text, Text]: + """Get the metrics results""" + prefix = prefix or "" + + return { + f"{prefix}{metric.name}": f"{metric.result().numpy():.3f}" + for metric in self.metrics + if metric.name in self.metrics_to_log + } + + def _log_metrics_for_tensorboard( + self, step: int, writer: Optional["ResourceSummaryWriter"] = None + ) -> None: + if writer is not None: + with writer.as_default(): + for metric in self.metrics: + if metric.name in self.metrics_to_log: + tf.summary.scalar(metric.name, metric.result(), step=step) + + @staticmethod + def _should_evaluate( + evaluate_every_num_epochs: int, epochs: int, current_epoch: int + ) -> bool: + return ( + current_epoch == 0 + or (current_epoch + 1) % evaluate_every_num_epochs == 0 + or (current_epoch + 1) == epochs + ) + + @staticmethod + def batch_to_model_data_format( + batch: Union[Tuple[tf.Tensor], Tuple[np.ndarray]], + data_signature: Dict[Text, List[FeatureSignature]], + ) -> Dict[Text, List[tf.Tensor]]: + """Convert input batch tensors into batch data format. + + Batch contains any number of batch data. The order is equal to the + key-value pairs in session data. As sparse data were converted into indices, + data, shape before, this methods converts them into sparse tensors. Dense data + is kept. 
+ """ + + batch_data = defaultdict(list) + + idx = 0 + for k, signature in data_signature.items(): + for is_sparse, feature_dimension in signature: + if is_sparse: + # explicitly substitute last dimension in shape with known + # static value + batch_data[k].append( + tf.SparseTensor( + batch[idx], + batch[idx + 1], + [batch[idx + 2][0], batch[idx + 2][1], feature_dimension], + ) + ) + idx += 3 + else: + if isinstance(batch[idx], tf.Tensor): + batch_data[k].append(batch[idx]) + else: + # convert to Tensor + batch_data[k].append(tf.constant(batch[idx], dtype=tf.float32)) + idx += 1 + + return batch_data + + @staticmethod + def linearly_increasing_batch_size( + epoch: int, batch_size: Union[List[int], int], epochs: int + ) -> int: + """Linearly increase batch size with every epoch. + + The idea comes from https://arxiv.org/abs/1711.00489. + """ + + if not isinstance(batch_size, list): + return int(batch_size) + + if epochs > 1: + return int( + batch_size[0] + epoch * (batch_size[1] - batch_size[0]) / (epochs - 1) + ) + else: + return int(batch_size[0]) + + def _write_model_summary(self): + total_number_of_variables = np.sum( + [np.prod(v.shape) for v in self.trainable_variables] + ) + layers = [ + f"{layer.name} ({layer.dtype.name}) " + f"[{'x'.join(str(s) for s in layer.shape)}]" + for layer in self.trainable_variables + ] + layers.reverse() + + with open(self.model_summary_file, "w") as file: + file.write("Variables: name (type) [shape]\n\n") + for layer in layers: + file.write(layer) + file.write("\n") + file.write("\n") + file.write(f"Total size of variables: {total_number_of_variables}") + + def compile(self, *args, **kwargs) -> None: + raise Exception( + "This method should neither be called nor implemented in our code." + ) + + def evaluate(self, *args, **kwargs) -> None: + raise Exception( + "This method should neither be called nor implemented in our code." + ) + + def test_on_batch(self, *args, **kwargs) -> None: + raise Exception( + "This method should neither be called nor implemented in our code." + ) + + def predict_on_batch(self, *args, **kwargs) -> None: + raise Exception( + "This method should neither be called nor implemented in our code." + ) + + def fit_generator(self, *args, **kwargs) -> None: + raise Exception( + "This method should neither be called nor implemented in our code." + ) + + def evaluate_generator(self, *args, **kwargs) -> None: + raise Exception( + "This method should neither be called nor implemented in our code." + ) + + def predict_generator(self, *args, **kwargs) -> None: + raise Exception( + "This method should neither be called nor implemented in our code." + ) diff --git a/rasa/utils/tensorflow/transformer.py b/rasa/utils/tensorflow/transformer.py new file mode 100644 index 000000000000..222cfb89d16c --- /dev/null +++ b/rasa/utils/tensorflow/transformer.py @@ -0,0 +1,629 @@ +from typing import List, Optional, Text, Tuple, Union +import tensorflow as tf +import tensorflow_addons as tfa +from tensorflow.python.keras.utils import tf_utils +from tensorflow.python.keras import backend as K +import numpy as np +from rasa.utils.tensorflow.layers import DenseWithSparseWeights + + +# from https://www.tensorflow.org/tutorials/text/transformer +# and https://github.com/tensorflow/tensor2tensor +class MultiHeadAttention(tf.keras.layers.Layer): + """Multi-headed attention layer. + + Arguments: + units: Positive integer, output dim of hidden layer. + num_heads: Positive integer, number of heads + to repeat the same attention structure. 
+ attention_dropout_rate: Float, dropout rate inside attention for training. + sparsity: Float between 0 and 1. Fraction of the `kernel` + weights to set to zero. + unidirectional: Boolean, use a unidirectional or bidirectional encoder. + use_key_relative_position: Boolean, if 'True' use key + relative embeddings in attention. + use_value_relative_position: Boolean, if 'True' use value + relative embeddings in attention. + max_relative_position: Positive integer, max position for relative embeddings. + heads_share_relative_embedding: Boolean, if 'True' + heads will share relative embeddings. + """ + + def __init__( + self, + units: int, + num_heads: int, + attention_dropout_rate: float = 0.0, + sparsity: float = 0.8, + unidirectional: bool = False, + use_key_relative_position: bool = False, + use_value_relative_position: bool = False, + max_relative_position: Optional[int] = None, + heads_share_relative_embedding: bool = False, + ) -> None: + super().__init__() + + if units % num_heads != 0: + raise ValueError( + f"number of units {units} should be proportional to " + f"number of attention heads {num_heads}." + ) + + self.num_heads = num_heads + self.units = units + self.attention_dropout_rate = attention_dropout_rate + self.unidirectional = unidirectional + self.use_key_relative_position = use_key_relative_position + self.use_value_relative_position = use_value_relative_position + self.relative_length = max_relative_position + if self.relative_length is not None: + self.relative_length += 1 # include current time + self.heads_share_relative_embedding = heads_share_relative_embedding + + self._depth = units // self.num_heads + + # process queries + self._query_dense_layer = DenseWithSparseWeights( + units=units, use_bias=False, sparsity=sparsity + ) + # process keys + self._key_dense_layer = DenseWithSparseWeights( + units=units, use_bias=False, sparsity=sparsity + ) + # process values + self._value_dense_layer = DenseWithSparseWeights( + units=units, use_bias=False, sparsity=sparsity + ) + # process attention output + self._output_dense_layer = DenseWithSparseWeights( + units=units, sparsity=sparsity + ) + + self._create_relative_embeddings() + + def _create_relative_embeddings(self) -> None: + """Create relative embeddings.""" + + relative_embedding_shape = None + self.key_relative_embeddings = None + self.value_relative_embeddings = None + + if self.use_key_relative_position or self.use_value_relative_position: + if not self.relative_length: + raise ValueError( + f"Max relative position {self.relative_length} " + f"should be > 0 when using relative attention." 
+ ) + + if self.unidirectional: + relative_length = self.relative_length + else: + relative_length = 2 * self.relative_length - 1 + + if self.heads_share_relative_embedding: + relative_embedding_shape = (relative_length, self._depth) + else: + relative_embedding_shape = ( + self.num_heads, + relative_length, + self._depth, + ) + + if self.use_key_relative_position: + self.key_relative_embeddings = self.add_weight( + shape=relative_embedding_shape, name="key_relative_embeddings" + ) + + if self.use_value_relative_position: + self.value_relative_embeddings = self.add_weight( + shape=relative_embedding_shape, name="value_relative_embeddings" + ) + + def _pad_relative_embeddings(self, x: tf.Tensor, length: tf.Tensor) -> tf.Tensor: + # pad the left side to length + pad_left = x[:, :, :, :1, :] + pad_left = tf.tile(pad_left, (1, 1, 1, length - self.relative_length, 1)) + + # pad the right side to length + if self.unidirectional: + right_relative_length = 1 # current time + pad_right = tf.zeros_like(x[:, :, :, -1:, :]) + else: + right_relative_length = self.relative_length + pad_right = x[:, :, :, -1:, :] + pad_right = tf.tile(pad_right, (1, 1, 1, length - right_relative_length, 1)) + + return tf.concat([pad_left, x, pad_right], axis=-2) + + def _slice_relative_embeddings(self, x: tf.Tensor, length: tf.Tensor) -> tf.Tensor: + if self.unidirectional: + # pad the right side to relative_length + pad_right = tf.zeros_like(x[:, :, :, -1:, :]) + pad_right = tf.tile(pad_right, (1, 1, 1, self.relative_length - 1, 1)) + x = tf.concat([x, pad_right], axis=-2) + + extra_length = self.relative_length - length + full_length = tf.shape(x)[-2] + return x[:, :, :, extra_length : full_length - extra_length, :] + + def _relative_to_absolute_position(self, x: tf.Tensor) -> tf.Tensor: + """Universal method to convert tensor from relative to absolute indexing. + + "Slides" relative embeddings by 45 degree. + + Arguments: + x: A tensor of shape (batch, num_heads, length, relative_length, depth) + or (batch, num_heads, length, relative_length) + + Returns: + A tensor of shape (batch, num_heads, length, length, depth) + or (batch, num_heads, length, length) + """ + + x_dim = len(x.shape) + + if x_dim < 4 or x_dim > 5: + raise ValueError( + f"Relative tensor has a wrong shape {x.shape}, " + f"it should have 4 or 5 dimensions." 
+ ) + if x_dim == 4: + # add fake depth dimension + x = tf.expand_dims(x, axis=-1) + + batch = tf.shape(x)[0] + num_heads = tf.shape(x)[1] + length = tf.shape(x)[2] + depth = tf.shape(x)[-1] + + x = tf.cond( + length > self.relative_length, + lambda: self._pad_relative_embeddings(x, length), + lambda: self._slice_relative_embeddings(x, length), + ) + + # add a column of zeros to "slide" columns to diagonals through reshape + pad_shift = tf.zeros_like(x[:, :, :, -1:, :]) + x = tf.concat([x, pad_shift], axis=-2) + + # flatten length dimensions + x = tf.reshape(x, (batch, num_heads, -1, depth)) + width = 2 * length + + # add zeros so that the result of back reshape is still a matrix + pad_flat = tf.zeros_like( + x[:, :, : ((width - 1) - width * length % (width - 1)) % (width - 1), :] + ) + x = tf.concat([x, pad_flat], axis=-2) + + # "slide" columns to diagonals through reshape + x = tf.reshape(x, (batch, num_heads, -1, width - 1, depth)) + + # slice needed "diagonal" matrix + x = x[:, :, :-1, -length:, :] + + if x_dim == 4: + # remove fake depth dimension + x = tf.squeeze(x, axis=-1) + + return x + + def _matmul_with_relative_keys(self, x: tf.Tensor) -> tf.Tensor: + y = self.key_relative_embeddings + + if self.heads_share_relative_embedding: + matmul = tf.einsum("bhld,md->bhlm", x, y) + else: + matmul = tf.einsum("bhld,hmd->bhlm", x, y) + + return self._relative_to_absolute_position(matmul) + + def _tile_relative_embeddings(self, x: tf.Tensor, length: tf.Tensor) -> tf.Tensor: + if self.heads_share_relative_embedding: + x = tf.expand_dims(x, axis=0) # add head dimension + + x = tf.expand_dims(x, axis=1) # add length dimension + x = tf.tile(x, (1, length, 1, 1)) + return tf.expand_dims(x, axis=0) # add batch dimension + + def _squeeze_relative_embeddings(self, x: tf.Tensor) -> tf.Tensor: + x = tf.squeeze(x, axis=0) # squeeze batch dimension + if self.heads_share_relative_embedding: + x = tf.squeeze(x, axis=1) # squeeze head dimension + return x + + def _matmul_with_relative_values(self, x: tf.Tensor) -> tf.Tensor: + y = self._tile_relative_embeddings( + self.value_relative_embeddings, tf.shape(x)[-2] + ) + y = self._relative_to_absolute_position(y) + y = self._squeeze_relative_embeddings(y) + + if self.heads_share_relative_embedding: + return tf.einsum("bhlm,lmd->bhld", x, y) + else: + return tf.einsum("bhlm,hlmd->bhld", x, y) + + def _drop_attention_logits( + self, logits: tf.Tensor, pad_mask: tf.Tensor, training: tf.Tensor + ) -> tf.Tensor: + def droped_logits() -> tf.Tensor: + keep_prob = tf.random.uniform(tf.shape(logits), 0, 1) + pad_mask + drop_mask = tf.cast( + tf.less(keep_prob, self.attention_dropout_rate), logits.dtype + ) + + return logits + drop_mask * -1e9 + + return tf_utils.smart_cond(training, droped_logits, lambda: tf.identity(logits)) + + def _scaled_dot_product_attention( + self, + query: tf.Tensor, + key: tf.Tensor, + value: tf.Tensor, + pad_mask: tf.Tensor, + training: tf.Tensor, + ) -> Tuple[tf.Tensor, tf.Tensor]: + """Calculate the attention weights. + + query, key, value must have matching leading dimensions. + key, value must have matching penultimate dimension, + i.e.: seq_len_k = seq_len_v. + The mask has different shapes depending on its type (padding or look ahead) + but it must be broadcastable for addition. + + Arguments: + query: A tensor with shape (..., length, depth). + key: A tensor with shape (..., length, depth). + value: A tensor with shape (..., length, depth). + pad_mask: Float tensor with shape broadcastable + to (..., length, length). Defaults to None. 
+ + Returns: + output: A tensor with shape (..., length, depth). + attention_weights: A tensor with shape (..., length, length). + """ + + matmul_qk = tf.matmul(query, key, transpose_b=True) # (..., length, length) + + if self.use_key_relative_position: + matmul_qk += self._matmul_with_relative_keys(query) + + # scale matmul_qk + dk = tf.cast(tf.shape(key)[-1], tf.float32) + logits = matmul_qk / tf.math.sqrt(dk) + + # add the mask to the scaled tensor. + if pad_mask is not None: + logits += pad_mask * -1e9 + + # apply attention dropout before softmax to maintain attention_weights norm as 1 + if self.attention_dropout_rate > 0: + logits = self._drop_attention_logits(logits, pad_mask, training) + + # softmax is normalized on the last axis (length) so that the scores + # add up to 1. + attention_weights = tf.nn.softmax(logits, axis=-1) # (..., length, length) + + output = tf.matmul(attention_weights, value) # (..., length, depth) + if self.use_value_relative_position: + output += self._matmul_with_relative_values(attention_weights) + + return output, attention_weights + + def _split_heads(self, x: tf.Tensor) -> tf.Tensor: + """Split the last dimension into (num_heads, depth). + + Transpose the result such that the shape is + (batch_size, num_heads, length, depth) + """ + + x = tf.reshape(x, (tf.shape(x)[0], -1, self.num_heads, self._depth)) + return tf.transpose(x, perm=[0, 2, 1, 3]) + + def _combine_heads(self, x: tf.Tensor) -> tf.Tensor: + """Inverse of split_heads. + + Args: + x: A Tensor with shape [batch, num_heads, length, units / num_heads] + + Returns: + A Tensor with shape [batch, length, units] + """ + + # (batch_size, length, num_heads, depth) + x = tf.transpose(x, perm=[0, 2, 1, 3]) + # (batch_size, length, units) + return tf.reshape(x, (tf.shape(x)[0], -1, self.units)) + + # noinspection PyMethodOverriding + def call( + self, + query_input: tf.Tensor, + source_input: tf.Tensor, + pad_mask: Optional[tf.Tensor] = None, + training: Optional[Union[tf.Tensor, bool]] = None, + ) -> Tuple[tf.Tensor, tf.Tensor]: + """Apply attention mechanism to query_input and source_input. + + Arguments: + query_input: A tensor with shape [batch_size, length, input_size]. + source_input: A tensor with shape [batch_size, length, input_size]. + pad_mask: Float tensor with shape broadcastable + to (..., length, length). Defaults to None. + training: A bool, whether in training mode or not. + + Returns: + Attention layer output with shape [batch_size, length, units] + """ + if training is None: + training = K.learning_phase() + + query = self._query_dense_layer(query_input) # (batch_size, length, units) + key = self._key_dense_layer(source_input) # (batch_size, length, units) + value = self._value_dense_layer(source_input) # (batch_size, length, units) + + query = self._split_heads(query) # (batch_size, num_heads, length, depth) + key = self._split_heads(key) # (batch_size, num_heads, length, depth) + value = self._split_heads(value) # (batch_size, num_heads, length, depth) + + attention, attention_weights = self._scaled_dot_product_attention( + query, key, value, pad_mask, training + ) + # attention.shape == (batch_size, num_heads, length, depth) + # attention_weights.shape == (batch_size, num_heads, length, length) + attention = self._combine_heads(attention) # (batch_size, length, units) + + output = self._output_dense_layer(attention) # (batch_size, length, units) + + return output, attention_weights + + +class TransformerEncoderLayer(tf.keras.layers.Layer): + """Transformer encoder layer. 
+ + The layer is composed of the sublayers: + 1. Self-attention layer + 2. Feed-forward network (which is 2 fully-connected layers) + + Arguments: + units: Positive integer, output dim of hidden layer. + num_heads: Positive integer, number of heads + to repeat the same attention structure. + filter_units: Positive integer, output dim of the first ffn hidden layer. + dropout_rate: Float between 0 and 1; fraction of the input units to drop. + attention_dropout_rate: Float, dropout rate inside attention for training. + sparsity: Float between 0 and 1. Fraction of the `kernel` + weights to set to zero. + unidirectional: Boolean, use a unidirectional or bidirectional encoder. + use_key_relative_position: Boolean, if 'True' use key + relative embeddings in attention. + use_value_relative_position: Boolean, if 'True' use value + relative embeddings in attention. + max_relative_position: Positive integer, max position for relative embeddings. + heads_share_relative_embedding: Boolean, if 'True' + heads will share relative embeddings. + """ + + def __init__( + self, + units: int, + num_heads: int, + filter_units: int, + dropout_rate: float = 0.1, + attention_dropout_rate: float = 0.0, + sparsity: float = 0.8, + unidirectional: bool = False, + use_key_relative_position: bool = False, + use_value_relative_position: bool = False, + max_relative_position: Optional[int] = None, + heads_share_relative_embedding: bool = False, + ) -> None: + super().__init__() + + self._layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-6) + self._mha = MultiHeadAttention( + units, + num_heads, + attention_dropout_rate, + sparsity, + unidirectional, + use_key_relative_position, + use_value_relative_position, + max_relative_position, + heads_share_relative_embedding, + ) + self._dropout = tf.keras.layers.Dropout(dropout_rate) + + self._ffn_layers = [ + tf.keras.layers.LayerNormalization(epsilon=1e-6), + DenseWithSparseWeights( + units=filter_units, activation=tfa.activations.gelu, sparsity=sparsity + ), # (batch_size, length, filter_units) + tf.keras.layers.Dropout(dropout_rate), + DenseWithSparseWeights( + units=units, sparsity=sparsity + ), # (batch_size, length, units) + tf.keras.layers.Dropout(dropout_rate), + ] + + def call( + self, + x: tf.Tensor, + pad_mask: Optional[tf.Tensor] = None, + training: Optional[Union[tf.Tensor, bool]] = None, + ) -> tf.Tensor: + """Apply transformer encoder layer. + + Arguments: + x: A tensor with shape [batch_size, length, units]. + pad_mask: Float tensor with shape broadcastable + to (..., length, length). Defaults to None. + training: A bool, whether in training mode or not. + + Returns: + Transformer encoder layer output with shape [batch_size, length, units] + """ + if training is None: + training = K.learning_phase() + + x_norm = self._layer_norm(x) # (batch_size, length, units) + attn_out, _ = self._mha(x_norm, x_norm, pad_mask=pad_mask, training=training) + attn_out = self._dropout(attn_out, training=training) + x += attn_out + + ffn_out = x # (batch_size, length, units) + for layer in self._ffn_layers: + ffn_out = layer(ffn_out, training=training) + x += ffn_out + + return x # (batch_size, length, units) + + +class TransformerEncoder(tf.keras.layers.Layer): + """Transformer encoder. + + Encoder stack is made up of `num_layers` identical encoder layers. + + Arguments: + num_layers: Positive integer, number of encoder layers. + units: Positive integer, output dim of hidden layer. + num_heads: Positive integer, number of heads + to repeat the same attention structure. 
+ filter_units: Positive integer, output dim of the first ffn hidden layer. + reg_lambda: Float, regularization factor. + dropout_rate: Float between 0 and 1; fraction of the input units to drop. + attention_dropout_rate: Float, dropout rate inside attention for training. + sparsity: Float between 0 and 1. Fraction of the `kernel` + weights to set to zero. + unidirectional: Boolean, use a unidirectional or bidirectional encoder. + use_key_relative_position: Boolean, if 'True' use key + relative embeddings in attention. + use_value_relative_position: Boolean, if 'True' use value + relative embeddings in attention. + max_relative_position: Positive integer, max position for relative embeddings. + heads_share_relative_embedding: Boolean, if 'True' + heads will share relative embeddings. + name: Optional name of the layer. + """ + + def __init__( + self, + num_layers: int, + units: int, + num_heads: int, + filter_units: int, + reg_lambda: float, + dropout_rate: float = 0.1, + attention_dropout_rate: float = 0.0, + sparsity: float = 0.8, + unidirectional: bool = False, + use_key_relative_position: bool = False, + use_value_relative_position: bool = False, + max_relative_position: Optional[int] = None, + heads_share_relative_embedding: bool = False, + name: Optional[Text] = None, + ) -> None: + super().__init__(name=name) + + self.units = units + self.unidirectional = unidirectional + + l2_regularizer = tf.keras.regularizers.l2(reg_lambda) + self._embedding = DenseWithSparseWeights( + units=units, kernel_regularizer=l2_regularizer, sparsity=sparsity + ) + # positional encoding helpers + self._angles = self._get_angles() + self._even_indices = np.arange(0, self.units, 2, dtype=np.int32)[:, np.newaxis] + self._odd_indices = np.arange(1, self.units, 2, dtype=np.int32)[:, np.newaxis] + + self._dropout = tf.keras.layers.Dropout(dropout_rate) + + self._enc_layers = [ + TransformerEncoderLayer( + units, + num_heads, + filter_units, + dropout_rate, + attention_dropout_rate, + sparsity, + unidirectional, + use_key_relative_position, + use_value_relative_position, + max_relative_position, + heads_share_relative_embedding, + ) + for _ in range(num_layers) + ] + self._layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-6) + + def _get_angles(self) -> np.ndarray: + i = np.arange(self.units)[np.newaxis, :] + return 1 / np.power(10000, (2 * (i // 2)) / np.float32(self.units)) + + def _positional_encoding(self, max_position: tf.Tensor) -> tf.Tensor: + max_position = tf.cast(max_position, dtype=tf.float32) + angle_rads = tf.range(max_position)[:, tf.newaxis] * self._angles + + # transpose for easy slicing + angle_rads = tf.transpose(angle_rads, perm=[1, 0]) + shape = tf.shape(angle_rads) + # apply sin to even indices in the array; 2i + sin_even = tf.sin(tf.gather_nd(angle_rads, self._even_indices)) + pos_encoding_even = tf.scatter_nd(self._even_indices, sin_even, shape) + # apply cos to odd indices in the array; 2i+1 + cos_odd = tf.cos(tf.gather_nd(angle_rads, self._odd_indices)) + pos_encoding_odd = tf.scatter_nd(self._odd_indices, cos_odd, shape) + # combine even and odd positions and transpose back + pos_encoding = tf.transpose(pos_encoding_even + pos_encoding_odd, perm=[1, 0]) + # add batch dimension + return tf.stop_gradient(pos_encoding[tf.newaxis, ...]) + + @staticmethod + def _look_ahead_pad_mask(max_position: tf.Tensor) -> tf.Tensor: + pad_mask = 1 - tf.linalg.band_part(tf.ones((max_position, max_position)), -1, 0) + return pad_mask[tf.newaxis, tf.newaxis, :, :] # (1, 1, seq_len, seq_len) + + def 
call( + self, + x: tf.Tensor, + pad_mask: Optional[tf.Tensor] = None, + training: Optional[Union[tf.Tensor, bool]] = None, + ) -> tf.Tensor: + """Apply transformer encoder. + + Arguments: + x: A tensor with shape [batch_size, length, input_size]. + pad_mask: Float tensor with shape broadcastable + to (..., length, length). Defaults to None. + training: A bool, whether in training mode or not. + + Returns: + Transformer encoder output with shape [batch_size, length, units] + """ + + # adding embedding and position encoding. + x = self._embedding(x) # (batch_size, length, units) + x *= tf.math.sqrt(tf.cast(self.units, tf.float32)) + x += self._positional_encoding(tf.shape(x)[1]) + x = self._dropout(x, training=training) + + if pad_mask is not None: + pad_mask = tf.squeeze(pad_mask, -1) # (batch_size, length) + pad_mask = pad_mask[:, tf.newaxis, tf.newaxis, :] + # pad_mask.shape = (batch_size, 1, 1, length) + if self.unidirectional: + # add look ahead pad mask to emulate unidirectional behavior + pad_mask = tf.minimum( + 1.0, pad_mask + self._look_ahead_pad_mask(tf.shape(pad_mask)[-1]) + ) # (batch_size, 1, length, length) + + for layer in self._enc_layers: + x = layer(x, pad_mask=pad_mask, training=training) + + # if normalization is done in encoding layers, then it should also be done + # on the output, since the output can grow very large, being the sum of + # a whole stack of unnormalized layer outputs. + return self._layer_norm(x) # (batch_size, length, units) diff --git a/rasa/utils/train_utils.py b/rasa/utils/train_utils.py index 7befdecd4dc5..bc19adb39867 100644 --- a/rasa/utils/train_utils.py +++ b/rasa/utils/train_utils.py @@ -1,967 +1,231 @@ -from collections import namedtuple -import logging import numpy as np -from tqdm import tqdm -from sklearn.model_selection import train_test_split -import tensorflow as tf -from tensor2tensor.models.transformer import ( - transformer_base, - transformer_prepare_encoder, - transformer_encoder, +import logging +from typing import Optional, Text, Dict, Any, Union, List, Tuple + +from rasa.core.constants import DIALOGUE +from rasa.nlu.constants import TEXT, NUMBER_OF_SUB_TOKENS +from rasa.nlu.tokenizers.tokenizer import Token +import rasa.utils.io as io_utils +from rasa.utils.tensorflow.constants import ( + LABEL, + HIDDEN_LAYERS_SIZES, + NUM_TRANSFORMER_LAYERS, + NUM_HEADS, + DENSE_DIMENSION, + LOSS_TYPE, + SIMILARITY_TYPE, + NUM_NEG, + EVAL_NUM_EXAMPLES, + EVAL_NUM_EPOCHS, + REGULARIZATION_CONSTANT, + USE_MAX_NEG_SIM, + MAX_NEG_SIM, + MAX_POS_SIM, + EMBEDDING_DIMENSION, + DROP_RATE_DIALOGUE, + DROP_RATE_LABEL, + NEGATIVE_MARGIN_SCALE, + DROP_RATE, + EPOCHS, + SOFTMAX, + MARGIN, + AUTO, + INNER, + COSINE, ) -from tensor2tensor.layers.common_attention import large_compatible_negative -from rasa.utils.common import is_logging_disabled -import typing -from typing import List, Optional, Text, Dict, Tuple, Union, Generator, Callable, Any -if typing.TYPE_CHECKING: - from tensor2tensor.utils.hparam import HParams -# avoid warning println on contrib import - remove for tf 2 -tf.contrib._warning = None logger = logging.getLogger(__name__) -# namedtuple for all tf session related data -SessionData = namedtuple("SessionData", ("X", "Y", "label_ids")) - - -def load_tf_config(config: Dict[Text, Any]) -> Optional[tf.ConfigProto]: - """Prepare tf.ConfigProto for training""" - if config.get("tf_config") is not None: - return tf.ConfigProto(**config.pop("tf_config")) - else: - return None - -# noinspection PyPep8Naming -def train_val_split( - session_data: 
"SessionData", evaluate_on_num_examples: int, random_seed: int -) -> Tuple["SessionData", "SessionData"]: - """Create random hold out validation set using stratified split.""" - - label_counts = dict( - zip(*np.unique(session_data.label_ids, return_counts=True, axis=0)) - ) - - if evaluate_on_num_examples >= len(session_data.X) - len(label_counts): - raise ValueError( - "Validation set of {} is too large. Remaining train set " - "should be at least equal to number of classes {}." - "".format(evaluate_on_num_examples, len(label_counts)) - ) - elif evaluate_on_num_examples < len(label_counts): - raise ValueError( - "Validation set of {} is too small. It should be " - "at least equal to number of classes {}." - "".format(evaluate_on_num_examples, len(label_counts)) - ) - - counts = np.array([label_counts[label] for label in session_data.label_ids]) - - multi_X = session_data.X[counts > 1] - multi_Y = session_data.Y[counts > 1] - multi_label_ids = session_data.label_ids[counts > 1] +def normalize(values: np.ndarray, ranking_length: Optional[int] = 0) -> np.ndarray: + """Normalizes an array of positive numbers over the top `ranking_length` values. + Other values will be set to 0. + """ - solo_X = session_data.X[counts == 1] - solo_Y = session_data.Y[counts == 1] - solo_label_ids = session_data.label_ids[counts == 1] + new_values = values.copy() # prevent mutation of the input + if 0 < ranking_length < len(new_values): + ranked = sorted(new_values, reverse=True) + new_values[new_values < ranked[ranking_length - 1]] = 0 - (X_train, X_val, Y_train, Y_val, label_ids_train, label_ids_val) = train_test_split( - multi_X, - multi_Y, - multi_label_ids, - test_size=evaluate_on_num_examples, - random_state=random_seed, - stratify=multi_label_ids, - ) - X_train = np.concatenate([X_train, solo_X]) - Y_train = np.concatenate([Y_train, solo_Y]) - label_ids_train = np.concatenate([label_ids_train, solo_label_ids]) + if np.sum(new_values) > 0: + new_values = new_values / np.sum(new_values) - return ( - SessionData(X=X_train, Y=Y_train, label_ids=label_ids_train), - SessionData(X=X_val, Y=Y_val, label_ids=label_ids_val), - ) + return new_values -def shuffle_session_data(session_data: "SessionData") -> "SessionData": - """Shuffle session data.""" +def update_similarity_type(config: Dict[Text, Any]) -> Dict[Text, Any]: + """ + If SIMILARITY_TYPE is set to 'auto', update the SIMILARITY_TYPE depending + on the LOSS_TYPE. 
+ Args: + config: model configuration - ids = np.random.permutation(len(session_data.X)) - return SessionData( - X=session_data.X[ids], - Y=session_data.Y[ids], - label_ids=session_data.label_ids[ids], - ) + Returns: updated model configuration + """ + if config.get(SIMILARITY_TYPE) == AUTO: + if config[LOSS_TYPE] == SOFTMAX: + config[SIMILARITY_TYPE] = INNER + elif config[LOSS_TYPE] == MARGIN: + config[SIMILARITY_TYPE] = COSINE + return config -def split_session_data_by_label( - session_data: "SessionData", unique_label_ids: "np.ndarray" -) -> List["SessionData"]: - """Reorganize session data into a list of session data with the same labels.""" - label_data = [] - for label_id in unique_label_ids: - label_data.append( - SessionData( - X=session_data.X[session_data.label_ids == label_id], - Y=session_data.Y[session_data.label_ids == label_id], - label_ids=session_data.label_ids[session_data.label_ids == label_id], - ) - ) - return label_data +def align_token_features( + list_of_tokens: List[List[Token]], + in_token_features: np.ndarray, + shape: Optional[Tuple] = None, +) -> np.ndarray: + """Align token features to match tokens. + ConveRTTokenizer, LanguageModelTokenizers might split up tokens into sub-tokens. + We need to take the mean of the sub-token vectors and take that as token vector. -# noinspection PyPep8Naming -def balance_session_data( - session_data: "SessionData", batch_size: int, shuffle: bool -) -> "SessionData": - """Mix session data to account for class imbalance. + Args: + list_of_tokens: tokens for examples + in_token_features: token features from ConveRT + shape: shape of feature matrix - This batching strategy puts rare classes approximately in every other batch, - by repeating them. Mimics stratified batching, but also takes into account - that more populated classes should appear more often. + Returns: + Token features. 
""" + if shape is None: + shape = in_token_features.shape + out_token_features = np.zeros(shape) + + for example_idx, example_tokens in enumerate(list_of_tokens): + offset = 0 + for token_idx, token in enumerate(example_tokens): + number_sub_words = token.get(NUMBER_OF_SUB_TOKENS, 1) + + if number_sub_words > 1: + token_start_idx = token_idx + offset + token_end_idx = token_idx + offset + number_sub_words + + mean_vec = np.mean( + in_token_features[example_idx][token_start_idx:token_end_idx], + axis=0, + ) - num_examples = len(session_data.X) - unique_label_ids, counts_label_ids = np.unique( - session_data.label_ids, return_counts=True, axis=0 - ) - num_label_ids = len(unique_label_ids) - - # need to call every time, so that the data is shuffled inside each class - label_data = split_session_data_by_label(session_data, unique_label_ids) - - data_idx = [0] * num_label_ids - num_data_cycles = [0] * num_label_ids - skipped = [False] * num_label_ids - new_X = [] - new_Y = [] - new_label_ids = [] - while min(num_data_cycles) == 0: - if shuffle: - indices_of_labels = np.random.permutation(num_label_ids) - else: - indices_of_labels = range(num_label_ids) + offset += number_sub_words - 1 - for index in indices_of_labels: - if num_data_cycles[index] > 0 and not skipped[index]: - skipped[index] = True - continue + out_token_features[example_idx][token_idx] = mean_vec else: - skipped[index] = False - - index_batch_size = ( - int(counts_label_ids[index] / num_examples * batch_size) + 1 - ) - - new_X.append( - label_data[index].X[ - data_idx[index] : data_idx[index] + index_batch_size - ] - ) - new_Y.append( - label_data[index].Y[ - data_idx[index] : data_idx[index] + index_batch_size - ] - ) - new_label_ids.append( - label_data[index].label_ids[ - data_idx[index] : data_idx[index] + index_batch_size - ] - ) - - data_idx[index] += index_batch_size - if data_idx[index] >= counts_label_ids[index]: - num_data_cycles[index] += 1 - data_idx[index] = 0 - - if min(num_data_cycles) > 0: - break - - return SessionData( - X=np.concatenate(new_X), - Y=np.concatenate(new_Y), - label_ids=np.concatenate(new_label_ids), - ) - - -def gen_batch( - session_data: "SessionData", - batch_size: int, - batch_strategy: Text = "sequence", - shuffle: bool = False, -) -> Generator[Tuple["np.ndarray", "np.ndarray"], None, None]: - """Generate batches.""" - - if shuffle: - session_data = shuffle_session_data(session_data) - - if batch_strategy == "balanced": - session_data = balance_session_data(session_data, batch_size, shuffle) - - num_batches = session_data.X.shape[0] // batch_size + int( - session_data.X.shape[0] % batch_size > 0 - ) - - for batch_num in range(num_batches): - batch_x = session_data.X[batch_num * batch_size : (batch_num + 1) * batch_size] - batch_y = session_data.Y[batch_num * batch_size : (batch_num + 1) * batch_size] - - yield batch_x, batch_y - - -# noinspection PyPep8Naming -def create_tf_dataset( - session_data: "SessionData", - batch_size: Union["tf.Tensor", int], - batch_strategy: Text = "sequence", - shuffle: bool = False, -) -> "tf.data.Dataset": - """Create tf dataset.""" - - # set batch and sequence length to None - if session_data.X[0].ndim == 1: - shape_X = (None, session_data.X[0].shape[-1]) - else: - shape_X = (None, None, session_data.X[0].shape[-1]) - - if session_data.Y[0].ndim == 1: - shape_Y = (None, session_data.Y[0].shape[-1]) - else: - shape_Y = (None, None, session_data.Y[0].shape[-1]) - - return tf.data.Dataset.from_generator( - lambda batch_size_: gen_batch( - session_data, 
batch_size_, batch_strategy, shuffle - ), - output_types=(tf.float32, tf.float32), - output_shapes=(shape_X, shape_Y), - args=([batch_size]), - ) + out_token_features[example_idx][token_idx] = in_token_features[ + example_idx + ][token_idx + offset] + return out_token_features -def create_iterator_init_datasets( - session_data: "SessionData", - eval_session_data: "SessionData", - batch_size: Union["tf.Tensor", int], - batch_strategy: Text, -) -> Tuple["tf.data.Iterator", "tf.Operation", "tf.Operation"]: - """Create iterator and init datasets.""" - train_dataset = create_tf_dataset( - session_data, batch_size, batch_strategy=batch_strategy, shuffle=True - ) - - iterator = tf.data.Iterator.from_structure( - train_dataset.output_types, - train_dataset.output_shapes, - output_classes=train_dataset.output_classes, - ) - - train_init_op = iterator.make_initializer(train_dataset) - - if eval_session_data is not None: - eval_init_op = iterator.make_initializer( - create_tf_dataset(eval_session_data, batch_size) - ) - else: - eval_init_op = None - - return iterator, train_init_op, eval_init_op - - -# noinspection PyPep8Naming -def create_tf_fnn( - x_in: "tf.Tensor", - layer_sizes: List[int], - droprate: float, - C2: float, - is_training: "tf.Tensor", - layer_name_suffix: Text, - activation: Optional[Callable] = tf.nn.relu, - use_bias: bool = True, - kernel_initializer: Optional["tf.keras.initializers.Initializer"] = None, -) -> "tf.Tensor": - """Create nn with hidden layers and name suffix.""" - - reg = tf.contrib.layers.l2_regularizer(C2) - x = tf.nn.relu(x_in) - for i, layer_size in enumerate(layer_sizes): - x = tf.layers.dense( - inputs=x, - units=layer_size, - activation=activation, - use_bias=use_bias, - kernel_initializer=kernel_initializer, - kernel_regularizer=reg, - name="hidden_layer_{}_{}".format(layer_name_suffix, i), - reuse=tf.AUTO_REUSE, - ) - x = tf.layers.dropout(x, rate=droprate, training=is_training) - return x +def update_evaluation_parameters(config: Dict[Text, Any]) -> Dict[Text, Any]: + """ + If EVAL_NUM_EPOCHS is set to -1, evaluate at the end of the training. 
+ Args: + config: model configuration -def tf_normalize_if_cosine(x: "tf.Tensor", similarity_type: Text) -> "tf.Tensor": - """Normalize embedding if similarity type is cosine.""" + Returns: updated model configuration + """ - if similarity_type == "cosine": - return tf.nn.l2_normalize(x, -1) - elif similarity_type == "inner": - return x - else: + if config[EVAL_NUM_EPOCHS] == -1: + config[EVAL_NUM_EPOCHS] = config[EPOCHS] + elif config[EVAL_NUM_EPOCHS] < 1: raise ValueError( - "Wrong similarity type '{}', " - "should be 'cosine' or 'inner'" - "".format(similarity_type) - ) - - -# noinspection PyPep8Naming -def create_tf_embed( - x: "tf.Tensor", - embed_dim: int, - C2: float, - similarity_type: Text, - layer_name_suffix: Text, -) -> "tf.Tensor": - """Create dense embedding layer with a name.""" - - reg = tf.contrib.layers.l2_regularizer(C2) - embed_x = tf.layers.dense( - inputs=x, - units=embed_dim, - activation=None, - kernel_regularizer=reg, - name="embed_layer_{}".format(layer_name_suffix), - reuse=tf.AUTO_REUSE, - ) - # normalize embedding vectors for cosine similarity - return tf_normalize_if_cosine(embed_x, similarity_type) - - -def create_t2t_hparams( - num_transformer_layers: int, - transformer_size: int, - num_heads: int, - droprate: float, - pos_encoding: Text, - max_seq_length: int, - is_training: "tf.Tensor", -) -> "HParams": - """Create parameters for t2t transformer.""" - - hparams = transformer_base() - - hparams.num_hidden_layers = num_transformer_layers - hparams.hidden_size = transformer_size - # it seems to be factor of 4 for transformer architectures in t2t - hparams.filter_size = hparams.hidden_size * 4 - hparams.num_heads = num_heads - hparams.relu_dropout = droprate - hparams.pos = pos_encoding - - hparams.max_length = max_seq_length - - hparams.unidirectional_encoder = True - - hparams.self_attention_type = "dot_product_relative_v2" - hparams.max_relative_position = 5 - hparams.add_relative_to_values = True - - # When not in training mode, set all forms of dropout to zero. - for key, value in hparams.values().items(): - if key.endswith("dropout") or key == "label_smoothing": - setattr(hparams, key, value * tf.cast(is_training, tf.float32)) - - return hparams - - -# noinspection PyUnresolvedReferences -# noinspection PyPep8Naming -def create_t2t_transformer_encoder( - x_in: "tf.Tensor", - mask: "tf.Tensor", - attention_weights: Dict[Text, "tf.Tensor"], - hparams: "HParams", - C2: float, - is_training: "tf.Tensor", -) -> "tf.Tensor": - """Create t2t transformer encoder.""" - - with tf.variable_scope("transformer", reuse=tf.AUTO_REUSE): - x = create_tf_fnn( - x_in, - [hparams.hidden_size], - hparams.layer_prepostprocess_dropout, - C2, - is_training, - layer_name_suffix="pre_embed", - activation=None, - use_bias=False, - kernel_initializer=tf.random_normal_initializer( - 0.0, hparams.hidden_size ** -0.5 - ), - ) - if hparams.multiply_embedding_mode == "sqrt_depth": - x *= hparams.hidden_size ** 0.5 - - x *= tf.expand_dims(mask, -1) - ( - x, - self_attention_bias, - encoder_decoder_attention_bias, - ) = transformer_prepare_encoder(x, None, hparams) - - x *= tf.expand_dims(mask, -1) - - x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout) - - attn_bias_for_padding = None - # Otherwise the encoder will just use encoder_self_attention_bias. 
- if hparams.unidirectional_encoder: - attn_bias_for_padding = encoder_decoder_attention_bias - - x = transformer_encoder( - x, - self_attention_bias, - hparams, - nonpadding=mask, - save_weights_to=attention_weights, - attn_bias_for_padding=attn_bias_for_padding, - ) - - x *= tf.expand_dims(mask, -1) - - return tf.nn.dropout(tf.nn.relu(x), 1.0 - hparams.layer_prepostprocess_dropout) - - -def _tf_make_flat(x: "tf.Tensor") -> "tf.Tensor": - """Make tensor 2D.""" - - return tf.reshape(x, (-1, x.shape[-1])) - - -def _tf_sample_neg( - batch_size: "tf.Tensor", all_bs: "tf.Tensor", neg_ids: "tf.Tensor" -) -> "tf.Tensor": - """Sample negative examples for given indices""" - - tiled_all_bs = tf.tile(tf.expand_dims(all_bs, 0), (batch_size, 1, 1)) - - return tf.batch_gather(tiled_all_bs, neg_ids) - - -def _tf_calc_iou_mask( - pos_b: "tf.Tensor", all_bs: "tf.Tensor", neg_ids: "tf.Tensor" -) -> "tf.Tensor": - """Calculate IOU mask for given indices""" - - pos_b_in_flat = tf.expand_dims(pos_b, -2) - neg_b_in_flat = _tf_sample_neg(tf.shape(pos_b)[0], all_bs, neg_ids) - - intersection_b_in_flat = tf.minimum(neg_b_in_flat, pos_b_in_flat) - union_b_in_flat = tf.maximum(neg_b_in_flat, pos_b_in_flat) - - iou = tf.reduce_sum(intersection_b_in_flat, -1) / tf.reduce_sum(union_b_in_flat, -1) - return 1.0 - tf.nn.relu(tf.sign(1.0 - iou)) - - -def _tf_get_negs( - all_embed: "tf.Tensor", all_raw: "tf.Tensor", raw_pos: "tf.Tensor", num_neg: int -) -> Tuple["tf.Tensor", "tf.Tensor"]: - """Get negative examples from given tensor.""" - - if len(raw_pos.shape) == 3: - batch_size = tf.shape(raw_pos)[0] - seq_length = tf.shape(raw_pos)[1] - else: # len(raw_pos.shape) == 2 - batch_size = tf.shape(raw_pos)[0] - seq_length = 1 - - raw_flat = _tf_make_flat(raw_pos) - - total_candidates = tf.shape(all_embed)[0] - - all_indices = tf.tile( - tf.expand_dims(tf.range(0, total_candidates, 1), 0), - (batch_size * seq_length, 1), - ) - shuffled_indices = tf.transpose( - tf.random.shuffle(tf.transpose(all_indices, (1, 0))), (1, 0) - ) - neg_ids = shuffled_indices[:, :num_neg] - - bad_negs = _tf_calc_iou_mask(raw_flat, all_raw, neg_ids) - if len(raw_pos.shape) == 3: - bad_negs = tf.reshape(bad_negs, (batch_size, seq_length, -1)) - - neg_embed = _tf_sample_neg(batch_size * seq_length, all_embed, neg_ids) - if len(raw_pos.shape) == 3: - neg_embed = tf.reshape( - neg_embed, (batch_size, seq_length, -1, all_embed.shape[-1]) + f"'{EVAL_NUM_EXAMPLES}' is set to " + f"'{config[EVAL_NUM_EPOCHS]}'. " + f"Only values > 1 are allowed for this configuration value." 
) - return neg_embed, bad_negs + return config -def sample_negatives( - a_embed: "tf.Tensor", - b_embed: "tf.Tensor", - b_raw: "tf.Tensor", - all_b_embed: "tf.Tensor", - all_b_raw: "tf.Tensor", - num_neg: int, -) -> Tuple[ - "tf.Tensor", "tf.Tensor", "tf.Tensor", "tf.Tensor", "tf.Tensor", "tf.Tensor" -]: - """Sample negative examples.""" +def load_tf_hub_model(model_url: Text) -> Any: + """Load model from cache if possible, otherwise from TFHub""" - neg_dial_embed, dial_bad_negs = _tf_get_negs( - _tf_make_flat(a_embed), _tf_make_flat(b_raw), b_raw, num_neg - ) + import tensorflow_hub as tfhub - neg_bot_embed, bot_bad_negs = _tf_get_negs(all_b_embed, all_b_raw, b_raw, num_neg) - return ( - tf.expand_dims(a_embed, -2), - tf.expand_dims(b_embed, -2), - neg_dial_embed, - neg_bot_embed, - dial_bad_negs, - bot_bad_negs, - ) + # needed to load the ConveRT model + # noinspection PyUnresolvedReferences + import tensorflow_text + import os + # required to take care of cases when other files are already + # stored in the default TFHUB_CACHE_DIR + try: + return tfhub.load(model_url) + except OSError: + directory = io_utils.create_temporary_directory() + os.environ["TFHUB_CACHE_DIR"] = directory + return tfhub.load(model_url) -def tf_raw_sim( - a: "tf.Tensor", b: "tf.Tensor", mask: Optional["tf.Tensor"] -) -> "tf.Tensor": - """Calculate similarity between given tensors.""" - sim = tf.reduce_sum(a * b, -1) - if mask is not None: - sim *= tf.expand_dims(mask, 2) +def _replace_deprecated_option( + old_option: Text, new_option: Union[Text, List[Text]], config: Dict[Text, Any] +) -> Dict[Text, Any]: + if old_option in config: + if isinstance(new_option, str): + logger.warning( + f"Option '{old_option}' got renamed to '{new_option}'. " + f"Please update your configuration file." + ) + config[new_option] = config[old_option] + else: + logger.warning( + f"Option '{old_option}' got renamed to " + f"a dictionary '{new_option[0]}' with a key '{new_option[1]}'. " + f"Please update your configuration file." + ) + option_dict = config.get(new_option[0], {}) + option_dict[new_option[1]] = config[old_option] + config[new_option[0]] = option_dict - return sim + return config -def tf_sim( - pos_dial_embed: "tf.Tensor", - pos_bot_embed: "tf.Tensor", - neg_dial_embed: "tf.Tensor", - neg_bot_embed: "tf.Tensor", - dial_bad_negs: "tf.Tensor", - bot_bad_negs: "tf.Tensor", - mask: Optional["tf.Tensor"], -) -> Tuple["tf.Tensor", "tf.Tensor", "tf.Tensor", "tf.Tensor", "tf.Tensor"]: - """Define similarity.""" +def check_deprecated_options(config: Dict[Text, Any]) -> Dict[Text, Any]: + """ + If old model configuration parameters are present in the provided config, replace + them with the new parameters and log a warning. 
+ Args: + config: model configuration - # calculate similarity with several - # embedded actions for the loss - neg_inf = large_compatible_negative(pos_dial_embed.dtype) + Returns: updated model configuration + """ - sim_pos = tf_raw_sim(pos_dial_embed, pos_bot_embed, mask) - sim_neg = tf_raw_sim(pos_dial_embed, neg_bot_embed, mask) + neg_inf * bot_bad_negs - sim_neg_bot_bot = ( - tf_raw_sim(pos_bot_embed, neg_bot_embed, mask) + neg_inf * bot_bad_negs + config = _replace_deprecated_option( + "hidden_layers_sizes_pre_dial", [HIDDEN_LAYERS_SIZES, DIALOGUE], config ) - sim_neg_dial_dial = ( - tf_raw_sim(pos_dial_embed, neg_dial_embed, mask) + neg_inf * dial_bad_negs + config = _replace_deprecated_option( + "hidden_layers_sizes_bot", [HIDDEN_LAYERS_SIZES, LABEL], config ) - sim_neg_bot_dial = ( - tf_raw_sim(pos_bot_embed, neg_dial_embed, mask) + neg_inf * dial_bad_negs + config = _replace_deprecated_option("droprate", DROP_RATE, config) + config = _replace_deprecated_option("droprate_a", DROP_RATE_DIALOGUE, config) + config = _replace_deprecated_option("droprate_b", DROP_RATE_LABEL, config) + config = _replace_deprecated_option( + "hidden_layers_sizes_a", [HIDDEN_LAYERS_SIZES, TEXT], config ) - - # output similarities between user input and bot actions - # and similarities between bot actions and similarities between user inputs - return sim_pos, sim_neg, sim_neg_bot_bot, sim_neg_dial_dial, sim_neg_bot_dial - - -def tf_calc_accuracy(sim_pos: "tf.Tensor", sim_neg: "tf.Tensor") -> "tf.Tensor": - """Calculate accuracy""" - - max_all_sim = tf.reduce_max(tf.concat([sim_pos, sim_neg], -1), -1) - return tf.reduce_mean( - tf.cast(tf.math.equal(max_all_sim, tf.squeeze(sim_pos, -1)), tf.float32) + config = _replace_deprecated_option( + "hidden_layers_sizes_b", [HIDDEN_LAYERS_SIZES, LABEL], config ) - - -# noinspection PyPep8Naming -def tf_loss_margin( - sim_pos: "tf.Tensor", - sim_neg: "tf.Tensor", - sim_neg_bot_bot: "tf.Tensor", - sim_neg_dial_dial: "tf.Tensor", - sim_neg_bot_dial: "tf.Tensor", - mask: Optional["tf.Tensor"], - mu_pos: float, - mu_neg: float, - use_max_sim_neg: bool, - C_emb: float, -) -> "tf.Tensor": - """Define max margin loss.""" - - # loss for maximizing similarity with correct action - loss = tf.maximum(0.0, mu_pos - tf.squeeze(sim_pos, -1)) - - # loss for minimizing similarity with `num_neg` incorrect actions - if use_max_sim_neg: - # minimize only maximum similarity over incorrect actions - max_sim_neg = tf.reduce_max(sim_neg, -1) - loss += tf.maximum(0.0, mu_neg + max_sim_neg) - else: - # minimize all similarities with incorrect actions - max_margin = tf.maximum(0.0, mu_neg + sim_neg) - loss += tf.reduce_sum(max_margin, -1) - - # penalize max similarity between pos bot and neg bot embeddings - max_sim_neg_bot = tf.maximum(0.0, tf.reduce_max(sim_neg_bot_bot, -1)) - loss += max_sim_neg_bot * C_emb - - # penalize max similarity between pos dial and neg dial embeddings - max_sim_neg_dial = tf.maximum(0.0, tf.reduce_max(sim_neg_dial_dial, -1)) - loss += max_sim_neg_dial * C_emb - - # penalize max similarity between pos bot and neg dial embeddings - max_sim_neg_dial = tf.maximum(0.0, tf.reduce_max(sim_neg_bot_dial, -1)) - loss += max_sim_neg_dial * C_emb - - if mask is not None: - # mask loss for different length sequences - loss *= mask - # average the loss over sequence length - loss = tf.reduce_sum(loss, -1) / tf.reduce_sum(mask, 1) - - # average the loss over the batch - loss = tf.reduce_mean(loss) - - # add regularization losses - loss += tf.losses.get_regularization_loss() - - 
return loss - - -def tf_loss_softmax( - sim_pos: "tf.Tensor", - sim_neg: "tf.Tensor", - sim_neg_bot_bot: "tf.Tensor", - sim_neg_dial_dial: "tf.Tensor", - sim_neg_bot_dial: "tf.Tensor", - mask: Optional["tf.Tensor"], - scale_loss: bool, -) -> "tf.Tensor": - """Define softmax loss.""" - - logits = tf.concat( - [sim_pos, sim_neg, sim_neg_bot_bot, sim_neg_dial_dial, sim_neg_bot_dial], -1 - ) - - # create labels for softmax - if len(logits.shape) == 3: - pos_labels = tf.ones_like(logits[:, :, :1]) - neg_labels = tf.zeros_like(logits[:, :, 1:]) - else: # len(logits.shape) == 2 - pos_labels = tf.ones_like(logits[:, :1]) - neg_labels = tf.zeros_like(logits[:, 1:]) - labels = tf.concat([pos_labels, neg_labels], -1) - - if mask is None: - mask = 1.0 - - if scale_loss: - # mask loss by prediction confidence - pred = tf.nn.softmax(logits) - if len(pred.shape) == 3: - pos_pred = pred[:, :, 0] - else: # len(pred.shape) == 2 - pos_pred = pred[:, 0] - mask *= tf.pow((1 - pos_pred) / 0.5, 4) - - loss = tf.losses.softmax_cross_entropy(labels, logits, mask) - # add regularization losses - loss += tf.losses.get_regularization_loss() - - return loss - - -# noinspection PyPep8Naming -def choose_loss( - sim_pos: "tf.Tensor", - sim_neg: "tf.Tensor", - sim_neg_bot_bot: "tf.Tensor", - sim_neg_dial_dial: "tf.Tensor", - sim_neg_bot_dial: "tf.Tensor", - mask: Optional["tf.Tensor"], - loss_type: Text, - mu_pos: float, - mu_neg: float, - use_max_sim_neg: bool, - C_emb: float, - scale_loss: bool, -) -> "tf.Tensor": - """Use loss depending on given option.""" - - if loss_type == "margin": - return tf_loss_margin( - sim_pos, - sim_neg, - sim_neg_bot_bot, - sim_neg_dial_dial, - sim_neg_bot_dial, - mask, - mu_pos, - mu_neg, - use_max_sim_neg, - C_emb, - ) - elif loss_type == "softmax": - return tf_loss_softmax( - sim_pos, - sim_neg, - sim_neg_bot_bot, - sim_neg_dial_dial, - sim_neg_bot_dial, - mask, - scale_loss, - ) - else: - raise ValueError( - "Wrong loss type '{}', " - "should be 'margin' or 'softmax'" - "".format(loss_type) - ) - - -# noinspection PyPep8Naming -def calculate_loss_acc( - a_embed: "tf.Tensor", - b_embed: "tf.Tensor", - b_raw: "tf.Tensor", - all_b_embed: "tf.Tensor", - all_b_raw: "tf.Tensor", - num_neg: int, - mask: Optional["tf.Tensor"], - loss_type: Text, - mu_pos: float, - mu_neg: float, - use_max_sim_neg: bool, - C_emb: float, - scale_loss: bool, -) -> Tuple["tf.Tensor", "tf.Tensor"]: - """Calculate loss and accuracy.""" - - ( - pos_dial_embed, - pos_bot_embed, - neg_dial_embed, - neg_bot_embed, - dial_bad_negs, - bot_bad_negs, - ) = sample_negatives(a_embed, b_embed, b_raw, all_b_embed, all_b_raw, num_neg) - - # calculate similarities - (sim_pos, sim_neg, sim_neg_bot_bot, sim_neg_dial_dial, sim_neg_bot_dial) = tf_sim( - pos_dial_embed, - pos_bot_embed, - neg_dial_embed, - neg_bot_embed, - dial_bad_negs, - bot_bad_negs, - mask, + config = _replace_deprecated_option( + "num_transformer_layers", NUM_TRANSFORMER_LAYERS, config ) - - acc = tf_calc_accuracy(sim_pos, sim_neg) - - loss = choose_loss( - sim_pos, - sim_neg, - sim_neg_bot_bot, - sim_neg_dial_dial, - sim_neg_bot_dial, - mask, - loss_type, - mu_pos, - mu_neg, - use_max_sim_neg, - C_emb, - scale_loss, + config = _replace_deprecated_option("num_heads", NUM_HEADS, config) + config = _replace_deprecated_option("dense_dim", DENSE_DIMENSION, config) + config = _replace_deprecated_option("embed_dim", EMBEDDING_DIMENSION, config) + config = _replace_deprecated_option("num_neg", NUM_NEG, config) + config = _replace_deprecated_option("mu_pos", MAX_POS_SIM, 
config) + config = _replace_deprecated_option("mu_neg", MAX_NEG_SIM, config) + config = _replace_deprecated_option("use_max_sim_neg", USE_MAX_NEG_SIM, config) + config = _replace_deprecated_option("C2", REGULARIZATION_CONSTANT, config) + config = _replace_deprecated_option("C_emb", NEGATIVE_MARGIN_SCALE, config) + config = _replace_deprecated_option( + "evaluate_every_num_epochs", EVAL_NUM_EPOCHS, config ) - - return loss, acc - - -def confidence_from_sim(sim: "tf.Tensor", similarity_type: Text) -> "tf.Tensor": - if similarity_type == "cosine": - # clip negative values to zero - return tf.nn.relu(sim) - else: - # normalize result to [0, 1] with softmax - return tf.nn.softmax(sim) - - -def linearly_increasing_batch_size( - epoch: int, batch_size: Union[List[int], int], epochs: int -) -> int: - """Linearly increase batch size with every epoch. - - The idea comes from https://arxiv.org/abs/1711.00489. - """ - - if not isinstance(batch_size, list): - return int(batch_size) - - if epochs > 1: - return int( - batch_size[0] + epoch * (batch_size[1] - batch_size[0]) / (epochs - 1) - ) - else: - return int(batch_size[0]) - - -def output_validation_stat( - eval_init_op: "tf.Operation", - loss: "tf.Tensor", - acc: "tf.Tensor", - session: "tf.Session", - is_training: "tf.Session", - batch_size_in: "tf.Tensor", - ep_batch_size: int, -) -> Tuple[float, float]: - """Output training statistics""" - - session.run(eval_init_op, feed_dict={batch_size_in: ep_batch_size}) - ep_val_loss = 0 - ep_val_acc = 0 - batches_per_epoch = 0 - while True: - try: - batch_val_loss, batch_val_acc = session.run( - [loss, acc], feed_dict={is_training: False} - ) - batches_per_epoch += 1 - ep_val_loss += batch_val_loss - ep_val_acc += batch_val_acc - except tf.errors.OutOfRangeError: - break - - return ep_val_loss / batches_per_epoch, ep_val_acc / batches_per_epoch - - -def train_tf_dataset( - train_init_op: "tf.Operation", - eval_init_op: "tf.Operation", - batch_size_in: "tf.Tensor", - loss: "tf.Tensor", - acc: "tf.Tensor", - train_op: "tf.Tensor", - session: "tf.Session", - is_training: "tf.Session", - epochs: int, - batch_size: Union[List[int], int], - evaluate_on_num_examples: int, - evaluate_every_num_epochs: int, -) -> None: - """Train tf graph""" - - session.run(tf.global_variables_initializer()) - - if evaluate_on_num_examples: - logger.info( - "Validation accuracy is calculated every {} epochs" - "".format(evaluate_every_num_epochs) - ) - pbar = tqdm(range(epochs), desc="Epochs", disable=is_logging_disabled()) - - train_loss = 0 - train_acc = 0 - val_loss = 0 - val_acc = 0 - for ep in pbar: - - ep_batch_size = linearly_increasing_batch_size(ep, batch_size, epochs) - - session.run(train_init_op, feed_dict={batch_size_in: ep_batch_size}) - - ep_train_loss = 0 - ep_train_acc = 0 - batches_per_epoch = 0 - while True: - try: - _, batch_train_loss, batch_train_acc = session.run( - [train_op, loss, acc], feed_dict={is_training: True} - ) - batches_per_epoch += 1 - ep_train_loss += batch_train_loss - ep_train_acc += batch_train_acc - - except tf.errors.OutOfRangeError: - break - - train_loss = ep_train_loss / batches_per_epoch - train_acc = ep_train_acc / batches_per_epoch - - postfix_dict = { - "loss": "{:.3f}".format(train_loss), - "acc": "{:.3f}".format(train_acc), - } - - if eval_init_op is not None: - if (ep + 1) % evaluate_every_num_epochs == 0 or (ep + 1) == epochs: - val_loss, val_acc = output_validation_stat( - eval_init_op, - loss, - acc, - session, - is_training, - batch_size_in, - ep_batch_size, - ) - - 
postfix_dict.update( - { - "val_loss": "{:.3f}".format(val_loss), - "val_acc": "{:.3f}".format(val_acc), - } - ) - - pbar.set_postfix(postfix_dict) - - final_message = ( - "Finished training embedding policy, " - "train loss={:.3f}, train accuracy={:.3f}" - "".format(train_loss, train_acc) + config = _replace_deprecated_option( + "evaluate_on_num_examples", EVAL_NUM_EXAMPLES, config ) - if eval_init_op is not None: - final_message += ( - ", validation loss={:.3f}, validation accuracy={:.3f}" - "".format(val_loss, val_acc) - ) - logger.info(final_message) - - -def extract_attention(attention_weights) -> Optional["tf.Tensor"]: - """Extract attention probabilities from t2t dict""" - - attention = [ - tf.expand_dims(t, 0) - for name, t in attention_weights.items() - # the strings come from t2t library - if "multihead_attention/dot_product" in name and not name.endswith("/logits") - ] - - if attention: - return tf.concat(attention, 0) - - -def persist_tensor(name: Text, tensor: "tf.Tensor", graph: "tf.Graph") -> None: - """Add tensor to collection if it is not None""" - - if tensor is not None: - graph.clear_collection(name) - graph.add_to_collection(name, tensor) - - -def load_tensor(name: Text) -> Optional["tf.Tensor"]: - """Load tensor or set it to None""" - tensor_list = tf.get_collection(name) - return tensor_list[0] if tensor_list else None + return config diff --git a/rasa/utils/validation.py b/rasa/utils/validation.py index 817e150db16b..d4b70fa11ea4 100644 --- a/rasa/utils/validation.py +++ b/rasa/utils/validation.py @@ -1,15 +1,15 @@ -from typing import Text +from typing import Text, Dict, Any from ruamel.yaml.constructor import DuplicateKeyError -from rasa.constants import PACKAGE_NAME +from rasa.constants import PACKAGE_NAME, DOCS_URL_TRAINING_DATA_NLU class InvalidYamlFileError(ValueError): """Raised if an invalid yaml file was provided.""" def __init__(self, message: Text) -> None: - super(InvalidYamlFileError, self).__init__(message) + super().__init__(message) def validate_yaml_schema( @@ -64,3 +64,27 @@ def validate_yaml_schema( "take a look at the errors logged during " "validation previous to this exception." ) + + +def validate_training_data(json_data: Dict[Text, Any], schema: Dict[Text, Any]) -> None: + """Validate rasa training data format to ensure proper training. + + Args: + json_data: the data to validate + schema: the schema + + Raises: + ValidationError if validation fails. + """ + from jsonschema import validate + from jsonschema import ValidationError + + try: + validate(json_data, schema) + except ValidationError as e: + e.message += ( + f". Failed to validate data, make sure your data " + f"is valid. For more information about the format visit " + f"{DOCS_URL_TRAINING_DATA_NLU}." 
+        ) +        raise e diff --git a/rasa/validator.py b/rasa/validator.py new file mode 100644 index 000000000000..db532c5726f1 --- /dev/null +++ b/rasa/validator.py @@ -0,0 +1,330 @@ +import logging +from collections import defaultdict +from typing import Set, Text, Optional, Dict, Any + +from packaging import version +from packaging.version import LegacyVersion + +import rasa.core.training.story_conflict +from rasa.constants import ( +    DOCS_URL_DOMAINS, +    DOCS_URL_ACTIONS, +    LATEST_TRAINING_DATA_FORMAT_VERSION, +    DOCS_BASE_URL, +) +from rasa.core.constants import UTTER_PREFIX +from rasa.core.domain import Domain +from rasa.core.events import ActionExecuted +from rasa.core.events import UserUttered +from rasa.core.training.generator import TrainingDataGenerator +from rasa.core.training.structures import StoryGraph +from rasa.importers.importer import TrainingDataImporter +from rasa.nlu.training_data import TrainingData +from rasa.utils.common import raise_warning + +logger = logging.getLogger(__name__) + +KEY_TRAINING_DATA_FORMAT_VERSION = "version" + + +class Validator: +    """A class used to verify usage of intents and utterances.""" + +    def __init__( +        self, domain: Domain, intents: TrainingData, story_graph: StoryGraph +    ) -> None: +        """Initializes the Validator object.""" + +        self.domain = domain +        self.intents = intents +        self.story_graph = story_graph + +    @classmethod +    async def from_importer(cls, importer: TrainingDataImporter) -> "Validator": +        """Create an instance from the domain, nlu and story files.""" + +        domain = await importer.get_domain() +        story_graph = await importer.get_stories() +        intents = await importer.get_nlu_data() + +        return cls(domain, intents, story_graph) + +    def verify_intents(self, ignore_warnings: bool = True) -> bool: +        """Compares list of intents in domain with intents in NLU training data.""" + +        everything_is_alright = True + +        nlu_data_intents = {e.data["intent"] for e in self.intents.intent_examples} + +        for intent in self.domain.intents: +            if intent not in nlu_data_intents: +                logger.debug( +                    f"The intent '{intent}' is listed in the domain file, but " +                    f"is not found in the NLU training data." +                ) +                everything_is_alright = ignore_warnings and everything_is_alright + +        for intent in nlu_data_intents: +            if intent not in self.domain.intents: +                raise_warning( +                    f"There is a message in the training data labeled with intent " +                    f"'{intent}'. This intent is not listed in your domain. You " +                    f"should add that intent to your domain file!", +                    docs=DOCS_URL_DOMAINS, +                ) +                everything_is_alright = False + +        return everything_is_alright + +    def verify_example_repetition_in_intents( +        self, ignore_warnings: bool = True +    ) -> bool: +        """Checks if there is no duplicated example in different intents.""" + +        everything_is_alright = True + +        duplication_hash = defaultdict(set) +        for example in self.intents.intent_examples: +            text = example.text +            duplication_hash[text].add(example.get("intent")) + +        for text, intents in duplication_hash.items(): + +            if len(duplication_hash[text]) > 1: +                everything_is_alright = ignore_warnings and everything_is_alright +                intents_string = ", ".join(sorted(intents)) +                raise_warning( +                    f"The example '{text}' was found labeled with multiple " +                    f"different intents in the training data. Each annotated message " +                    f"should only appear with one intent. You should fix that " +                    f"conflict. The example is labeled with: {intents_string}."
+                ) +        return everything_is_alright + +    def verify_intents_in_stories(self, ignore_warnings: bool = True) -> bool: +        """Checks intents used in stories. + +        Verifies if the intents used in the stories are valid, and whether +        all valid intents are used in the stories.""" + +        everything_is_alright = self.verify_intents(ignore_warnings) + +        stories_intents = { +            event.intent["name"] +            for story in self.story_graph.story_steps +            for event in story.events +            if type(event) == UserUttered +        } + +        for story_intent in stories_intents: +            if story_intent not in self.domain.intents: +                raise_warning( +                    f"The intent '{story_intent}' is used in your stories, but it " +                    f"is not listed in the domain file. You should add it to your " +                    f"domain file!", +                    docs=DOCS_URL_DOMAINS, +                ) +                everything_is_alright = False + +        for intent in self.domain.intents: +            if intent not in stories_intents: +                logger.debug(f"The intent '{intent}' is not used in any story.") +                everything_is_alright = ignore_warnings and everything_is_alright + +        return everything_is_alright + +    def _gather_utterance_actions(self) -> Set[Text]: +        """Return all utterances which are actions.""" +        return { +            utterance +            for utterance in self.domain.templates.keys() +            if utterance in self.domain.action_names +        } + +    def verify_utterances(self, ignore_warnings: bool = True) -> bool: +        """Compares list of utterances in actions with utterances in responses.""" + +        actions = self.domain.action_names +        utterance_templates = set(self.domain.templates) +        everything_is_alright = True + +        for utterance in utterance_templates: +            if utterance not in actions: +                logger.debug( +                    f"The utterance '{utterance}' is not listed under 'actions' in the " +                    f"domain file. It can only be used as a template." +                ) +                everything_is_alright = ignore_warnings and everything_is_alright + +        for action in actions: +            if action.startswith(UTTER_PREFIX): +                if action not in utterance_templates: +                    raise_warning( +                        f"There is no template for the utterance action '{action}'. " +                        f"The action is listed in your domain's action list, but " +                        f"there is no template defined with this name. You should " +                        f"add a template with this key.", +                        docs=DOCS_URL_ACTIONS + "#utterance-actions", +                    ) +                    everything_is_alright = False + +        return everything_is_alright + +    def verify_utterances_in_stories(self, ignore_warnings: bool = True) -> bool: +        """Verifies usage of utterances in stories. + +        Checks whether utterances used in the stories are valid, +        and whether all valid utterances are used in stories.""" + +        everything_is_alright = self.verify_utterances() + +        utterance_actions = self._gather_utterance_actions() +        stories_utterances = set() + +        for story in self.story_graph.story_steps: +            for event in story.events: +                if not isinstance(event, ActionExecuted): +                    continue +                if not event.action_name.startswith(UTTER_PREFIX): +                    # we are only interested in utter actions +                    continue + +                if event.action_name in stories_utterances: +                    # we already processed this one before, we only want to warn once +                    continue + +                if event.action_name not in utterance_actions: +                    raise_warning( +                        f"The action '{event.action_name}' is used in the stories, " +                        f"but is not a valid utterance action. 
Please make sure " +                        f"the action is listed in your domain and there is a " +                        f"template defined with its name.", +                        docs=DOCS_URL_ACTIONS + "#utterance-actions", +                    ) +                    everything_is_alright = False +                stories_utterances.add(event.action_name) + +        for utterance in utterance_actions: +            if utterance not in stories_utterances: +                logger.debug(f"The utterance '{utterance}' is not used in any story.") +                everything_is_alright = ignore_warnings and everything_is_alright + +        return everything_is_alright + +    def verify_story_structure( +        self, ignore_warnings: bool = True, max_history: Optional[int] = None +    ) -> bool: +        """Verifies that the bot behaviour in stories is deterministic. + +        Args: +            ignore_warnings: When `True`, return `True` even if conflicts were found. +            max_history: Maximal number of events to take into account for conflict identification. + +        Returns: +            `False` if a conflict was found and `ignore_warnings` is `False`. +            `True` otherwise. +        """ + +        logger.info("Story structure validation...") + +        trackers = TrainingDataGenerator( +            self.story_graph, +            domain=self.domain, +            remove_duplicates=False, +            augmentation_factor=0, +        ).generate() + +        # Create a list of `StoryConflict` objects +        conflicts = rasa.core.training.story_conflict.find_story_conflicts( +            trackers, self.domain, max_history +        ) + +        if not conflicts: +            logger.info("No story structure conflicts found.") +        else: +            for conflict in conflicts: +                logger.warning(conflict) + +        return ignore_warnings or not conflicts + +    def verify_nlu(self, ignore_warnings: bool = True) -> bool: +        """Runs all the validations on intents and utterances.""" + +        logger.info("Validating intents...") +        intents_are_valid = self.verify_intents_in_stories(ignore_warnings) + +        logger.info("Validating uniqueness of intents and stories...") +        there_is_no_duplication = self.verify_example_repetition_in_intents( +            ignore_warnings +        ) + +        logger.info("Validating utterances...") +        stories_are_valid = self.verify_utterances_in_stories(ignore_warnings) +        return intents_are_valid and stories_are_valid and there_is_no_duplication + +    def verify_domain_validity(self) -> bool: +        """Checks whether the domain returned by the importer is empty. + +        An empty domain is invalid.""" + +        return not self.domain.is_empty() + +    @staticmethod +    def validate_training_data_format_version( +        yaml_file_content: Dict[Text, Any], filename: Text +    ) -> bool: +        """Validates version on the training data content using `version` field +        and warns users if the file is not compatible with the current version of +        Rasa Open Source. + +        Args: +            yaml_file_content: Raw content of training data file as a dictionary. +            filename: Name of the validated file. + +        Returns: +            `True` if the file can be processed by current version of Rasa Open Source, +            `False` otherwise. +        """ +        if not isinstance(yaml_file_content, dict): +            raise ValueError(f"Failed to validate {filename}.") + +        version_value = yaml_file_content.get(KEY_TRAINING_DATA_FORMAT_VERSION) + +        if not version_value: +            raise_warning( +                f"Training data file {filename} doesn't have a " +                f"'{KEY_TRAINING_DATA_FORMAT_VERSION}' key. 
" + f"Rasa Open Source will read the file as a " + f"version '{LATEST_TRAINING_DATA_FORMAT_VERSION}' file.", + docs=DOCS_BASE_URL, + ) + return True + + try: + parsed_version = version.parse(version_value) + if isinstance(parsed_version, LegacyVersion): + raise TypeError + + if version.parse(LATEST_TRAINING_DATA_FORMAT_VERSION) >= parsed_version: + return True + + except TypeError: + raise_warning( + f"Training data file {filename} must specify " + f"'{KEY_TRAINING_DATA_FORMAT_VERSION}' as string, for example:\n" + f"{KEY_TRAINING_DATA_FORMAT_VERSION}: '{LATEST_TRAINING_DATA_FORMAT_VERSION}'\n" + f"Rasa Open Source will read the file as a " + f"version '{LATEST_TRAINING_DATA_FORMAT_VERSION}' file.", + docs=DOCS_BASE_URL, + ) + return True + + raise_warning( + f"Training data file {filename} has a greater format version than " + f"your Rasa Open Source installation: " + f"{version_value} > {LATEST_TRAINING_DATA_FORMAT_VERSION}. " + f"Please consider updating to the latest version of Rasa Open Source." + f"This file will be skipped.", + docs=DOCS_BASE_URL, + ) + return False diff --git a/rasa/version.py b/rasa/version.py index 984d51136983..62a2a2220277 100644 --- a/rasa/version.py +++ b/rasa/version.py @@ -1 +1,3 @@ -__version__ = "1.4.0a1" +# this file will automatically be changed, +# do not add anything but the version number here! +__version__ = "2.0.0a2" diff --git a/rasa_core/__init__.py b/rasa_core/__init__.py deleted file mode 100644 index 178601a6c8ff..000000000000 --- a/rasa_core/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -import rasa.core -import sys -import warnings - -# this makes sure old code can still import from `rasa_core` -# although the package has been moved to `rasa.core` -sys.modules["rasa_core"] = rasa.core - -warnings.warn( - "The 'rasa_core' package has been renamed. You should change " - "your imports to use 'rasa.core' instead.", - UserWarning, -) diff --git a/rasa_nlu/__init__.py b/rasa_nlu/__init__.py deleted file mode 100644 index 4937373447b8..000000000000 --- a/rasa_nlu/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -import rasa.nlu -import sys -import warnings - -# this makes sure old code can still import from `rasa_nlu` -# although the package has been moved to `rasa.nlu` -sys.modules["rasa_nlu"] = rasa.nlu - -warnings.warn( - "The 'rasa_nlu' package has been renamed. You should change " - "your imports to use 'rasa.nlu' instead.", - UserWarning, -) diff --git a/requirements-dev.txt b/requirements-dev.txt deleted file mode 100644 index f189cb0531b1..000000000000 --- a/requirements-dev.txt +++ /dev/null @@ -1,28 +0,0 @@ -# base and pipeline dependencies -# requirements_full.txt includes requirements.txt, so no need to -# include it here. 
--r alt_requirements/requirements_full.txt - -# test -pytest==4.5.0 -pytest-cov==2.7.1 -pytest-localserver==0.5.0 -pytest-sanic==1.0.0 -freezegun==0.3.12 -responses==0.9.0 -nbsphinx==0.3.2 -aioresponses==0.6.0 -moto==1.3.8 -fakeredis==1.0.3 -six>=1.12.0 # upstream - should be removed if fakeredis depends on at least 1.12.0 - -# lint/format/types -black==19.3b0; python_version>='3.6' -flake8==3.7.8 -pytype==2019.7.11 - -# other -google-cloud-storage==1.7.0 -azure-storage-blob==1.0.0 -coveralls==1.7.0 - diff --git a/requirements-docs.txt b/requirements-docs.txt deleted file mode 100644 index 9f5b0e4e524d..000000000000 --- a/requirements-docs.txt +++ /dev/null @@ -1,13 +0,0 @@ -sphinx==1.8.2 -sphinx-autobuild==0.7.1 -sphinxcontrib-programoutput==0.11 -nbsphinx==0.3.2 -pygments==2.2.0 -sphinxcontrib-httpdomain==1.6.1 -sphinxcontrib-websupport==1.1.0 -sphinxcontrib-trio==1.0.2 -sphinx-tabs==1.1.11 -sphinx-autodoc-typehints==1.6.0 -https://storage.googleapis.com/docs-theme/rasabaster-0.7.23.tar.gz -git+https://github.com/RasaHQ/sphinxcontrib-versioning.git#egg=sphinxcontrib-versioning -git+https://github.com/RasaHQ/sphinx_rtd_theme.git#egg=sphinx_rtd_theme diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 4d7ea396e7e7..000000000000 --- a/requirements.txt +++ /dev/null @@ -1,60 +0,0 @@ -## Setup py requirements -requests==2.22.0 -boto3==1.9.146 -matplotlib==3.0.3 -simplejson==3.16.0 -attrs==19.1.0 -jsonpickle==1.1 -redis==3.3.5 -pymongo==3.8.0 -numpy==1.16.3 -scipy==1.2.1 -tensorflow==1.14.0 -absl-py>=0.8.0 -# setuptools comes from tensorboard requirement: -# https://github.com/tensorflow/tensorboard/blob/1.14/tensorboard/pip_package/setup.py#L33 -setuptools >= 41.0.0 -tensorflow-probability==0.7.0 -tensor2tensor==1.14.0 -apscheduler==3.6.0 -tqdm==4.31.0 -networkx==2.3 -fbmessenger==6.0.0 -pykwalify==1.7.0 -coloredlogs==10.0 -ruamel.yaml==0.15.94 -scikit-learn==0.20.2 -slackclient==1.3.1 -python-telegram-bot==11.1.0 -twilio==6.26.3 -webexteamssdk==1.1.1 -mattermostwrapper==2.1 -rocketchat_API==0.6.31 -colorhash==1.0.2 -pika==1.0.1 -jsonschema==2.6.0 -packaging==19.0 -gevent==1.4.0 -pytz==2019.1 -python-dateutil==2.8.0 -rasa-sdk~=1.3.0 -colorclass==2.2.0 -terminaltables==3.1.0 -sanic==19.3.1 -sanic-cors==0.9.8 -sanic-jwt==1.3.1 -aiohttp==3.5.4 -questionary==1.1.1 -python-socketio==4.3.1 -# the below can be unpinned when python-socketio pins >=3.9.3 -python-engineio==3.9.3 -pydot==1.4.1 -async_generator==1.10 -SQLAlchemy~=1.3.3 -kafka-python==1.4.6 -sklearn-crfsuite==0.3.6 -psycopg2-binary==2.8.2 -PyJWT==1.7.1 -# remove when tensorflow@1.15.x or a pre-release patch is released -# https://github.com/tensorflow/tensorflow/issues/32319 -gast==0.2.2 diff --git a/sample_configs/config_pretrained_embeddings_spacy.yml b/sample_configs/config_pretrained_embeddings_spacy.yml deleted file mode 100644 index 3516519cd529..000000000000 --- a/sample_configs/config_pretrained_embeddings_spacy.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: "en" - -pipeline: "pretrained_embeddings_spacy" diff --git a/sample_configs/config_pretrained_embeddings_spacy_de.yml b/sample_configs/config_pretrained_embeddings_spacy_de.yml deleted file mode 100644 index 7345028fab2e..000000000000 --- a/sample_configs/config_pretrained_embeddings_spacy_de.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: "de" - -pipeline: "pretrained_embeddings_spacy" diff --git a/sample_configs/config_supervised_embeddings.yml b/sample_configs/config_supervised_embeddings.yml deleted file mode 100644 index 3d965f6147d4..000000000000 
--- a/sample_configs/config_supervised_embeddings.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: "en" - -pipeline: "supervised_embeddings" diff --git a/sample_configs/config_train_server_md.yml b/sample_configs/config_train_server_md.yml deleted file mode 100644 index c28f66845bf0..000000000000 --- a/sample_configs/config_train_server_md.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: "en" - -pipeline: "pretrained_embeddings_spacy" - -# data contains the same md, as described in the training data section -data: | - ## intent:affirm - - yes - - yep - - ## intent:goodbye - - bye - - goodbye diff --git a/scripts/ping_slack_about_package_release.sh b/scripts/ping_slack_about_package_release.sh old mode 100644 new mode 100755 index 9b9b3e6ae157..ef97ead7a178 --- a/scripts/ping_slack_about_package_release.sh +++ b/scripts/ping_slack_about_package_release.sh @@ -1,8 +1,10 @@ -#!/usr/bin/env bash +#!/bin/bash -if [[ ${TRAVIS_TAG} =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then +set -Eeuo pipefail + +if [[ ${GITHUB_TAG} =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then curl -X POST -H "Content-type: application/json" \ - --data "{\"text\":\"💥 New *Rasa* version ${TRAVIS_TAG} has been released! Changelog: https://rasa.com/docs/rasa/${TRAVIS_TAG}/changelog/#id1\"}" \ + --data "{\"text\":\"💥 New *Rasa Open Source* version ${GITHUB_TAG} has been released! https://github.com/RasaHQ/rasa/releases/tag/${GITHUB_TAG}\"}" \ "https://hooks.slack.com/services/T0GHWFTS8/BMTQQL47K/${SLACK_WEBHOOK_TOKEN}" fi diff --git a/scripts/poetry-version.sh b/scripts/poetry-version.sh new file mode 100755 index 000000000000..1332d33399e3 --- /dev/null +++ b/scripts/poetry-version.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env python + +"""Extract the poetry version from pyproject.toml. Used in e.g. github workflows.""" + +import sys +import re + +if __name__ == "__main__": + version_regex = r'"poetry[^\"]*=([^\"]+)"' + with open("pyproject.toml") as f: + for line in f: + m = re.search(version_regex, line) + if m: + print(m.group(1)) + sys.exit(0) + else: + print("Failed to find poetry version.") + sys.exit(1) diff --git a/scripts/publish_gh_release_notes.py b/scripts/publish_gh_release_notes.py new file mode 100644 index 000000000000..6ccdde61124e --- /dev/null +++ b/scripts/publish_gh_release_notes.py @@ -0,0 +1,103 @@ +""" +Script used to publish GitHub release notes extracted from CHANGELOG.mdx. +This script is executed by GitHub after a new release was successfully built. + +Uses the following environment variables: +* GITHUB_TAG: the name of the tag of the current commit. +* GITHUB_TOKEN: a personal access token with 'repo' permissions. + +The script also requires ``pandoc`` to be previously installed in the system. +Requires Python3.6+. + +Based on code from pytest. +https://github.com/pytest-dev/pytest/blob/master/scripts/publish_gh_release_notes.py +Copyright Holger Krekel and others, 2004-2019. + +Distributed under the terms of the MIT license, pytest is free and open source software. 
+""" +import os +import re +import sys +from pathlib import Path +from typing import Text + +# if this needs any more dependencies, they need to be installed on github deploy stage +import github3 +from pep440_version_utils import Version + + +def create_github_release(slug: Text, token: Text, tag_name: Text, body: Text): + """Create a github release.""" + + github = github3.login(token=token) + owner, repo = slug.split("/") + repo = github.repository(owner, repo) + return repo.create_release(tag_name=tag_name, body=body) + + +def parse_changelog(tag_name: Text) -> Text: + """Read the changelog and extract the most recently release entry.""" + + p = Path(__file__).parent.parent / "CHANGELOG.mdx" + changelog_lines = p.read_text(encoding="UTF-8").splitlines() + + title_regex = re.compile(r"##\s*\[(\d+\.\d+\.\d+)(\S*)\]\s*-\s*\d{4}-\d{2}-\d{2}") + consuming_version = False + version_lines = [] + for line in changelog_lines: + m = title_regex.match(line) + if m: + # found the version we want: start to consume lines + # until we find the next version title + if m.group(1) == tag_name: + consuming_version = True + # found a new version title while parsing the version we want: break out + elif consuming_version: + break + if consuming_version: + version_lines.append(line) + + # drop the first lines (version headline, not needed for GH) + return "\n".join(version_lines[2:]).strip() + + +def main(): + tag_name = os.environ.get("GITHUB_TAG") + if not tag_name: + print("environment variable GITHUB_TAG not set", file=sys.stderr) + return 1 + + token = os.environ.get("GITHUB_TOKEN") + if not token: + print("GITHUB_TOKEN not set", file=sys.stderr) + return 1 + + slug = os.environ.get("GITHUB_REPO_SLUG") + if not slug: + print("GITHUB_REPO_SLUG not set", file=sys.stderr) + return 1 + + version = Version(tag_name) + if version.pre: + md_body = "_Pre-release version_" + else: + md_body = parse_changelog(tag_name) + + if not md_body: + print("Failed to extract changelog entries for version from changelog.") + return 2 + + if not create_github_release(slug, token, tag_name, md_body): + print("Could not publish release notes:", file=sys.stderr) + print(md_body, file=sys.stderr) + return 5 + + print() + print(f"Release notes for {tag_name} published successfully:") + print(f"https://github.com/{slug}/releases/tag/{tag_name}") + print() + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/push_docs_to_branch.sh b/scripts/push_docs_to_branch.sh new file mode 100755 index 000000000000..0e59d33622fb --- /dev/null +++ b/scripts/push_docs_to_branch.sh @@ -0,0 +1,70 @@ +#!/bin/bash + +set -Eeuo pipefail + +TODAY=`date "+%Y%m%d"` +# we build new versions only for minors and majors +PATTERN_FOR_NEW_VERSION="^refs/tags/[0-9]+\\.[0-9]+\\.0$" +PATTERN_FOR_PATCH_VERSION="^refs/tags/[0-9]+\\.[0-9]+\\.[1-9]+$" +MASTER_REF=refs/heads/master +VARIABLES_JSON=docs/docs/variables.json +SOURCES_FILES=docs/docs/sources/ +REFERENCE_FILES=docs/docs/reference/ +CHANGELOG=docs/docs/changelog.mdx + +[[ ! $GITHUB_REF =~ $PATTERN_FOR_NEW_VERSION ]] \ +&& [[ ! $GITHUB_REF =~ $PATTERN_FOR_PATCH_VERSION ]] \ +&& [[ $GITHUB_REF != $MASTER_REF ]] \ +&& echo "Not on master or tagged version, skipping." 
\ +&& exit 0 + +NEW_VERSION= +EXISTING_VERSION= +if [[ "$GITHUB_REF" =~ $PATTERN_FOR_NEW_VERSION ]] +then + NEW_VERSION=${GITHUB_REF/refs\/tags\//} +elif [[ "$GITHUB_REF" =~ $PATTERN_FOR_PATCH_VERSION ]] +then + EXISTING_VERSION=$(echo $GITHUB_REF | sed -E "s/^refs\/tags\/([0-9]+)\.([0-9]+)\.[0-9]+$/\1.\2.0/") +fi + +# clone the $DOCS_BRANCH in a temp directory +git clone --depth=1 --branch=$DOCS_BRANCH git@github.com:$GITHUB_REPOSITORY.git $TMP_DOCS_FOLDER + +echo "Updating the docs..." +cp -R `ls -A | grep -v "^\.git$"` $TMP_DOCS_FOLDER/ + + +cd $TMP_DOCS_FOLDER + +if [ ! -z "$NEW_VERSION" ] +then + echo "Generating docs for new version $NEW_VERSION..." + cd docs + yarn run new-version $NEW_VERSION + cd .. +fi + +if [ ! -z "$EXISTING_VERSION" ] +then + echo "Updating docs for existing version $EXISTING_VERSION..." + cd docs + cp -R docs/ versioned_docs/version-$EXISTING_VERSION/ + # remove updates to the "next" version + git checkout docs/ + git clean docs/ +fi + +if [ -z "$(git status --porcelain)" ] +then + echo "Nothing changed in docs, done 👍" +else + echo "Pushing changes to git..." + git add . + git add --force $VARIABLES_JSON $SOURCES_FILES $CHANGELOG $REFERENCE_FILES + git commit -am "AUTO docusaurus $TODAY" + git fetch --unshallow + git push origin $DOCS_BRANCH + + echo "Done 👌" +fi diff --git a/scripts/release.py b/scripts/release.py new file mode 100644 index 000000000000..f3e2bc350e42 --- /dev/null +++ b/scripts/release.py @@ -0,0 +1,356 @@ +"""Prepare a Rasa OSS release. + +- creates a release branch +- creates a new changelog section in CHANGELOG.mdx based on all collected changes +- increases the version number +- pushes the new branch to GitHub +""" +import argparse +import os +import re +import sys +from pathlib import Path +from subprocess import CalledProcessError, check_call, check_output +from typing import Text, Set + +import questionary +import toml +from pep440_version_utils import Version, is_valid_version + + +VERSION_FILE_PATH = "rasa/version.py" + +PYPROJECT_FILE_PATH = "pyproject.toml" + +REPO_BASE_URL = "https://github.com/RasaHQ/rasa" + +RELEASE_BRANCH_PREFIX = "prepare-release-" + +PRERELEASE_FLAVORS = ("alpha", "rc") + +RELEASE_BRANCH_PATTERN = re.compile(r"^\d+\.\d+\.x$") + + +def create_argument_parser() -> argparse.ArgumentParser: + """Parse all the command line arguments for the release script.""" + + parser = argparse.ArgumentParser(description="prepare the next library release") + parser.add_argument( + "--next_version", + type=str, + help="Either next version number or 'major', 'minor', 'micro', 'alpha', 'rc'", + ) + + return parser + + +def project_root() -> Path: + """Root directory of the project.""" + return Path(os.path.dirname(__file__)).parent + + +def version_file_path() -> Path: + """Path to the python file containing the version number.""" + return project_root() / VERSION_FILE_PATH + + +def pyproject_file_path() -> Path: + """Path to the pyproject.toml.""" + return project_root() / PYPROJECT_FILE_PATH + + +def write_version_file(version: Version) -> None: + """Dump a new version into the python version file.""" + + with version_file_path().open("w") as f: + f.write( + f"# this file will automatically be changed,\n" + f"# do not add anything but the version number here!\n" + f'__version__ = "{version}"\n' + ) + check_call(["git", "add", str(version_file_path().absolute())]) + + +def write_version_to_pyproject(version: Version) -> None: + """Dump a new version into the pyproject.toml.""" + pyproject_file = pyproject_file_path() + + try: + 
data = toml.load(pyproject_file) +        data["tool"]["poetry"]["version"] = str(version) +        with pyproject_file.open("w", encoding="utf8") as f: +            toml.dump(data, f) +    except (FileNotFoundError, TypeError): +        print(f"Unable to update {pyproject_file}: file not found.") +        sys.exit(1) +    except toml.TomlDecodeError: +        print(f"Unable to parse {pyproject_file}: incorrect TOML file.") +        sys.exit(1) + +    check_call(["git", "add", str(pyproject_file.absolute())]) + + +def get_current_version() -> Text: +    """Return the current library version.""" + +    if not version_file_path().is_file(): +        raise FileNotFoundError( +            f"Failed to find version file at {version_file_path().absolute()}" +        ) + +    # context in which we evaluate the version py - +    # to be able to access the defined version, it already needs to live in the +    # context passed to exec +    _globals = {"__version__": ""} +    with version_file_path().open() as f: +        exec(f.read(), _globals) + +    return _globals["__version__"] + + +def confirm_version(version: Version) -> bool: +    """Allow the user to confirm the version number.""" + +    if str(version) in git_existing_tags(): +        confirmed = questionary.confirm( +            f"Tag with version '{version}' already exists, overwrite?", default=False +        ).ask() +    else: +        confirmed = questionary.confirm( +            f"Current version is '{get_current_version()}'. " +            f"Is the next version '{version}' correct?", +            default=True, +        ).ask() +    if confirmed: +        return True +    else: +        print("Aborting.") +        sys.exit(1) + + +def ask_version() -> Text: +    """Ask the user for the next version number.""" + +    def is_valid_version_number(v: Text) -> bool: +        return v in {"major", "minor", "micro", "alpha", "rc"} or is_valid_version(v) + +    current_version = Version(get_current_version()) +    next_micro_version = str(current_version.next_micro()) +    next_alpha_version = str(current_version.next_alpha()) +    version = questionary.text( +        f"What is the version number you want to release " +        f"('major', 'minor', 'micro', 'alpha', 'rc' or valid version number " +        f"e.g. '{next_micro_version}' or '{next_alpha_version}')?", +        validate=is_valid_version_number, +    ).ask() + +    if version in PRERELEASE_FLAVORS and not current_version.pre: +        # at this stage it's hard to guess the kind of version bump the +        # releaser wants, so we ask them +        if version == "alpha": +            choices = [ +                str(current_version.next_alpha("minor")), +                str(current_version.next_alpha("micro")), +                str(current_version.next_alpha("major")), +            ] +        else: +            choices = [ +                str(current_version.next_release_candidate("minor")), +                str(current_version.next_release_candidate("micro")), +                str(current_version.next_release_candidate("major")), +            ] +        version = questionary.select( +            f"Which {version} do you want to release?", choices=choices, +        ).ask() + +    if version: +        return version +    else: +        print("Aborting.") +        sys.exit(1) + + +def get_rasa_sdk_version() -> Text: +    """Find out what the referenced version of the Rasa SDK is.""" + +    dependencies_filename = "pyproject.toml" +    toml_data = toml.load(project_root() / dependencies_filename) + +    try: +        sdk_version = toml_data["tool"]["poetry"]["dependencies"]["rasa-sdk"] +        return sdk_version[1:].strip() +    except AttributeError: +        raise Exception(f"Failed to find Rasa SDK version in {dependencies_filename}") + + +def validate_code_is_release_ready(version: Version) -> None: +    """Make sure the code base is valid (e.g. 
Rasa SDK is up to date).""" + +    sdk = Version(get_rasa_sdk_version()) +    sdk_version = (sdk.major, sdk.minor) +    rasa_version = (version.major, version.minor) + +    if sdk_version != rasa_version: +        print() +        print( +            f"\033[91m There is a mismatch between the Rasa SDK version ({sdk}) " +            f"and the version you want to release ({version}). Before you can " +            f"release Rasa OSS, you need to release the SDK and update " +            f"the dependency. \033[0m" +        ) +        print() +        sys.exit(1) + + +def git_existing_tags() -> Set[Text]: +    """Return all existing tags in the local git repo.""" + +    stdout = check_output(["git", "tag"]) +    return set(stdout.decode().split("\n")) + + +def git_current_branch() -> Text: +    """Returns the current git branch of the local repo.""" + +    try: +        output = check_output(["git", "symbolic-ref", "--short", "HEAD"]) +        return output.decode().strip() +    except CalledProcessError: +        # e.g. we are in detached head state +        return "master" + + +def git_current_branch_is_master_or_release() -> bool: +    """ +    Returns True if the current local git +    branch is master or a release branch e.g. 1.10.x +    """ +    current_branch = git_current_branch() +    return ( +        current_branch == "master" +        or RELEASE_BRANCH_PATTERN.match(current_branch) is not None +    ) + + +def create_release_branch(version: Version) -> Text: +    """Create a new branch for this release. Returns the branch name.""" + +    branch = f"{RELEASE_BRANCH_PREFIX}{version}" +    check_call(["git", "checkout", "-b", branch]) +    return branch + + +def create_commit(version: Version) -> None: +    """Creates a git commit with all stashed changes.""" +    check_call(["git", "commit", "-m", f"prepared release of version {version}"]) + + +def push_changes() -> None: +    """Pushes the current branch to origin.""" +    check_call(["git", "push", "origin", "HEAD"]) + + +def ensure_clean_git() -> None: +    """Makes sure the current working git copy is clean.""" + +    try: +        check_call(["git", "diff-index", "--quiet", "HEAD", "--"]) +    except CalledProcessError: +        print("Your git is not clean. Release script can only be run from a clean git.") +        sys.exit(1) + + +def parse_next_version(version: Text) -> Version: +    """Find the next version as a proper semantic version string.""" +    if version == "major": +        return Version(get_current_version()).next_major() +    elif version == "minor": +        return Version(get_current_version()).next_minor() +    elif version == "micro": +        return Version(get_current_version()).next_micro() +    elif version == "alpha": +        return Version(get_current_version()).next_alpha() +    elif version == "rc": +        return Version(get_current_version()).next_release_candidate() +    elif is_valid_version(version): +        return Version(version) +    else: +        raise Exception(f"Invalid version number '{version}'.") + + +def next_version(args: argparse.Namespace) -> Version: +    """Take cmdline args or ask the user for the next version and return semver.""" +    return parse_next_version(args.next_version or ask_version()) + + +def generate_changelog(version: Version) -> None: +    """Call towncrier and create a changelog from all available changelog entries.""" +    check_call( +        ["towncrier", "--yes", "--version", str(version)], cwd=str(project_root()) +    ) + + +def print_done_message(branch: Text, base: Text, version: Version) -> None: +    """Print final information for the user on what to do next.""" + +    pull_request_url = f"{REPO_BASE_URL}/compare/{base}...{branch}?expand=1" + +    print() +    print(f"\033[94m All done - changes for version {version} are ready! 
\033[0m") + print() + print(f"Please open a PR on GitHub: {pull_request_url}") + + +def print_done_message_same_branch(version: Version) -> None: + """ + Print final information for the user in case changes + are directly committed on this branch. + """ + + print() + print( + f"\033[94m All done - changes for version {version} where committed on this branch \033[0m" + ) + + +def main(args: argparse.Namespace) -> None: + """Start a release preparation.""" + + print( + "The release script will increase the version number, " + "create a changelog and create a release branch. Let's go!" + ) + + ensure_clean_git() + version = next_version(args) + confirm_version(version) + + validate_code_is_release_ready(version) + + write_version_file(version) + write_version_to_pyproject(version) + + if not version.pre: + # never update changelog on a prerelease version + generate_changelog(version) + + # alpha workflow on feature branch when a version bump is required + if version.is_alpha and not git_current_branch_is_master_or_release(): + create_commit(version) + push_changes() + + print_done_message_same_branch(version) + else: + base = git_current_branch() + branch = create_release_branch(version) + + create_commit(version) + push_changes() + + print_done_message(branch, base, version) + + +if __name__ == "__main__": + arg_parser = create_argument_parser() + cmdline_args = arg_parser.parse_args() + main(cmdline_args) diff --git a/setup.cfg b/setup.cfg index 488c26c671e0..455ee150056f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -7,20 +7,15 @@ python_functions=test_ codestyle_max_line_length = 88 codestyle_ignore = E302 W503 E203 E501 E265 E402 E251 E211 codestyle_exclude = - docs/core/conf.py - docs/nlu/conf.py rasa/core/policies/tf_utils.py rasa/core/policies/__init__.py filterwarnings = ignore::ResourceWarning:ruamel[.*] + #error log_cli = true log_cli_level = WARNING -[metadata] -description-file = README.md -license_file = LICENSE.txt - [flake8] max-line-length = 88 -ignore = W503, E121, E126, E211, E225, E501, E203, E402, F401, F811 \ No newline at end of file +ignore = W503, E121, E126, E211, E225, E501, E203, E402, F401, F811, E231 diff --git a/setup.py b/setup.py deleted file mode 100644 index 57d0b93cc3ad..000000000000 --- a/setup.py +++ /dev/null @@ -1,149 +0,0 @@ -import os - -from setuptools import setup, find_packages - -here = os.path.abspath(os.path.dirname(__file__)) - -# Avoids IDE errors, but actual version is read from version.py -__version__ = None -with open("rasa/version.py") as f: - exec (f.read()) - -# Get the long description from the README file -with open(os.path.join(here, "README.md"), encoding="utf-8") as f: - long_description = f.read() - -tests_requires = [ - "pytest~=4.5", - "pytest-cov~=2.7", - "pytest-localserver~=0.5.0", - "pytest-sanic~=1.0.0", - "responses~=0.9.0", - "freezegun~=0.3.0", - "nbsphinx>=0.3", - "aioresponses~=0.6.0", - "moto~=1.3.8", - "fakeredis~=1.0", - # upstream dep from fakeredis, should be removed if fakeredis properly depends on - # at least 1.12 - "six>=1.12.0", -] - -install_requires = [ - "requests>=2.20", - "boto3~=1.9", - "matplotlib~=3.0", - "simplejson~=3.16", - "attrs>=18", - "jsonpickle~=1.1", - "redis~=3.3.5", - "pymongo~=3.8", - "numpy~=1.16", - "scipy~=1.2", - "tensorflow~=1.14.0", - # absl is a tensorflow dependency, but produces double logging before 0.8 - # should be removed once tensorflow requires absl > 0.8 on its own - "absl-py>=0.8.0", - # setuptools comes from tensorboard requirement: - # 
https://github.com/tensorflow/tensorboard/blob/1.14/tensorboard/pip_package/setup.py#L33 - "setuptools >= 41.0.0", - "tensorflow-probability~=0.7.0", - "tensor2tensor~=1.14.0", - "apscheduler~=3.0", - "tqdm~=4.0", - "networkx~=2.3", - "fbmessenger~=6.0", - "pykwalify~=1.7.0", - "coloredlogs~=10.0", - "scikit-learn~=0.20.2", - "ruamel.yaml~=0.15.0", - "scikit-learn~=0.20.0", - "slackclient~=1.3", - "python-telegram-bot~=11.0", - "twilio~=6.0", - "webexteamssdk~=1.1", - "mattermostwrapper~=2.0", - "rocketchat_API~=0.6.0", - "colorhash~=1.0", - "pika~=1.0.0", - "jsonschema~=2.6", - "packaging~=19.0", - "gevent~=1.4", - "pytz~=2019.1", - "python-dateutil~=2.8", - "rasa-sdk~=1.3.0", - "colorclass~=2.2", - "terminaltables~=3.1", - "sanic~=19.3.1", - "sanic-cors~=0.9.0", - "sanic-jwt~=1.3", - "aiohttp~=3.5", - "questionary>=1.1.0", - "python-socketio>=4.3.1", - # the below can be unpinned when python-socketio pins >=3.9.3 - "python-engineio>=3.9.3", - "pydot~=1.4", - "async_generator~=1.10", - "SQLAlchemy~=1.3.0", - "kafka-python~=1.4", - "sklearn-crfsuite~=0.3.6", - "PyJWT~=1.7", - # remove when tensorflow@1.15.x or a pre-release patch is released - # https://github.com/tensorflow/tensorflow/issues/32319 - "gast==0.2.2", -] - -extras_requires = { - "test": tests_requires, - "spacy": ["spacy>=2.1,<2.2"], - "mitie": ["mitie"], - "sql": ["psycopg2~=2.8.2", "SQLAlchemy~=1.3"], -} - -setup( - name="rasa", - classifiers=[ - "Development Status :: 4 - Beta", - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - # supported python versions - "Programming Language :: Python", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Topic :: Software Development :: Libraries", - ], - packages=find_packages(exclude=["tests", "tools", "docs", "contrib"]), - entry_points={"console_scripts": ["rasa=rasa.__main__:main"]}, - version=__version__, - install_requires=install_requires, - tests_require=tests_requires, - extras_require=extras_requires, - include_package_data=True, - description="Open source machine learning framework to automate text- and " - "voice-based conversations: NLU, dialogue management, connect to " - "Slack, Facebook, and more - Create chatbots and voice assistants", - long_description=long_description, - long_description_content_type="text/markdown", - author="Rasa Technologies GmbH", - author_email="hi@rasa.com", - maintainer="Tom Bocklisch", - maintainer_email="tom@rasa.com", - license="Apache 2.0", - keywords="nlp machine-learning machine-learning-library bot bots " - "botkit rasa conversational-agents conversational-ai chatbot" - "chatbot-framework bot-framework", - url="https://rasa.com", - download_url="https://github.com/RasaHQ/rasa/archive/{}.tar.gz" - "".format(__version__), - project_urls={ - "Bug Reports": "https://github.com/rasahq/rasa/issues", - "Source": "https://github.com/rasahq/rasa", - }, -) - -print ("\nWelcome to Rasa!") -print ( - "If you have any questions, please visit our documentation page: https://rasa.com/docs/" -) -print ("or join the community discussions on https://forum.rasa.com/") diff --git a/tests/cli/conftest.py b/tests/cli/conftest.py index d3783f36224f..0e323de1516e 100644 --- a/tests/cli/conftest.py +++ b/tests/cli/conftest.py @@ -1,9 +1,20 @@ +from pathlib import Path + +from subprocess import check_call + +from _pytest.tmpdir import TempdirFactory +from typing import Callable, Text import pytest +import shutil import os +from 
_pytest.pytester import Testdir, RunResult + +from rasa.cli import scaffold +from rasa.utils.io import write_yaml @pytest.fixture -def run(testdir): +def run(testdir: Testdir) -> Callable[..., RunResult]: def do_run(*args): args = ["rasa"] + list(args) return testdir.run(*args) @@ -12,12 +23,77 @@ def do_run(*args): @pytest.fixture -def run_in_default_project(testdir): +def run_with_stdin(testdir: Testdir) -> Callable[..., RunResult]: + def do_run(*args, stdin): + args = ["rasa"] + list(args) + return testdir.run(*args, stdin=stdin) + + return do_run + + +def create_simple_project(path: Path): + scaffold.create_initial_project(str(path)) + + # create a config file + # for the cli test the resulting model is not important, use components that are + # fast to train + write_yaml( + { + "language": "en", + "pipeline": [{"name": "KeywordIntentClassifier"}], + "policies": [ + {"name": "MappingPolicy"}, + {"name": "MemoizationPolicy", "max_history": 3}, + ], + }, + path / "config.yml", + ) + return path + + +@pytest.fixture(scope="session") +def trained_simple_project(tmpdir_factory: TempdirFactory) -> Text: + path = tmpdir_factory.mktemp("simple") + create_simple_project(path) + os.environ["LOG_LEVEL"] = "ERROR" - testdir.run("rasa", "init", "--no-prompt") + + check_call(["rasa", "train"], cwd=path.strpath) + + return path.strpath + + +@pytest.fixture +def run_in_simple_project(testdir: Testdir) -> Callable[..., RunResult]: + os.environ["LOG_LEVEL"] = "ERROR" + + create_simple_project(testdir.tmpdir) def do_run(*args): args = ["rasa"] + list(args) return testdir.run(*args) return do_run + + +@pytest.fixture +def run_in_simple_project_with_model( + testdir: Testdir, trained_simple_project: Text +) -> Callable[..., RunResult]: + os.environ["LOG_LEVEL"] = "ERROR" + + # makes sure we do not always retrain an initial model for every "new" project + for file_name in os.listdir(trained_simple_project): + full_file_name = os.path.join(trained_simple_project, file_name) + if os.path.isfile(full_file_name): + shutil.copy(full_file_name, str(testdir.tmpdir)) + else: + shutil.copytree(full_file_name, str(testdir.tmpdir / file_name)) + + def do_run(*args): + args = ["rasa"] + list(args) + result = testdir.run(*args) + os.environ["LOG_LEVEL"] = "INFO" + return result + + return do_run diff --git a/tests/cli/test_cli.py b/tests/cli/test_cli.py index 8e9da5bc9e66..f2e1b1c18627 100644 --- a/tests/cli/test_cli.py +++ b/tests/cli/test_cli.py @@ -1,8 +1,13 @@ +from typing import Callable +from _pytest.pytester import RunResult import pytest +import sys -@pytest.mark.repeat(3) -def test_cli_start(run): +def test_cli_start(run: Callable[..., RunResult]): + """ + Checks that a call to ``rasa --help`` does not take longer than 7 seconds. 
+ """ import time start = time.time() @@ -11,16 +16,29 @@ def test_cli_start(run): duration = end - start - assert duration < 3 # startup of cli should not take longer than 3 seconds + assert duration <= 7 -def test_data_convert_help(run): +def test_data_convert_help(run: Callable[..., RunResult]): output = run("--help") help_text = """usage: rasa [-h] [--version] - {init,run,shell,train,interactive,test,visualize,data,x} ...""" + {init,run,shell,train,interactive,test,visualize,data,export,x} + ...""" lines = help_text.split("\n") for i, line in enumerate(lines): assert output.outlines[i] == line + + +@pytest.mark.xfail( + sys.platform == "win32", reason="--version doesn't print anything on Windows" +) +def test_version_print_lines(run: Callable[..., RunResult]): + output = run("--version") + output_text = "".join(output.outlines) + assert "Rasa Version" in output_text + assert "Python Version" in output_text + assert "Operating System" in output_text + assert "Python Path" in output_text diff --git a/tests/cli/test_rasa_data.py b/tests/cli/test_rasa_data.py index 8cef4774cb16..d0207648965b 100644 --- a/tests/cli/test_rasa_data.py +++ b/tests/cli/test_rasa_data.py @@ -1,23 +1,36 @@ +import argparse import os +from unittest.mock import Mock +import pytest +from collections import namedtuple +from typing import Callable, Text +from _pytest.monkeypatch import MonkeyPatch +from _pytest.pytester import RunResult +from rasa.cli import data +from rasa.importers.importer import TrainingDataImporter +from rasa.validator import Validator -def test_data_split_nlu(run_in_default_project): - run_in_default_project( - "data", "split", "nlu", "-u", "data/nlu.md", "--training-fraction", "0.75" + +def test_data_split_nlu(run_in_simple_project: Callable[..., RunResult]): + run_in_simple_project( + "data", "split", "nlu", "-u", "data/nlu.yml", "--training-fraction", "0.75" ) assert os.path.exists("train_test_split") - assert os.path.exists(os.path.join("train_test_split", "test_data.md")) - assert os.path.exists(os.path.join("train_test_split", "training_data.md")) + # TODO: Comment back in as soon as NLU YAML writer is merged + # https://github.com/RasaHQ/rasa/issues/6363 + # assert os.path.exists(os.path.join("train_test_split", "test_data.md")) + # assert os.path.exists(os.path.join("train_test_split", "training_data.md")) -def test_data_convert_nlu(run_in_default_project): - run_in_default_project( +def test_data_convert_nlu(run_in_simple_project: Callable[..., RunResult]): + run_in_simple_project( "data", "convert", "nlu", "--data", - "data/nlu.md", + "data/nlu.yml", "--out", "out_nlu_data.json", "-f", @@ -27,11 +40,12 @@ def test_data_convert_nlu(run_in_default_project): assert os.path.exists("out_nlu_data.json") -def test_data_split_help(run): +def test_data_split_help(run: Callable[..., RunResult]): output = run("data", "split", "nlu", "--help") help_text = """usage: rasa data split nlu [-h] [-v] [-vv] [--quiet] [-u NLU] - [--training-fraction TRAINING_FRACTION] [--out OUT]""" + [--training-fraction TRAINING_FRACTION] + [--random-seed RANDOM_SEED] [--out OUT]""" lines = help_text.split("\n") @@ -39,11 +53,11 @@ def test_data_split_help(run): assert output.outlines[i] == line -def test_data_convert_help(run): +def test_data_convert_help(run: Callable[..., RunResult]): output = run("data", "convert", "nlu", "--help") help_text = """usage: rasa data convert nlu [-h] [-v] [-vv] [--quiet] --data DATA --out OUT - [-l LANGUAGE] -f {json,md}""" + [-l LANGUAGE] -f {json,md,yaml}""" lines = 
help_text.split("\n") @@ -51,13 +65,100 @@ def test_data_convert_help(run): assert output.outlines[i] == line -def test_data_validate_help(run): +def test_data_validate_help(run: Callable[..., RunResult]): output = run("data", "validate", "--help") - help_text = """usage: rasa data validate [-h] [-v] [-vv] [--quiet] [--fail-on-warnings] - [-d DOMAIN] [--data DATA]""" + help_text = """usage: rasa data validate [-h] [-v] [-vv] [--quiet] + [--max-history MAX_HISTORY] [--fail-on-warnings]""" lines = help_text.split("\n") for i, line in enumerate(lines): assert output.outlines[i] == line + + +def _text_is_part_of_output_error(text: Text, output: RunResult) -> bool: + found_info_string = False + for line in output.errlines: + if text in line: + found_info_string = True + return found_info_string + + +def test_data_validate_stories_with_max_history_zero(monkeypatch: MonkeyPatch): + parser = argparse.ArgumentParser() + subparsers = parser.add_subparsers(help="Rasa commands") + data.add_subparser(subparsers, parents=[]) + + args = parser.parse_args(["data", "validate", "stories", "--max-history", 0]) + + async def mock_from_importer(importer: TrainingDataImporter) -> Validator: + return Mock() + + monkeypatch.setattr("rasa.validator.Validator.from_importer", mock_from_importer) + + with pytest.raises(argparse.ArgumentTypeError): + data.validate_files(args) + + +def test_validate_files_exit_early(): + with pytest.raises(SystemExit) as pytest_e: + args = { + "domain": "data/test_domains/duplicate_intents.yml", + "data": None, + "max_history": None, + } + data.validate_files(namedtuple("Args", args.keys())(*args.values())) + + assert pytest_e.type == SystemExit + assert pytest_e.value.code == 1 + + +def test_rasa_data_convert_to_yaml( + run_in_simple_project: Callable[..., RunResult], run: Callable[..., RunResult] +): + converted_data_folder = "converted_data" + os.mkdir(converted_data_folder) + + simple_nlu_md = """ + ## intent:greet + - hey + - hello + """ + + with open("data/nlu.md", "w") as f: + f.write(simple_nlu_md) + + simple_story_md = """ + ## happy path + * greet + - utter_greet + """ + + with open("data/stories.md", "w") as f: + f.write(simple_story_md) + + run_in_simple_project( + "data", + "convert", + "nlu", + "-f", + "yaml", + "--data", + "data", + "--out", + converted_data_folder, + ) + run_in_simple_project( + "data", + "convert", + "core", + "-f", + "yaml", + "--data", + "data", + "--out", + converted_data_folder, + ) + + assert len(os.listdir(converted_data_folder)) == 2 diff --git a/tests/cli/test_rasa_export.py b/tests/cli/test_rasa_export.py new file mode 100644 index 000000000000..2a5e0380a919 --- /dev/null +++ b/tests/cli/test_rasa_export.py @@ -0,0 +1,263 @@ +import argparse +from pathlib import Path +from typing import Callable, Optional, Dict, Text, List, Tuple, Any +from unittest.mock import Mock + +import pytest +from _pytest.monkeypatch import MonkeyPatch +from _pytest.pytester import RunResult + +import rasa.core.utils as rasa_core_utils +from rasa.cli import export +from rasa.core.brokers.pika import PikaEventBroker +from rasa.core.events import UserUttered +from rasa.core.trackers import DialogueStateTracker +from rasa.exceptions import PublishingError, NoEventsToMigrateError +from tests.conftest import ( + MockExporter, + random_user_uttered_event, + write_endpoint_config_to_yaml, +) + + +def test_export_help(run: Callable[..., RunResult]): + output = run("export", "--help") + + help_text = """usage: rasa export [-h] [-v] [-vv] [--quiet] [--endpoints ENDPOINTS] + 
[--minimum-timestamp MINIMUM_TIMESTAMP] + [--maximum-timestamp MAXIMUM_TIMESTAMP] + [--conversation-ids CONVERSATION_IDS]""" + + lines = help_text.split("\n") + + for i, line in enumerate(lines): + assert output.outlines[i] == line + + +@pytest.mark.parametrize( + "minimum_timestamp,maximum_timestamp", + [(2, 3), (None, 5.5), (None, None), (5, None)], +) +def test_validate_timestamp_options( + minimum_timestamp: Optional[float], maximum_timestamp: Optional[float] +): + args = argparse.Namespace() + args.minimum_timestamp = ( + str(minimum_timestamp) if minimum_timestamp is not None else None + ) + args.maximum_timestamp = ( + str(maximum_timestamp) if maximum_timestamp is not None else None + ) + + # no error is raised + # noinspection PyProtectedMember + export._assert_max_timestamp_is_greater_than_min_timestamp(args) + + +def test_validate_timestamp_options_with_invalid_timestamps(): + args = argparse.Namespace(minimum_timestamp=3, maximum_timestamp=2) + with pytest.raises(SystemExit): + # noinspection PyProtectedMember + export._assert_max_timestamp_is_greater_than_min_timestamp(args) + + +# noinspection PyProtectedMember +def test_get_event_broker_and_tracker_store_from_endpoint_config(tmp_path: Path): + # write valid config to file + endpoints_path = write_endpoint_config_to_yaml( + tmp_path, {"event_broker": {"type": "sql"}, "tracker_store": {"type": "sql"}} + ) + + available_endpoints = rasa_core_utils.read_endpoints_from_path(endpoints_path) + + # fetching the event broker is successful + assert export._get_event_broker(available_endpoints) + assert export._get_tracker_store(available_endpoints) + + +# noinspection PyProtectedMember +def test_get_event_broker_from_endpoint_config_error_exit(tmp_path: Path): + # write config without event broker to file + endpoints_path = write_endpoint_config_to_yaml( + tmp_path, {"tracker_store": {"type": "sql"}} + ) + + available_endpoints = rasa_core_utils.read_endpoints_from_path(endpoints_path) + + with pytest.raises(SystemExit): + assert export._get_event_broker(available_endpoints) + + +def test_get_tracker_store_from_endpoint_config_error_exit(tmp_path: Path): + # write config without event broker to file + endpoints_path = write_endpoint_config_to_yaml(tmp_path, {}) + + available_endpoints = rasa_core_utils.read_endpoints_from_path(endpoints_path) + + with pytest.raises(SystemExit): + # noinspection PyProtectedMember + assert export._get_tracker_store(available_endpoints) + + +@pytest.mark.parametrize( + "requested_ids,expected", + [("id1", ["id1"]), ("id1,id2", ["id1", "id2"]), (None, None), ("", None)], +) +def test_get_requested_conversation_ids( + requested_ids: Optional[Text], expected: Optional[List[Text]] +): + # noinspection PyProtectedMember + assert export._get_requested_conversation_ids(requested_ids) == expected + + +def test_prepare_pika_event_broker(): + # mock a pika event broker + pika_broker = Mock(spec=PikaEventBroker) + + # patch the spinner so we can execute the `_prepare_pika_producer()` function + pika_broker.is_ready.return_value = True + + # noinspection PyProtectedMember + export._prepare_event_broker(pika_broker) + + # the attributes are set as expected + assert not pika_broker.should_keep_unpublished_messages + assert pika_broker.raise_on_failure + + +@pytest.mark.parametrize( + "current_timestamp,maximum_timestamp,endpoints_path,requested_ids,expected", + [ + (1.0, None, None, None, "--minimum-timestamp 1.0"), + (1.0, None, None, ["5", "6"], "--minimum-timestamp 1.0 --conversation-ids 5,6"), + (1.0, 3.4, None, 
None, "--minimum-timestamp 1.0 --maximum-timestamp 3.4"), + ( + 1.0, + 2.5, + "a.yml", + None, + "--endpoints a.yml --minimum-timestamp 1.0 --maximum-timestamp 2.5", + ), + ( + 1.0, + 2.5, + "a.yml", + ["1", "2", "3"], + ( + "--endpoints a.yml --minimum-timestamp 1.0 --maximum-timestamp 2.5 " + "--conversation-ids 1,2,3" + ), + ), + ], +) +def test_get_continuation_command( + current_timestamp: float, + maximum_timestamp: Optional[float], + endpoints_path: Optional[Text], + requested_ids: Optional[List[Text]], + expected: Text, +): + exporter = MockExporter() + exporter.maximum_timestamp = maximum_timestamp + exporter.endpoints_path = endpoints_path + exporter.requested_conversation_ids = requested_ids + + # noinspection PyProtectedMember + assert ( + export._get_continuation_command(exporter, current_timestamp) + == f"rasa export {expected}" + ) + + +def _add_conversation_id_to_event(event: Dict, conversation_id: Text): + event["sender_id"] = conversation_id + + +def prepare_namespace_and_mocked_tracker_store_with_events( + temporary_path: Path, monkeypatch: MonkeyPatch +) -> Tuple[List[UserUttered], argparse.Namespace]: + endpoints_path = write_endpoint_config_to_yaml( + temporary_path, + {"event_broker": {"type": "pika"}, "tracker_store": {"type": "sql"}}, + ) + + # export these conversation IDs + all_conversation_ids = ["id-1", "id-2", "id-3"] + + requested_conversation_ids = ["id-1", "id-2"] + + # create namespace with a set of cmdline arguments + namespace = argparse.Namespace( + endpoints=endpoints_path, + conversation_ids=",".join(requested_conversation_ids), + minimum_timestamp=1.0, + maximum_timestamp=10.0, + ) + + # prepare events from different senders and different timestamps + events = [random_user_uttered_event(timestamp) for timestamp in [1, 2, 3, 4, 11, 5]] + events_for_conversation_id = { + all_conversation_ids[0]: [events[0], events[1]], + all_conversation_ids[1]: [events[2], events[3], events[4]], + all_conversation_ids[2]: [events[5]], + } + + def _get_tracker(conversation_id: Text) -> DialogueStateTracker: + return DialogueStateTracker.from_events( + conversation_id, events_for_conversation_id[conversation_id] + ) + + # mock tracker store + tracker_store = Mock() + tracker_store.keys.return_value = all_conversation_ids + tracker_store.retrieve.side_effect = _get_tracker + + monkeypatch.setattr(export, "_get_tracker_store", lambda _: tracker_store) + + return events, namespace + + +def test_export_trackers(tmp_path: Path, monkeypatch: MonkeyPatch): + events, namespace = prepare_namespace_and_mocked_tracker_store_with_events( + tmp_path, monkeypatch + ) + + # mock event broker so we can check its `publish` method is called + event_broker = Mock() + event_broker.publish = Mock() + monkeypatch.setattr(export, "_get_event_broker", lambda _: event_broker) + + # run the export function + export.export_trackers(namespace) + + # check that only events 1, 2, 3, and 4 have been published + # event 6 was sent by `id-3` which was not requested, and event 5 + # lies outside the requested time range + calls = event_broker.publish.mock_calls + + # only four events were published (i.e. `publish()` method was called four times) + assert len(calls) == 4 + + # call objects are tuples of (name, pos. 
args, kwargs) + # args itself is a tuple, and we want to access the first one, hence `call[1][0]` + # check that events 1-4 were published + assert all( + any(call[1][0]["text"] == event.text for call in calls) for event in events[:4] + ) + + +@pytest.mark.parametrize("exception", [NoEventsToMigrateError, PublishingError(123)]) +def test_export_trackers_publishing_exceptions( + tmp_path: Path, monkeypatch: MonkeyPatch, exception: Exception +): + events, namespace = prepare_namespace_and_mocked_tracker_store_with_events( + tmp_path, monkeypatch + ) + + # mock event broker so we can check its `publish` method is called + event_broker = Mock() + event_broker.publish.side_effect = exception + monkeypatch.setattr(export, "_get_event_broker", lambda _: event_broker) + + with pytest.raises(SystemExit): + export.export_trackers(namespace) diff --git a/tests/cli/test_rasa_init.py b/tests/cli/test_rasa_init.py index 64949f7d7ac6..920ad9933a43 100644 --- a/tests/cli/test_rasa_init.py +++ b/tests/cli/test_rasa_init.py @@ -1,22 +1,46 @@ import os +from pathlib import Path +from typing import Callable +from _pytest.pytester import RunResult -def test_init(run): - run("init", "--no-prompt", "--quiet") +def test_init_using_init_dir_option(run_with_stdin: Callable[..., RunResult]): + os.makedirs("./workspace") + run_with_stdin( + "init", "--quiet", "--init-dir", "./workspace", stdin=b"N" + ) # avoid training an initial model - assert os.path.exists("actions.py") - assert os.path.exists("domain.yml") - assert os.path.exists("config.yml") - assert os.path.exists("credentials.yml") - assert os.path.exists("endpoints.yml") - assert os.path.exists("models") - assert os.path.exists("data/nlu.md") - assert os.path.exists("data/stories.md") + required_files = [ + "actions.py", + "domain.yml", + "config.yml", + "credentials.yml", + "endpoints.yml", + "data/nlu.yml", + "data/stories.yml", + "data/rules.yml", + ] + assert all((Path("workspace") / file).exists() for file in required_files) -def test_init_help(run): +def test_not_found_init_path(run: Callable[..., RunResult]): + output = run("init", "--no-prompt", "--quiet", "--init-dir", "./workspace") + + assert ( + output.outlines[-1] + == "\033[91mProject init path './workspace' not found.\033[0m" + ) + + +def test_init_help(run: Callable[..., RunResult]): output = run("init", "--help") assert ( - output.outlines[0] == "usage: rasa init [-h] [-v] [-vv] [--quiet] [--no-prompt]" + output.outlines[0] + == "usage: rasa init [-h] [-v] [-vv] [--quiet] [--no-prompt] [--init-dir INIT_DIR]" ) + + +def test_user_asked_to_train_model(run_with_stdin: Callable[..., RunResult]): + run_with_stdin("init", stdin=b"\nYN") + assert not os.path.exists("models") diff --git a/tests/cli/test_rasa_interactive.py b/tests/cli/test_rasa_interactive.py index 7840edd7c4a2..7ae7164e2bd7 100644 --- a/tests/cli/test_rasa_interactive.py +++ b/tests/cli/test_rasa_interactive.py @@ -1,11 +1,25 @@ -def test_interactive_help(run): +import argparse +import pytest +from typing import Callable, Text +from unittest.mock import Mock, ANY + +from _pytest.monkeypatch import MonkeyPatch +from _pytest.pytester import RunResult + +import rasa +from rasa.cli import interactive, train +from tests.conftest import DEFAULT_NLU_DATA + + +def test_interactive_help(run: Callable[..., RunResult]): output = run("interactive", "--help") - help_text = """usage: rasa interactive [-h] [-v] [-vv] [--quiet] [--e2e] [-m MODEL] + help_text = """usage: rasa interactive [-h] [-v] [-vv] [--quiet] [--e2e] [-p PORT] [-m MODEL] 
[--data DATA [DATA ...]] [--skip-visualization] + [--conversation-id CONVERSATION_ID] [--endpoints ENDPOINTS] [-c CONFIG] [-d DOMAIN] [--out OUT] [--augmentation AUGMENTATION] - [--debug-plots] [--dump-stories] [--force] + [--debug-plots] [--force] [--persist-nlu-data] {core} ... [model-as-positional-argument]""" lines = help_text.split("\n") @@ -14,17 +28,165 @@ def test_interactive_help(run): assert output.outlines[i] == line -def test_interactive_core_help(run): +def test_interactive_core_help(run: Callable[..., RunResult]): output = run("interactive", "core", "--help") help_text = """usage: rasa interactive core [-h] [-v] [-vv] [--quiet] [-m MODEL] [-s STORIES] - [--skip-visualization] [--endpoints ENDPOINTS] - [-c CONFIG] [-d DOMAIN] [--out OUT] - [--augmentation AUGMENTATION] [--debug-plots] - [--dump-stories] + [--skip-visualization] + [--conversation-id CONVERSATION_ID] + [--endpoints ENDPOINTS] [-c CONFIG] [-d DOMAIN] + [--out OUT] [--augmentation AUGMENTATION] + [--debug-plots] [-p PORT] [model-as-positional-argument]""" lines = help_text.split("\n") for i, line in enumerate(lines): assert output.outlines[i] == line + + +def test_pass_arguments_to_rasa_train( + default_stack_config: Text, monkeypatch: MonkeyPatch +) -> None: + # Create parser + parser = argparse.ArgumentParser() + sub_parser = parser.add_subparsers() + interactive.add_subparser(sub_parser, []) + + # Parse interactive command + args = parser.parse_args(["interactive", "--config", default_stack_config]) + interactive._set_not_required_args(args) + + # Mock actual training + mock = Mock() + monkeypatch.setattr(rasa, "train", mock.method) + + # If the `Namespace` object does not have all required fields this will throw + train.train(args) + + # Assert `train` was actually called + mock.method.assert_called_once() + + +def test_train_called_when_no_model_passed( + default_stack_config: Text, monkeypatch: MonkeyPatch +) -> None: + parser = argparse.ArgumentParser() + sub_parser = parser.add_subparsers() + interactive.add_subparser(sub_parser, []) + + args = parser.parse_args( + [ + "interactive", + "--config", + default_stack_config, + "--data", + "examples/moodbot/data", + ] + ) + interactive._set_not_required_args(args) + + # Mock actual training and interactive learning methods + mock = Mock() + monkeypatch.setattr(train, "train", mock.train_model) + monkeypatch.setattr( + interactive, "perform_interactive_learning", mock.perform_interactive_learning + ) + + interactive.interactive(args) + mock.train_model.assert_called_once() + + +def test_train_core_called_when_no_model_passed_and_core( + default_stack_config: Text, monkeypatch: MonkeyPatch +) -> None: + parser = argparse.ArgumentParser() + sub_parser = parser.add_subparsers() + interactive.add_subparser(sub_parser, []) + + args = parser.parse_args( + [ + "interactive", + "core", + "--config", + default_stack_config, + "--stories", + "examples/moodbot/data/stories.yml", + "--domain", + "examples/moodbot/domain.yml", + ] + ) + interactive._set_not_required_args(args) + + # Mock actual training and interactive learning methods + mock = Mock() + monkeypatch.setattr(train, "train_core", mock.train_core) + monkeypatch.setattr( + interactive, "perform_interactive_learning", mock.perform_interactive_learning + ) + + interactive.interactive(args) + mock.train_core.assert_called_once() + + +def test_no_interactive_without_core_data( + default_stack_config: Text, monkeypatch: MonkeyPatch +) -> None: + parser = argparse.ArgumentParser() + sub_parser = parser.add_subparsers() 
+ interactive.add_subparser(sub_parser, []) + + args = parser.parse_args( + ["interactive", "--config", default_stack_config, "--data", DEFAULT_NLU_DATA,] + ) + interactive._set_not_required_args(args) + + mock = Mock() + monkeypatch.setattr(train, "train", mock.train_model) + monkeypatch.setattr( + interactive, "perform_interactive_learning", mock.perform_interactive_learning + ) + + with pytest.raises(SystemExit): + interactive.interactive(args) + + mock.train_model.assert_not_called() + mock.perform_interactive_learning.assert_not_called() + + +def test_pass_conversation_id_to_interactive_learning(monkeypatch: MonkeyPatch): + from rasa.core.train import do_interactive_learning + from rasa.core.training import interactive as interactive_learning + + parser = argparse.ArgumentParser() + sub_parser = parser.add_subparsers() + interactive.add_subparser(sub_parser, []) + + expected_conversation_id = "🎁" + args = parser.parse_args( + [ + "interactive", + "--conversation-id", + expected_conversation_id, + "--skip-visualization", + ] + ) + + _serve_application = Mock() + monkeypatch.setattr(interactive_learning, "_serve_application", _serve_application) + + do_interactive_learning(args, Mock()) + + _serve_application.assert_called_once_with( + ANY, ANY, True, expected_conversation_id, 5005 + ) + + +def test_generate_conversation_id_for_interactive_learning(monkeypatch: MonkeyPatch): + parser = argparse.ArgumentParser() + sub_parser = parser.add_subparsers() + interactive.add_subparser(sub_parser, []) + + args = parser.parse_args(["interactive"]) + + assert args.conversation_id diff --git a/tests/cli/test_rasa_run.py b/tests/cli/test_rasa_run.py index cba9cb7fb26e..71f0a0762c82 100644 --- a/tests/cli/test_rasa_run.py +++ b/tests/cli/test_rasa_run.py @@ -1,28 +1,30 @@ import os -import shutil +from typing import Callable +from _pytest.pytester import RunResult -def test_run_does_not_start(run_in_default_project): +def test_run_does_not_start(run_in_simple_project: Callable[..., RunResult]): os.remove("domain.yml") - shutil.rmtree("models") # the server should not start as no model is configured - output = run_in_default_project("run") + output = run_in_simple_project("run") assert "No model found." in output.outlines[0] -def test_run_help(run): +def test_run_help(run: Callable[..., RunResult]): output = run("run", "--help") help_text = """usage: rasa run [-h] [-v] [-vv] [--quiet] [-m MODEL] [--log-file LOG_FILE] [--endpoints ENDPOINTS] [-p PORT] [-t AUTH_TOKEN] [--cors [CORS [CORS ...]]] [--enable-api] + [--response-timeout RESPONSE_TIMEOUT] [--remote-storage REMOTE_STORAGE] [--ssl-certificate SSL_CERTIFICATE] - [--ssl-keyfile SSL_KEYFILE] [--ssl-password SSL_PASSWORD] - [--credentials CREDENTIALS] [--connector CONNECTOR] - [--jwt-secret JWT_SECRET] [--jwt-method JWT_METHOD] + [--ssl-keyfile SSL_KEYFILE] [--ssl-ca-file SSL_CA_FILE] + [--ssl-password SSL_PASSWORD] [--credentials CREDENTIALS] + [--connector CONNECTOR] [--jwt-secret JWT_SECRET] + [--jwt-method JWT_METHOD] {actions} ... 
[model-as-positional-argument]""" lines = help_text.split("\n") @@ -31,7 +33,7 @@ def test_run_help(run): assert output.outlines[i] == line -def test_run_action_help(run): +def test_run_action_help(run: Callable[..., RunResult]): output = run("run", "actions", "--help") help_text = """usage: rasa run actions [-h] [-v] [-vv] [--quiet] [-p PORT] diff --git a/tests/cli/test_rasa_shell.py b/tests/cli/test_rasa_shell.py index b614dace782a..bb7f7cc99fc9 100644 --- a/tests/cli/test_rasa_shell.py +++ b/tests/cli/test_rasa_shell.py @@ -1,14 +1,21 @@ -def test_shell_help(run): +from typing import Callable +from _pytest.pytester import RunResult + + +def test_shell_help(run: Callable[..., RunResult]): output = run("shell", "--help") - help_text = """usage: rasa shell [-h] [-v] [-vv] [--quiet] [-m MODEL] [--log-file LOG_FILE] - [--endpoints ENDPOINTS] [-p PORT] [-t AUTH_TOKEN] - [--cors [CORS [CORS ...]]] [--enable-api] + help_text = """usage: rasa shell [-h] [-v] [-vv] [--quiet] + [--conversation-id CONVERSATION_ID] [-m MODEL] + [--log-file LOG_FILE] [--endpoints ENDPOINTS] [-p PORT] + [-t AUTH_TOKEN] [--cors [CORS [CORS ...]]] [--enable-api] + [--response-timeout RESPONSE_TIMEOUT] [--remote-storage REMOTE_STORAGE] [--ssl-certificate SSL_CERTIFICATE] - [--ssl-keyfile SSL_KEYFILE] [--ssl-password SSL_PASSWORD] - [--credentials CREDENTIALS] [--connector CONNECTOR] - [--jwt-secret JWT_SECRET] [--jwt-method JWT_METHOD] + [--ssl-keyfile SSL_KEYFILE] [--ssl-ca-file SSL_CA_FILE] + [--ssl-password SSL_PASSWORD] [--credentials CREDENTIALS] + [--connector CONNECTOR] [--jwt-secret JWT_SECRET] + [--jwt-method JWT_METHOD] {nlu} ... [model-as-positional-argument]""" lines = help_text.split("\n") @@ -17,7 +24,7 @@ def test_shell_help(run): assert output.outlines[i] == line -def test_shell_nlu_help(run): +def test_shell_nlu_help(run: Callable[..., RunResult]): output = run("shell", "nlu", "--help") help_text = """usage: rasa shell nlu [-h] [-v] [-vv] [--quiet] [-m MODEL] diff --git a/tests/cli/test_rasa_test.py b/tests/cli/test_rasa_test.py index 30cd9c4d2309..394475806f7e 100644 --- a/tests/cli/test_rasa_test.py +++ b/tests/cli/test_rasa_test.py @@ -1,56 +1,92 @@ import os from shutil import copyfile + +from rasa.core.test import CONFUSION_MATRIX_STORIES_FILE from rasa.constants import DEFAULT_RESULTS_PATH, RESULTS_FILE -from rasa.utils.io import list_files, write_yaml_file +from rasa.utils.io import list_files, write_yaml +from typing import Callable +from _pytest.pytester import RunResult -def test_test_core(run_in_default_project): - run_in_default_project("test", "core", "--stories", "data") +def test_test_core(run_in_simple_project: Callable[..., RunResult]): + run_in_simple_project("test", "core", "--stories", "data") assert os.path.exists("results") -def test_test(run_in_default_project): - run_in_default_project("test") +def test_test_core_no_plot(run_in_simple_project: Callable[..., RunResult]): + run_in_simple_project("test", "core", "--no-plot") + + assert not os.path.exists(f"results/{CONFUSION_MATRIX_STORIES_FILE}") + + +def test_test(run_in_simple_project_with_model: Callable[..., RunResult]): + run_in_simple_project_with_model("test") assert os.path.exists("results") - assert os.path.exists("results/hist.png") - assert os.path.exists("results/confmat.png") + assert os.path.exists("results/intent_histogram.png") + assert os.path.exists("results/intent_confusion_matrix.png") + +def test_test_no_plot(run_in_simple_project: Callable[..., RunResult]): + run_in_simple_project("test", "--no-plot") -def 
test_test_nlu(run_in_default_project): - run_in_default_project("test", "nlu", "--nlu", "data", "--successes") + assert not os.path.exists("results/intent_histogram.png") + assert not os.path.exists("results/intent_confusion_matrix.png") + assert not os.path.exists("results/story_confmat.pdf") - assert os.path.exists("results/hist.png") - assert os.path.exists("results/confmat.png") + +def test_test_nlu(run_in_simple_project_with_model: Callable[..., RunResult]): + run_in_simple_project_with_model("test", "nlu", "--nlu", "data", "--successes") + + assert os.path.exists("results/intent_histogram.png") + assert os.path.exists("results/intent_confusion_matrix.png") assert os.path.exists("results/intent_successes.json") -def test_test_nlu_cross_validation(run_in_default_project): - run_in_default_project( - "test", "nlu", "--cross-validation", "-c", "config.yml", "-f", "2" +def test_test_nlu_no_plot(run_in_simple_project: Callable[..., RunResult]): + run_in_simple_project("test", "nlu", "--no-plot") + + assert not os.path.exists("results/intent_histogram.png") + assert not os.path.exists("results/intent_confusion_matrix.png") + + +def test_test_nlu_cross_validation(run_in_simple_project: Callable[..., RunResult]): + run_in_simple_project( + "test", "nlu", "--cross-validation", "-c", "config.yml", "-f", "2", "-r", "1" ) - assert os.path.exists("results/hist.png") - assert os.path.exists("results/confmat.png") + assert os.path.exists("results/intent_histogram.png") + assert os.path.exists("results/intent_confusion_matrix.png") -def test_test_nlu_comparison(run_in_default_project): - copyfile("config.yml", "nlu-config.yml") +def test_test_nlu_comparison(run_in_simple_project: Callable[..., RunResult]): + copyfile("config.yml", "config-1.yml") - run_in_default_project( - "test", "nlu", "-c", "config.yml", "nlu-config.yml", "--run", "2" + run_in_simple_project( + "test", + "nlu", + "--config", + "config.yml", + "config-1.yml", + "--run", + "2", + "--percentages", + "75", + "25", ) assert os.path.exists("results/run_1") assert os.path.exists("results/run_2") -def test_test_core_comparison(run_in_default_project): +def test_test_core_comparison( + run_in_simple_project_with_model: Callable[..., RunResult] +): files = list_files("models") copyfile(files[0], "models/copy-model.tar.gz") - run_in_default_project( + run_in_simple_project_with_model( "test", "core", "-m", @@ -63,32 +99,25 @@ def test_test_core_comparison(run_in_default_project): assert os.path.exists(os.path.join(DEFAULT_RESULTS_PATH, RESULTS_FILE)) -def test_test_core_comparison_after_train(run_in_default_project): - write_yaml_file( - { - "language": "en", - "pipeline": "supervised_embeddings", - "policies": [{"name": "KerasPolicy"}], - }, - "config_1.yml", +def test_test_core_comparison_after_train( + run_in_simple_project: Callable[..., RunResult] +): + write_yaml( + {"language": "en", "policies": [{"name": "MemoizationPolicy"}]}, "config_1.yml" ) - write_yaml_file( - { - "language": "en", - "pipeline": "supervised_embeddings", - "policies": [{"name": "MemoizationPolicy"}], - }, - "config_2.yml", + write_yaml( + {"language": "en", "policies": [{"name": "MemoizationPolicy"}]}, "config_2.yml" ) - run_in_default_project( + + run_in_simple_project( "train", "core", "-c", "config_1.yml", "config_2.yml", "--stories", - "data/stories.md", + "data/stories.yml", "--runs", "2", "--percentages", @@ -104,7 +133,7 @@ def test_test_core_comparison_after_train(run_in_default_project): assert os.path.exists("comparison_models/run_1") assert 
os.path.exists("comparison_models/run_2") - run_in_default_project( + run_in_simple_project( "test", "core", "-m", @@ -120,17 +149,16 @@ def test_test_core_comparison_after_train(run_in_default_project): ) -def test_test_help(run): +def test_test_help(run: Callable[..., RunResult]): output = run("test", "--help") help_text = """usage: rasa test [-h] [-v] [-vv] [--quiet] [-m MODEL] [-s STORIES] - [--max-stories MAX_STORIES] [--e2e] [--endpoints ENDPOINTS] + [--max-stories MAX_STORIES] [--endpoints ENDPOINTS] [--fail-on-prediction-errors] [--url URL] - [--evaluate-model-directory] [-u NLU] [--out OUT] - [--successes] [--no-errors] [--histogram HISTOGRAM] - [--confmat CONFMAT] [-c CONFIG [CONFIG ...]] - [--cross-validation] [-f FOLDS] [-r RUNS] - [-p PERCENTAGES [PERCENTAGES ...]] + [--evaluate-model-directory] [-u NLU] + [-c CONFIG [CONFIG ...]] [--cross-validation] [-f FOLDS] + [-r RUNS] [-p PERCENTAGES [PERCENTAGES ...]] [--no-plot] + [--successes] [--no-errors] [--out OUT] {core,nlu} ...""" lines = help_text.split("\n") @@ -139,14 +167,13 @@ def test_test_help(run): assert output.outlines[i] == line -def test_test_nlu_help(run): +def test_test_nlu_help(run: Callable[..., RunResult]): output = run("test", "nlu", "--help") help_text = """usage: rasa test nlu [-h] [-v] [-vv] [--quiet] [-m MODEL] [-u NLU] [--out OUT] - [--successes] [--no-errors] [--histogram HISTOGRAM] - [--confmat CONFMAT] [-c CONFIG [CONFIG ...]] - [--cross-validation] [-f FOLDS] [-r RUNS] - [-p PERCENTAGES [PERCENTAGES ...]]""" + [-c CONFIG [CONFIG ...]] [--cross-validation] [-f FOLDS] + [-r RUNS] [-p PERCENTAGES [PERCENTAGES ...]] [--no-plot] + [--successes] [--no-errors]""" lines = help_text.split("\n") @@ -154,14 +181,15 @@ def test_test_nlu_help(run): assert output.outlines[i] == line -def test_test_core_help(run): +def test_test_core_help(run: Callable[..., RunResult]): output = run("test", "core", "--help") help_text = """usage: rasa test core [-h] [-v] [-vv] [--quiet] [-m MODEL [MODEL ...]] [-s STORIES] [--max-stories MAX_STORIES] [--out OUT] [--e2e] [--endpoints ENDPOINTS] [--fail-on-prediction-errors] [--url URL] - [--evaluate-model-directory]""" + [--evaluate-model-directory] [--no-plot] [--successes] + [--no-errors]""" lines = help_text.split("\n") diff --git a/tests/cli/test_rasa_train.py b/tests/cli/test_rasa_train.py index 358569fe0b2f..44d903d62246 100644 --- a/tests/cli/test_rasa_train.py +++ b/tests/cli/test_rasa_train.py @@ -1,11 +1,16 @@ import os -import shutil import tempfile +from pathlib import Path import pytest +from typing import Callable +from _pytest.pytester import RunResult from rasa import model +from rasa.nlu.model import Metadata +from rasa.nlu.training_data import training_data +# noinspection PyProtectedMember from rasa.cli.train import _get_valid_config from rasa.constants import ( CONFIG_MANDATORY_KEYS_CORE, @@ -15,10 +20,10 @@ import rasa.utils.io as io_utils -def test_train(run_in_default_project): +def test_train(run_in_simple_project: Callable[..., RunResult]): temp_dir = os.getcwd() - run_in_default_project( + run_in_simple_project( "train", "-c", "config.yml", @@ -36,21 +41,59 @@ def test_train(run_in_default_project): files = io_utils.list_files(os.path.join(temp_dir, "train_models")) assert len(files) == 1 assert os.path.basename(files[0]) == "test-model.tar.gz" + model_dir = model.get_model("train_models") + assert model_dir is not None + metadata = Metadata.load(os.path.join(model_dir, "nlu")) + assert metadata.get("training_data") is None + assert not os.path.exists( + 
os.path.join(model_dir, "nlu", training_data.DEFAULT_TRAINING_DATA_OUTPUT_PATH) + ) -def test_train_core_compare(run_in_default_project): +def test_train_persist_nlu_data(run_in_simple_project: Callable[..., RunResult]): temp_dir = os.getcwd() - io_utils.write_yaml_file( + run_in_simple_project( + "train", + "-c", + "config.yml", + "-d", + "domain.yml", + "--data", + "data", + "--out", + "train_models", + "--fixed-model-name", + "test-model", + "--persist-nlu-data", + ) + + assert os.path.exists(os.path.join(temp_dir, "train_models")) + files = io_utils.list_files(os.path.join(temp_dir, "train_models")) + assert len(files) == 1 + assert os.path.basename(files[0]) == "test-model.tar.gz" + model_dir = model.get_model("train_models") + assert model_dir is not None + metadata = Metadata.load(os.path.join(model_dir, "nlu")) + assert metadata.get("training_data") is not None + assert os.path.exists( + os.path.join(model_dir, "nlu", training_data.DEFAULT_TRAINING_DATA_OUTPUT_PATH) + ) + + +def test_train_core_compare(run_in_simple_project: Callable[..., RunResult]): + temp_dir = os.getcwd() + + io_utils.write_yaml( { "language": "en", "pipeline": "supervised_embeddings", - "policies": [{"name": "KerasPolicy"}], + "policies": [{"name": "MemoizationPolicy"}], }, "config_1.yml", ) - io_utils.write_yaml_file( + io_utils.write_yaml( { "language": "en", "pipeline": "supervised_embeddings", @@ -59,14 +102,14 @@ def test_train_core_compare(run_in_default_project): "config_2.yml", ) - run_in_default_project( + run_in_simple_project( "train", "core", "-c", "config_1.yml", "config_2.yml", "--stories", - "data/stories.md", + "data/stories.yml", "--out", "core_comparison_results", "--runs", @@ -90,10 +133,12 @@ def test_train_core_compare(run_in_default_project): assert model_files[0].endswith("tar.gz") -def test_train_no_domain_exists(run_in_default_project): +def test_train_no_domain_exists( + run_in_simple_project: Callable[..., RunResult] +) -> None: os.remove("domain.yml") - run_in_default_project( + run_in_simple_project( "train", "-c", "config.yml", @@ -116,7 +161,9 @@ def test_train_no_domain_exists(run_in_default_project): assert os.path.exists(metadata_path) -def test_train_skip_on_model_not_changed(run_in_default_project): +def test_train_skip_on_model_not_changed( + run_in_simple_project_with_model: Callable[..., RunResult] +): temp_dir = os.getcwd() assert os.path.exists(os.path.join(temp_dir, "models")) @@ -124,7 +171,7 @@ def test_train_skip_on_model_not_changed(run_in_default_project): assert len(files) == 1 file_name = files[0] - run_in_default_project("train") + run_in_simple_project_with_model("train") assert os.path.exists(os.path.join(temp_dir, "models")) files = io_utils.list_files(os.path.join(temp_dir, "models")) @@ -132,28 +179,28 @@ def test_train_skip_on_model_not_changed(run_in_default_project): assert file_name == files[0] -def test_train_force(run_in_default_project): +def test_train_force(run_in_simple_project_with_model: Callable[..., RunResult]): temp_dir = os.getcwd() assert os.path.exists(os.path.join(temp_dir, "models")) files = io_utils.list_files(os.path.join(temp_dir, "models")) assert len(files) == 1 - run_in_default_project("train", "--force") + run_in_simple_project_with_model("train", "--force") assert os.path.exists(os.path.join(temp_dir, "models")) files = io_utils.list_files(os.path.join(temp_dir, "models")) assert len(files) == 2 -def test_train_with_only_nlu_data(run_in_default_project): - temp_dir = os.getcwd() +def 
test_train_with_only_nlu_data(run_in_simple_project: Callable[..., RunResult]): + temp_dir = Path.cwd() - assert os.path.exists(os.path.join(temp_dir, "data/stories.md")) - os.remove(os.path.join(temp_dir, "data/stories.md")) - shutil.rmtree(os.path.join(temp_dir, "models")) + for core_file in ["stories.yml", "rules.yml"]: + assert (temp_dir / "data" / core_file).exists() + (temp_dir / "data" / core_file).unlink() - run_in_default_project("train", "--fixed-model-name", "test-model") + run_in_simple_project("train", "--fixed-model-name", "test-model") assert os.path.exists(os.path.join(temp_dir, "models")) files = io_utils.list_files(os.path.join(temp_dir, "models")) @@ -161,14 +208,13 @@ def test_train_with_only_nlu_data(run_in_default_project): assert os.path.basename(files[0]) == "test-model.tar.gz" -def test_train_with_only_core_data(run_in_default_project): +def test_train_with_only_core_data(run_in_simple_project: Callable[..., RunResult]): temp_dir = os.getcwd() - assert os.path.exists(os.path.join(temp_dir, "data/nlu.md")) - os.remove(os.path.join(temp_dir, "data/nlu.md")) - shutil.rmtree(os.path.join(temp_dir, "models")) + assert os.path.exists(os.path.join(temp_dir, "data/nlu.yml")) + os.remove(os.path.join(temp_dir, "data/nlu.yml")) - run_in_default_project("train", "--fixed-model-name", "test-model") + run_in_simple_project("train", "--fixed-model-name", "test-model") assert os.path.exists(os.path.join(temp_dir, "models")) files = io_utils.list_files(os.path.join(temp_dir, "models")) @@ -176,8 +222,8 @@ def test_train_with_only_core_data(run_in_default_project): assert os.path.basename(files[0]) == "test-model.tar.gz" -def test_train_core(run_in_default_project): - run_in_default_project( +def test_train_core(run_in_simple_project: Callable[..., RunResult]): + run_in_simple_project( "train", "core", "-c", @@ -196,10 +242,10 @@ def test_train_core(run_in_default_project): assert os.path.isfile("train_rasa_models/rasa-model.tar.gz") -def test_train_core_no_domain_exists(run_in_default_project): +def test_train_core_no_domain_exists(run_in_simple_project: Callable[..., RunResult]): os.remove("domain.yml") - run_in_default_project( + run_in_simple_project( "train", "core", "--config", @@ -218,31 +264,35 @@ def test_train_core_no_domain_exists(run_in_default_project): assert not os.path.isfile("train_rasa_models_no_domain/rasa-model.tar.gz") -def count_rasa_temp_files(): - count = 0 - for entry in os.scandir(tempfile.gettempdir()): - if not entry.is_dir(): - continue - - try: - for f in os.listdir(entry.path): - if f.endswith("_nlu.md") or f.endswith("_stories.md"): - count += 1 - except PermissionError: - # Ignore permission errors - pass - - return count - +def test_train_nlu(run_in_simple_project: Callable[..., RunResult]): + run_in_simple_project( + "train", + "nlu", + "-c", + "config.yml", + "--nlu", + "data/nlu.md", + "--out", + "train_models", + ) -def test_train_core_temp_files(run_in_default_project): - count = count_rasa_temp_files() - run_in_default_project("train", "core") - assert count == count_rasa_temp_files() + assert os.path.exists("train_models") + files = io_utils.list_files("train_models") + assert len(files) == 1 + assert os.path.basename(files[0]).startswith("nlu-") + model_dir = model.get_model("train_models") + assert model_dir is not None + metadata = Metadata.load(os.path.join(model_dir, "nlu")) + assert metadata.get("training_data") is None + assert not os.path.exists( + os.path.join(model_dir, "nlu", training_data.DEFAULT_TRAINING_DATA_OUTPUT_PATH) + ) 
-def test_train_nlu(run_in_default_project): - run_in_default_project( +def test_train_nlu_persist_nlu_data( + run_in_simple_project: Callable[..., RunResult] +) -> None: + run_in_simple_project( "train", "nlu", "-c", @@ -251,18 +301,20 @@ def test_train_nlu(run_in_default_project): "data/nlu.md", "--out", "train_models", + "--persist-nlu-data", ) assert os.path.exists("train_models") files = io_utils.list_files("train_models") assert len(files) == 1 assert os.path.basename(files[0]).startswith("nlu-") - - -def test_train_nlu_temp_files(run_in_default_project): - count = count_rasa_temp_files() - run_in_default_project("train", "nlu") - assert count == count_rasa_temp_files() + model_dir = model.get_model("train_models") + assert model_dir is not None + metadata = Metadata.load(os.path.join(model_dir, "nlu")) + assert metadata.get("training_data") is not None + assert os.path.exists( + os.path.join(model_dir, "nlu", training_data.DEFAULT_TRAINING_DATA_OUTPUT_PATH) + ) def test_train_help(run): @@ -271,7 +323,8 @@ def test_train_help(run): help_text = """usage: rasa train [-h] [-v] [-vv] [--quiet] [--data DATA [DATA ...]] [-c CONFIG] [-d DOMAIN] [--out OUT] [--augmentation AUGMENTATION] [--debug-plots] - [--dump-stories] [--fixed-model-name FIXED_MODEL_NAME] + [--num-threads NUM_THREADS] + [--fixed-model-name FIXED_MODEL_NAME] [--persist-nlu-data] [--force] {core,nlu} ...""" @@ -281,11 +334,13 @@ def test_train_help(run): assert output.outlines[i] == line -def test_train_nlu_help(run): +def test_train_nlu_help(run: Callable[..., RunResult]): output = run("train", "nlu", "--help") help_text = """usage: rasa train nlu [-h] [-v] [-vv] [--quiet] [-c CONFIG] [--out OUT] - [-u NLU] [--fixed-model-name FIXED_MODEL_NAME]""" + [-u NLU] [--num-threads NUM_THREADS] + [--fixed-model-name FIXED_MODEL_NAME] + [--persist-nlu-data]""" lines = help_text.split("\n") @@ -293,13 +348,12 @@ def test_train_nlu_help(run): assert output.outlines[i] == line -def test_train_core_help(run): +def test_train_core_help(run: Callable[..., RunResult]): output = run("train", "core", "--help") help_text = """usage: rasa train core [-h] [-v] [-vv] [--quiet] [-s STORIES] [-d DOMAIN] [-c CONFIG [CONFIG ...]] [--out OUT] - [--augmentation AUGMENTATION] [--debug-plots] - [--dump-stories] [--force] + [--augmentation AUGMENTATION] [--debug-plots] [--force] [--fixed-model-name FIXED_MODEL_NAME] [--percentages [PERCENTAGES [PERCENTAGES ...]]] [--runs RUNS]""" @@ -318,30 +372,44 @@ def test_train_core_help(run): "default_config": { "language": "en", "pipeline": "supervised", - "policies": ["KerasPolicy", "FallbackPolicy"], + "policies": ["TEDPolicy", "FallbackPolicy"], }, "mandatory_keys": CONFIG_MANDATORY_KEYS_CORE, - "error": True, + "error": False, + }, + { + "config_data": { + "language": "en", + "pipeline": "supervised", + "policies": None, + }, + "default_config": { + "language": "en", + "pipeline": "supervised", + "policies": ["TEDPolicy", "FallbackPolicy"], + }, + "mandatory_keys": CONFIG_MANDATORY_KEYS_CORE, + "error": False, }, { "config_data": {}, "default_config": { "language": "en", "pipeline": "supervised", - "policies": ["KerasPolicy", "FallbackPolicy"], + "policies": ["TEDPolicy", "FallbackPolicy"], }, "mandatory_keys": CONFIG_MANDATORY_KEYS, "error": True, }, { "config_data": { - "policies": ["KerasPolicy", "FallbackPolicy"], + "policies": ["TEDPolicy", "FallbackPolicy"], "imports": "other-folder", }, "default_config": { "language": "en", "pipeline": "supervised", - "policies": ["KerasPolicy", "FallbackPolicy"], + 
"policies": ["TEDPolicy", "FallbackPolicy"], }, "mandatory_keys": CONFIG_MANDATORY_KEYS_NLU, "error": True, @@ -350,7 +418,7 @@ def test_train_core_help(run): "config_data": None, "default_config": { "pipeline": "supervised", - "policies": ["KerasPolicy", "FallbackPolicy"], + "policies": ["TEDPolicy", "FallbackPolicy"], }, "mandatory_keys": CONFIG_MANDATORY_KEYS_NLU, "error": True, @@ -360,7 +428,7 @@ def test_train_core_help(run): "default_config": { "language": "en", "pipeline": "supervised", - "policies": ["KerasPolicy", "FallbackPolicy"], + "policies": ["TEDPolicy", "FallbackPolicy"], }, "mandatory_keys": CONFIG_MANDATORY_KEYS, "error": False, @@ -369,7 +437,7 @@ def test_train_core_help(run): "config_data": None, "default_config": {"language": "en", "pipeline": "supervised"}, "mandatory_keys": CONFIG_MANDATORY_KEYS_CORE, - "error": True, + "error": False, }, { "config_data": None, @@ -380,17 +448,15 @@ def test_train_core_help(run): ], ) def test_get_valid_config(parameters): - import rasa.utils.io - config_path = None if parameters["config_data"] is not None: config_path = os.path.join(tempfile.mkdtemp(), "config.yml") - rasa.utils.io.write_yaml_file(parameters["config_data"], config_path) + io_utils.write_yaml(parameters["config_data"], config_path) default_config_path = None if parameters["default_config"] is not None: default_config_path = os.path.join(tempfile.mkdtemp(), "default-config.yml") - rasa.utils.io.write_yaml_file(parameters["default_config"], default_config_path) + io_utils.write_yaml(parameters["default_config"], default_config_path) if parameters["error"]: with pytest.raises(SystemExit): @@ -401,7 +467,7 @@ def test_get_valid_config(parameters): config_path, parameters["mandatory_keys"], default_config_path ) - config_data = rasa.utils.io.read_yaml_file(config_path) + config_data = io_utils.read_yaml_file(config_path) for k in parameters["mandatory_keys"]: assert k in config_data diff --git a/tests/cli/test_rasa_visualize.py b/tests/cli/test_rasa_visualize.py index 0aaffa1080b9..f69115c9529b 100644 --- a/tests/cli/test_rasa_visualize.py +++ b/tests/cli/test_rasa_visualize.py @@ -1,4 +1,8 @@ -def test_visualize_help(run): +from typing import Callable +from _pytest.pytester import RunResult + + +def test_visualize_help(run: Callable[..., RunResult]): output = run("visualize", "--help") help_text = """usage: rasa visualize [-h] [-v] [-vv] [--quiet] [-d DOMAIN] [-s STORIES] diff --git a/tests/cli/test_rasa_x.py b/tests/cli/test_rasa_x.py index 74ebf7db1567..888271726867 100644 --- a/tests/cli/test_rasa_x.py +++ b/tests/cli/test_rasa_x.py @@ -1,24 +1,32 @@ +from pathlib import Path + import pytest +from typing import Callable, Dict +from _pytest.pytester import RunResult + + from aioresponses import aioresponses import rasa.utils.io as io_utils from rasa.cli import x from rasa.utils.endpoints import EndpointConfig +from rasa.core.utils import AvailableEndpoints -def test_x_help(run): +def test_x_help(run: Callable[..., RunResult]): output = run("x", "--help") - help_text = """usage: rasa x [-h] [-v] [-vv] [--quiet] [-m MODEL] [--data DATA] [--no-prompt] - [--production] [--rasa-x-port RASA_X_PORT] + help_text = """usage: rasa x [-h] [-v] [-vv] [--quiet] [-m MODEL] [--data DATA] [-c CONFIG] + [--no-prompt] [--production] [--rasa-x-port RASA_X_PORT] [--config-endpoint CONFIG_ENDPOINT] [--log-file LOG_FILE] [--endpoints ENDPOINTS] [-p PORT] [-t AUTH_TOKEN] [--cors [CORS [CORS ...]]] [--enable-api] + [--response-timeout RESPONSE_TIMEOUT] [--remote-storage REMOTE_STORAGE] 
[--ssl-certificate SSL_CERTIFICATE] [--ssl-keyfile SSL_KEYFILE] - [--ssl-password SSL_PASSWORD] [--credentials CREDENTIALS] - [--connector CONNECTOR] [--jwt-secret JWT_SECRET] - [--jwt-method JWT_METHOD]""" + [--ssl-ca-file SSL_CA_FILE] [--ssl-password SSL_PASSWORD] + [--credentials CREDENTIALS] [--connector CONNECTOR] + [--jwt-secret JWT_SECRET] [--jwt-method JWT_METHOD]""" lines = help_text.split("\n") @@ -26,11 +34,10 @@ def test_x_help(run): assert output.outlines[i] == line -def test_prepare_credentials_for_rasa_x_if_rasa_channel_not_given(tmpdir_factory): - directory = tmpdir_factory.mktemp("directory") - credentials_path = str(directory / "credentials.yml") +def test_prepare_credentials_for_rasa_x_if_rasa_channel_not_given(tmpdir: Path): + credentials_path = str(tmpdir / "credentials.yml") - io_utils.write_yaml_file({}, credentials_path) + io_utils.write_yaml({}, credentials_path) tmp_credentials = x._prepare_credentials_for_rasa_x( credentials_path, "http://localhost:5002" @@ -41,15 +48,14 @@ def test_prepare_credentials_for_rasa_x_if_rasa_channel_not_given(tmpdir_factory assert actual["rasa"]["url"] == "http://localhost:5002" -def test_prepare_credentials_if_already_valid(tmpdir_factory): - directory = tmpdir_factory.mktemp("directory") - credentials_path = str(directory / "credentials.yml") +def test_prepare_credentials_if_already_valid(tmpdir: Path): + credentials_path = str(tmpdir / "credentials.yml") credentials = { "rasa": {"url": "my-custom-url"}, "another-channel": {"url": "some-url"}, } - io_utils.write_yaml_file(credentials, credentials_path) + io_utils.write_yaml(credentials, credentials_path) x._prepare_credentials_for_rasa_x(credentials_path) @@ -58,10 +64,10 @@ def test_prepare_credentials_if_already_valid(tmpdir_factory): assert actual == credentials -def test_if_endpoint_config_is_valid_in_local_mode(): - config = EndpointConfig(type="sql", dialect="sqlite", db=x.DEFAULT_EVENTS_DB) +def test_if_default_endpoint_config_is_valid_in_local_mode(): + event_broker_endpoint = x._get_event_broker_endpoint(None) - assert x._is_correct_event_broker(config) + assert x._is_correct_event_broker(event_broker_endpoint) @pytest.mark.parametrize( @@ -72,11 +78,57 @@ def test_if_endpoint_config_is_valid_in_local_mode(): {"type": "sql", "dialect": "sqlite", "db": "some.db"}, ], ) -def test_if_endpoint_config_is_invalid_in_local_mode(kwargs): +def test_if_endpoint_config_is_invalid_in_local_mode(kwargs: Dict): config = EndpointConfig(**kwargs) assert not x._is_correct_event_broker(config) +def test_overwrite_model_server_url(): + endpoint_config = EndpointConfig(url="http://testserver:5002/models/default@latest") + endpoints = AvailableEndpoints(model=endpoint_config) + x._overwrite_endpoints_for_local_x(endpoints, "test", "http://localhost") + assert ( + endpoints.model.url + == "http://localhost/projects/default/models/tags/production" + ) + + +def test_overwrite_model_server_url_with_no_model_endpoint(): + endpoints = AvailableEndpoints() + x._overwrite_endpoints_for_local_x(endpoints, "test", "http://localhost") + assert ( + endpoints.model.url + == "http://localhost/projects/default/models/tags/production" + ) + + +def test_reuse_wait_time_between_pulls(): + test_wait_time = 5 + endpoint_config = EndpointConfig( + url="http://localhost:5002/models/default@latest", + wait_time_between_pulls=test_wait_time, + ) + endpoints = AvailableEndpoints(model=endpoint_config) + assert endpoints.model.kwargs["wait_time_between_pulls"] == test_wait_time + + +def 
test_default_wait_time_between_pulls(): + endpoint_config = EndpointConfig(url="http://localhost:5002/models/default@latest") + endpoints = AvailableEndpoints(model=endpoint_config) + x._overwrite_endpoints_for_local_x(endpoints, "test", "http://localhost") + assert endpoints.model.kwargs["wait_time_between_pulls"] == 2 + + +def test_default_model_server_url(): + endpoint_config = EndpointConfig() + endpoints = AvailableEndpoints(model=endpoint_config) + x._overwrite_endpoints_for_local_x(endpoints, "test", "http://localhost") + assert ( + endpoints.model.url + == "http://localhost/projects/default/models/tags/production" + ) + + async def test_pull_runtime_config_from_server(): config_url = "http://example.com/api/config?token=token" credentials = "rasa: http://example.com:5002/api" @@ -97,7 +149,5 @@ async def test_pull_runtime_config_from_server(): config_url, 1, 0 ) - with open(endpoints_path) as f: - assert f.read() == endpoint_config - with open(credentials_path) as f: - assert f.read() == credentials + assert io_utils.read_file(endpoints_path) == endpoint_config + assert io_utils.read_file(credentials_path) == credentials diff --git a/tests/cli/test_utils.py b/tests/cli/test_utils.py index 8aa535b5c4bf..6257da93f10f 100644 --- a/tests/cli/test_utils.py +++ b/tests/cli/test_utils.py @@ -1,4 +1,7 @@ +import contextlib import logging +import os +import pathlib import sys import tempfile @@ -12,39 +15,56 @@ ) +@contextlib.contextmanager +def make_actions_subdir(): + """Create a subdir called actions to test model argument handling.""" + with tempfile.TemporaryDirectory() as tempdir: + cwd = os.getcwd() + os.chdir(tempdir) + try: + (pathlib.Path(tempdir) / "actions").mkdir() + yield + finally: + os.chdir(cwd) + + @pytest.mark.parametrize( "argv", [ ["rasa", "run"], + ["rasa", "run", "actions"], ["rasa", "run", "core"], ["rasa", "interactive", "nlu", "--param", "xy"], ], ) def test_parse_last_positional_argument_as_model_path(argv): - test_model_dir = tempfile.gettempdir() - argv.append(test_model_dir) + with make_actions_subdir(): + test_model_dir = tempfile.gettempdir() + argv.append(test_model_dir) - sys.argv = argv.copy() - parse_last_positional_argument_as_model_path() + sys.argv = argv.copy() + parse_last_positional_argument_as_model_path() - assert sys.argv[-2] == "--model" - assert sys.argv[-1] == test_model_dir + assert sys.argv[-2] == "--model" + assert sys.argv[-1] == test_model_dir @pytest.mark.parametrize( "argv", [ ["rasa", "run"], + ["rasa", "run", "actions"], ["rasa", "run", "core"], ["rasa", "test", "nlu", "--param", "xy", "--model", "test"], ], ) def test_parse_no_positional_model_path_argument(argv): - sys.argv = argv.copy() + with make_actions_subdir(): + sys.argv = argv.copy() - parse_last_positional_argument_as_model_path() + parse_last_positional_argument_as_model_path() - assert sys.argv == argv + assert sys.argv == argv def test_validate_invalid_path(): @@ -71,14 +91,13 @@ def test_validate_with_none_if_default_is_valid(caplog: LogCaptureFixture): assert caplog.records == [] -def test_validate_with_invalid_directory_if_default_is_valid(caplog: LogCaptureFixture): +def test_validate_with_invalid_directory_if_default_is_valid(): tempdir = tempfile.mkdtemp() invalid_directory = "gcfhvjkb" - - with caplog.at_level(logging.WARNING, rasa.cli.utils.logger.name): + with pytest.warns(UserWarning) as record: assert get_validated_path(invalid_directory, "out", tempdir) == tempdir - - assert "'{}' does not exist".format(invalid_directory) in caplog.text + assert len(record) 
== 1 + assert "does not seem to exist" in record[0].message.args[0] def test_print_error_and_exit(): diff --git a/tests/conftest.py b/tests/conftest.py index 103286241a8e..25db21dbc816 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,45 +1,69 @@ +import asyncio +import random import pytest -import logging +import sys +import uuid + +from sanic.request import Request +from sanic.testing import SanicTestClient + +from typing import Iterator, Callable + +from _pytest.tmpdir import TempdirFactory +from pathlib import Path +from sanic import Sanic +from typing import Text, List, Optional, Dict, Any +from unittest.mock import Mock -from rasa.core.run import _create_app_without_api from rasa import server from rasa.core import config from rasa.core.agent import Agent, load_agent -from rasa.core.channels.channel import RestInput -from rasa.core.channels import channel +from rasa.core.brokers.broker import EventBroker +from rasa.core.channels import channel, RestInput +from rasa.core.domain import SessionConfig +from rasa.core.events import UserUttered +from rasa.core.exporter import Exporter +from rasa.core.policies import Policy from rasa.core.policies.memoization import AugmentedMemoizationPolicy +import rasa.core.run +from rasa.core.tracker_store import InMemoryTrackerStore, TrackerStore from rasa.model import get_model from rasa.train import train_async +from rasa.utils.common import TempDirectoryPath +import rasa.utils.io as io_utils from tests.core.conftest import ( - DEFAULT_STORIES_FILE, DEFAULT_DOMAIN_PATH_WITH_SLOTS, DEFAULT_STACK_CONFIG, - DEFAULT_NLU_DATA, + DEFAULT_STORIES_FILE, END_TO_END_STORY_FILE, - MOODBOT_MODEL_PATH, + INCORRECT_NLU_DATA, ) DEFAULT_CONFIG_PATH = "rasa/cli/default_config.yml" +DEFAULT_NLU_DATA = "examples/moodbot/data/nlu.yml" + # we reuse a bit of pytest's own testing machinery, this should eventually come # from a separatedly installable pytest-cli plugin. pytest_plugins = ["pytester"] -@pytest.fixture(autouse=True) -def set_log_level_debug(caplog): - # Set the post-test log level to DEBUG for failing tests. For all tests - # (failing and successful), the live log level can be additionally set in - # `setup.cfg`. It should be set to WARNING. 
- caplog.set_level(logging.DEBUG) +# https://github.com/pytest-dev/pytest-asyncio/issues/68 +# this event_loop is used by pytest-asyncio, and redefining it +# is currently the only way of changing the scope of this fixture +@pytest.yield_fixture(scope="session") +def event_loop(request: Request) -> Iterator[asyncio.AbstractEventLoop]: + loop = asyncio.get_event_loop_policy().new_event_loop() + yield loop + loop.close() -@pytest.fixture -async def default_agent(tmpdir_factory) -> Agent: +@pytest.fixture(scope="session") +async def _trained_default_agent(tmpdir_factory: TempdirFactory) -> Agent: model_path = tmpdir_factory.mktemp("model").strpath agent = Agent( - "data/test_domains/default.yml", + "data/test_domains/default_with_slots.yml", policies=[AugmentedMemoizationPolicy(max_history=3)], ) @@ -49,73 +73,106 @@ async def default_agent(tmpdir_factory) -> Agent: return agent +def reset_conversation_state(agent: Agent) -> Agent: + # Clean tracker store after each test so tests don't affect each other + agent.tracker_store = InMemoryTrackerStore(agent.domain) + agent.domain.session_config = SessionConfig.default() + return agent + + +@pytest.fixture +async def default_agent(_trained_default_agent: Agent) -> Agent: + return reset_conversation_state(_trained_default_agent) + + @pytest.fixture(scope="session") -async def trained_moodbot_path(): - return await train_async( +async def trained_moodbot_path(trained_async) -> Text: + return await trained_async( domain="examples/moodbot/domain.yml", config="examples/moodbot/config.yml", training_files="examples/moodbot/data/", - output_path=MOODBOT_MODEL_PATH, ) @pytest.fixture(scope="session") -async def unpacked_trained_moodbot_path(trained_moodbot_path): +async def unpacked_trained_moodbot_path( + trained_moodbot_path: Text, +) -> TempDirectoryPath: return get_model(trained_moodbot_path) -@pytest.fixture -async def stack_agent(trained_rasa_model) -> Agent: +@pytest.fixture(scope="session") +async def stack_agent(trained_rasa_model: Text) -> Agent: return await load_agent(model_path=trained_rasa_model) -@pytest.fixture -async def core_agent(trained_core_model) -> Agent: +@pytest.fixture(scope="session") +async def core_agent(trained_core_model: Text) -> Agent: return await load_agent(model_path=trained_core_model) -@pytest.fixture -async def nlu_agent(trained_nlu_model) -> Agent: +@pytest.fixture(scope="session") +async def nlu_agent(trained_nlu_model: Text) -> Agent: return await load_agent(model_path=trained_nlu_model) @pytest.fixture(scope="session") -def default_domain_path(): +def default_domain_path() -> Text: return DEFAULT_DOMAIN_PATH_WITH_SLOTS @pytest.fixture(scope="session") -def default_stories_file(): +def default_stories_file() -> Text: return DEFAULT_STORIES_FILE @pytest.fixture(scope="session") -def default_stack_config(): +def default_stack_config() -> Text: return DEFAULT_STACK_CONFIG @pytest.fixture(scope="session") -def default_nlu_data(): +def default_nlu_data() -> Text: return DEFAULT_NLU_DATA @pytest.fixture(scope="session") -def end_to_end_story_file(): +def incorrect_nlu_data() -> Text: + return INCORRECT_NLU_DATA + + +@pytest.fixture(scope="session") +def end_to_end_story_file() -> Text: return END_TO_END_STORY_FILE @pytest.fixture(scope="session") -def default_config(): +def default_config() -> List[Policy]: return config.load(DEFAULT_CONFIG_PATH) -@pytest.fixture() +@pytest.fixture(scope="session") +def trained_async(tmpdir_factory: TempdirFactory) -> Callable: + async def _train( + *args: Any, output_path: 
Optional[Text] = None, **kwargs: Any + ) -> Optional[Text]: + if output_path is None: + output_path = str(tmpdir_factory.mktemp("models")) + + return await train_async(*args, output_path=output_path, **kwargs) + + return _train + + +@pytest.fixture(scope="session") async def trained_rasa_model( - default_domain_path, default_config, default_nlu_data, default_stories_file -): - clean_folder("models") - trained_stack_model_path = await train_async( - domain="data/test_domains/default.yml", + trained_async: Callable, + default_domain_path: Text, + default_nlu_data: Text, + default_stories_file: Text, +) -> Text: + trained_stack_model_path = await trained_async( + domain=default_domain_path, config=DEFAULT_STACK_CONFIG, training_files=[default_nlu_data, default_stories_file], ) @@ -123,11 +180,14 @@ async def trained_rasa_model( return trained_stack_model_path -@pytest.fixture() +@pytest.fixture(scope="session") async def trained_core_model( - default_domain_path, default_config, default_nlu_data, default_stories_file -): - trained_core_model_path = await train_async( + trained_async: Callable, + default_domain_path: Text, + default_nlu_data: Text, + default_stories_file: Text, +) -> Text: + trained_core_model_path = await trained_async( domain=default_domain_path, config=DEFAULT_STACK_CONFIG, training_files=[default_stories_file], @@ -136,11 +196,15 @@ async def trained_core_model( return trained_core_model_path -@pytest.fixture() +@pytest.fixture(scope="session") async def trained_nlu_model( - default_domain_path, default_config, default_nlu_data, default_stories_file -): - trained_nlu_model_path = await train_async( + trained_async: Callable, + default_domain_path: Text, + default_config: List[Policy], + default_nlu_data: Text, + default_stories_file: Text, +) -> Text: + trained_nlu_model_path = await trained_async( domain=default_domain_path, config=DEFAULT_STACK_CONFIG, training_files=[default_nlu_data], @@ -150,44 +214,78 @@ async def trained_nlu_model( @pytest.fixture -async def rasa_server(stack_agent): +async def rasa_server(stack_agent: Agent) -> Sanic: app = server.create_app(agent=stack_agent) channel.register([RestInput()], app, "/webhooks/") return app @pytest.fixture -async def rasa_core_server(core_agent): +async def rasa_core_server(core_agent: Agent) -> Sanic: app = server.create_app(agent=core_agent) channel.register([RestInput()], app, "/webhooks/") return app @pytest.fixture -async def rasa_nlu_server(nlu_agent): +async def rasa_nlu_server(nlu_agent: Agent) -> Sanic: app = server.create_app(agent=nlu_agent) channel.register([RestInput()], app, "/webhooks/") return app @pytest.fixture -async def rasa_server_secured(default_agent): +async def rasa_server_secured(default_agent: Agent) -> Sanic: app = server.create_app(agent=default_agent, auth_token="rasa", jwt_secret="core") channel.register([RestInput()], app, "/webhooks/") return app @pytest.fixture -async def rasa_server_without_api(): - app = _create_app_without_api() +async def rasa_server_without_api() -> Sanic: + app = rasa.core.run._create_app_without_api() channel.register([RestInput()], app, "/webhooks/") return app -def clean_folder(folder): - import os +def get_test_client(server: Sanic) -> SanicTestClient: + test_client = server.test_client + test_client.port = None + return test_client + + +def write_endpoint_config_to_yaml( + path: Path, data: Dict[Text, Any], endpoints_filename: Text = "endpoints.yml" +) -> Path: + endpoints_path = path / endpoints_filename + + # write endpoints config to file + 
io_utils.write_yaml(data, endpoints_path) + return endpoints_path + + +def random_user_uttered_event(timestamp: Optional[float] = None) -> UserUttered: + return UserUttered( + uuid.uuid4().hex, + timestamp=timestamp if timestamp is not None else random.random(), + ) + + +def pytest_runtest_setup(item) -> None: + if ( + "skip_on_windows" in [mark.name for mark in item.iter_markers()] + and sys.platform == "win32" + ): + pytest.skip("cannot run on Windows") + - if os.path.exists(folder): - import shutil +class MockExporter(Exporter): + """Mocked `Exporter` object.""" - shutil.rmtree(folder) + def __init__( + self, + tracker_store: TrackerStore = Mock(), + event_broker: EventBroker = Mock(), + endpoints_path: Text = "", + ) -> None: + super().__init__(tracker_store, event_broker, endpoints_path) diff --git a/tests/core/actions/__init__.py b/tests/core/actions/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/core/actions/test_forms.py b/tests/core/actions/test_forms.py new file mode 100644 index 000000000000..a29958dd493a --- /dev/null +++ b/tests/core/actions/test_forms.py @@ -0,0 +1,1010 @@ +import asyncio +from typing import Dict, Text, List, Optional, Any +from unittest.mock import Mock, ANY + +import pytest +from _pytest.monkeypatch import MonkeyPatch +from aioresponses import aioresponses + +from rasa.core.actions import action +from rasa.core.actions.action import ACTION_LISTEN_NAME, ActionExecutionRejection +from rasa.core.actions.forms import FormAction, REQUESTED_SLOT +from rasa.core.channels import CollectingOutputChannel +from rasa.core.domain import Domain +from rasa.core.events import ( + ActiveLoop, + SlotSet, + UserUttered, + ActionExecuted, + BotUttered, + Restarted, + Event, +) +from rasa.core.nlg import TemplatedNaturalLanguageGenerator +from rasa.core.trackers import DialogueStateTracker +from rasa.utils.endpoints import EndpointConfig + + +async def test_activate(): + tracker = DialogueStateTracker.from_events(sender_id="bla", evts=[]) + form_name = "my form" + action = FormAction(form_name, None) + slot_name = "num_people" + domain = f""" +forms: +- {form_name}: + {slot_name}: + - type: from_entity + entity: number +responses: + utter_ask_num_people: + - text: "How many people?" 
+""" + domain = Domain.from_yaml(domain) + + events = await action.run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + assert events[:-1] == [ActiveLoop(form_name), SlotSet(REQUESTED_SLOT, slot_name)] + assert isinstance(events[-1], BotUttered) + + +async def test_activate_with_prefilled_slot(): + slot_name = "num_people" + slot_value = 5 + + tracker = DialogueStateTracker.from_events( + sender_id="bla", evts=[SlotSet(slot_name, slot_value)] + ) + form_name = "my form" + action = FormAction(form_name, None) + + next_slot_to_request = "next slot to request" + domain = f""" + forms: + - {form_name}: + {slot_name}: + - type: from_entity + entity: {slot_name} + {next_slot_to_request}: + - type: from_text + slots: + {slot_name}: + type: unfeaturized + """ + domain = Domain.from_yaml(domain) + events = await action.run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + assert events == [ + ActiveLoop(form_name), + SlotSet(slot_name, slot_value), + SlotSet(REQUESTED_SLOT, next_slot_to_request), + ] + + +async def test_activate_and_immediate_deactivate(): + slot_name = "num_people" + slot_value = 5 + + tracker = DialogueStateTracker.from_events( + sender_id="bla", + evts=[ + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered( + "haha", + {"name": "greet"}, + entities=[{"entity": slot_name, "value": slot_value}], + ), + ], + ) + form_name = "my form" + action = FormAction(form_name, None) + domain = f""" + forms: + - {form_name}: + {slot_name}: + - type: from_entity + entity: {slot_name} + slots: + {slot_name}: + type: unfeaturized + """ + domain = Domain.from_yaml(domain) + events = await action.run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + assert events == [ + ActiveLoop(form_name), + SlotSet(slot_name, slot_value), + SlotSet(REQUESTED_SLOT, None), + ActiveLoop(None), + ] + + +async def test_set_slot_and_deactivate(): + form_name = "my form" + slot_name = "num_people" + slot_value = "dasdasdfasdf" + events = [ + ActiveLoop(form_name), + SlotSet(REQUESTED_SLOT, slot_name), + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered(slot_value), + ] + tracker = DialogueStateTracker.from_events(sender_id="bla", evts=events) + + domain = f""" + forms: + - {form_name}: + {slot_name}: + - type: from_text + slots: + {slot_name}: + type: unfeaturized + """ + domain = Domain.from_yaml(domain) + + action = FormAction(form_name, None) + events = await action.run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + assert events == [ + SlotSet(slot_name, slot_value), + SlotSet(REQUESTED_SLOT, None), + ActiveLoop(None), + ] + + +async def test_action_rejection(): + form_name = "my form" + slot_to_fill = "some slot" + tracker = DialogueStateTracker.from_events( + sender_id="bla", + evts=[ + ActiveLoop(form_name), + SlotSet(REQUESTED_SLOT, slot_to_fill), + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": "greet"}), + ], + ) + form_name = "my form" + action = FormAction(form_name, None) + domain = f""" + forms: + - {form_name}: + {slot_to_fill}: + - type: from_entity + entity: some_entity + slots: + {slot_to_fill}: + type: unfeaturized + """ + domain = Domain.from_yaml(domain) + + with pytest.raises(ActionExecutionRejection): + await action.run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + 
+ +@pytest.mark.parametrize( + "validate_return_events, expected_events", + [ + # Validate function returns SlotSet events for every slot to fill + ( + [ + {"event": "slot", "name": "num_people", "value": "so_clean"}, + {"event": "slot", "name": "num_tables", "value": 5}, + ], + [ + SlotSet("num_people", "so_clean"), + SlotSet("num_tables", 5), + SlotSet(REQUESTED_SLOT, None), + ActiveLoop(None), + ], + ), + # Validate function returns extra Slot Event + ( + [ + {"event": "slot", "name": "num_people", "value": "so_clean"}, + {"event": "slot", "name": "some_other_slot", "value": 2}, + ], + [ + SlotSet("num_people", "so_clean"), + SlotSet("some_other_slot", 2), + SlotSet("num_tables", 5), + SlotSet(REQUESTED_SLOT, None), + ActiveLoop(None), + ], + ), + # Validate function only validates one of the candidates + ( + [{"event": "slot", "name": "num_people", "value": "so_clean"}], + [ + SlotSet("num_people", "so_clean"), + SlotSet("num_tables", 5), + SlotSet(REQUESTED_SLOT, None), + ActiveLoop(None), + ], + ), + # Validate function says slot is invalid + ( + [{"event": "slot", "name": "num_people", "value": None}], + [ + SlotSet("num_people", None), + SlotSet("num_tables", 5), + SlotSet(REQUESTED_SLOT, "num_people"), + ], + ), + # Validate function decides to request a slot which is not part of the default + # slot mapping + ( + [{"event": "slot", "name": "requested_slot", "value": "is_outside"}], + [ + SlotSet(REQUESTED_SLOT, "is_outside"), + SlotSet("num_tables", 5), + SlotSet("num_people", "hi"), + ], + ), + ], +) +async def test_validate_slots( + validate_return_events: List[Dict], expected_events: List[Event] +): + form_name = "my form" + slot_name = "num_people" + slot_value = "hi" + events = [ + ActiveLoop(form_name), + SlotSet(REQUESTED_SLOT, slot_name), + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered(slot_value, entities=[{"entity": "num_tables", "value": 5}]), + ] + tracker = DialogueStateTracker.from_events(sender_id="bla", evts=events) + + domain = f""" + slots: + {slot_name}: + type: unfeaturized + num_tables: + type: unfeaturized + forms: + - {form_name}: + {slot_name}: + - type: from_text + num_tables: + - type: from_entity + entity: num_tables + actions: + - validate_{form_name} + """ + domain = Domain.from_yaml(domain) + action_server_url = "http:/my-action-server:5055/webhook" + + with aioresponses() as mocked: + mocked.post(action_server_url, payload={"events": validate_return_events}) + + action_server = EndpointConfig(action_server_url) + action = FormAction(form_name, action_server) + + events = await action.run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + assert events == expected_events + + +async def test_validate_slots_on_activation_with_other_action_after_user_utterance(): + form_name = "my form" + slot_name = "num_people" + slot_value = "hi" + events = [ + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered(slot_value, entities=[{"entity": "num_tables", "value": 5}]), + ActionExecuted("action_in_between"), + ] + tracker = DialogueStateTracker.from_events(sender_id="bla", evts=events) + + domain = f""" + slots: + {slot_name}: + type: unfeaturized + forms: + - {form_name}: + {slot_name}: + - type: from_text + actions: + - validate_{form_name} + """ + domain = Domain.from_yaml(domain) + action_server_url = "http:/my-action-server:5055/webhook" + + expected_slot_value = "✅" + with aioresponses() as mocked: + mocked.post( + action_server_url, + payload={ + "events": [ + {"event": "slot", "name": 
slot_name, "value": expected_slot_value} + ] + }, + ) + + action_server = EndpointConfig(action_server_url) + action = FormAction(form_name, action_server) + + events = await action.run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + + assert events == [ + ActiveLoop(form_name), + SlotSet(slot_name, expected_slot_value), + SlotSet(REQUESTED_SLOT, None), + ActiveLoop(None), + ] + + +def test_name_of_utterance(): + form_name = "another_form" + slot_name = "num_people" + full_utterance_name = f"utter_ask_{form_name}_{slot_name}" + + domain = f""" + forms: + - {form_name}: + {slot_name}: + - type: from_text + responses: + {full_utterance_name}: + - text: "How many people?" + """ + domain = Domain.from_yaml(domain) + + action_server_url = "http:/my-action-server:5055/webhook" + + with aioresponses(): + action_server = EndpointConfig(action_server_url) + action = FormAction(form_name, action_server) + + assert action._name_of_utterance(domain, slot_name) == full_utterance_name + assert ( + action._name_of_utterance(domain, "another_slot") + == "utter_ask_another_slot" + ) + + +def test_temporary_tracker(): + extra_slot = "some_slot" + sender_id = "test" + domain = Domain.from_yaml( + f""" slots: + {extra_slot}: + type: unfeaturized + """ + ) + + previous_events = [ActionExecuted(ACTION_LISTEN_NAME)] + old_tracker = DialogueStateTracker.from_events( + sender_id, previous_events, slots=domain.slots + ) + new_events = [Restarted()] + form_action = FormAction("some name", None) + temp_tracker = form_action._temporary_tracker(old_tracker, new_events, domain) + + assert extra_slot in temp_tracker.slots.keys() + assert list(temp_tracker.events) == [ + *previous_events, + ActionExecuted(form_action.name()), + *new_events, + ] + + +def test_extract_requested_slot_default(): + """Test default extraction of a slot value from entity with the same name.""" + form = FormAction("some form", None) + + tracker = DialogueStateTracker.from_events( + "default", + [ + SlotSet(REQUESTED_SLOT, "some_slot"), + UserUttered( + "bla", entities=[{"entity": "some_slot", "value": "some_value"}] + ), + ActionExecuted(ACTION_LISTEN_NAME), + ], + ) + + slot_values = form.extract_requested_slot(tracker, Domain.empty()) + assert slot_values == {"some_slot": "some_value"} + + +@pytest.mark.parametrize( + "slot_mapping, expected_value", + [ + ( + {"type": "from_entity", "entity": "some_slot", "intent": "greet"}, + "some_value", + ), + ( + {"type": "from_intent", "intent": "greet", "value": "other_value"}, + "other_value", + ), + ({"type": "from_text"}, "bla"), + ({"type": "from_text", "intent": "greet"}, "bla"), + ({"type": "from_text", "not_intent": "other"}, "bla"), + ], +) +def test_extract_requested_slot_when_mapping_applies( + slot_mapping: Dict, expected_value: Text +): + form_name = "some_form" + entity_name = "some_slot" + form = FormAction(form_name, None) + + domain = Domain.from_dict({"forms": [{form_name: {entity_name: [slot_mapping]}}]}) + + tracker = DialogueStateTracker.from_events( + "default", + [ + SlotSet(REQUESTED_SLOT, "some_slot"), + UserUttered( + "bla", + intent={"name": "greet", "confidence": 1.0}, + entities=[{"entity": entity_name, "value": "some_value"}], + ), + ActionExecuted(ACTION_LISTEN_NAME), + ], + ) + + slot_values = form.extract_requested_slot(tracker, domain) + # check that the value was extracted for correct intent + assert slot_values == {"some_slot": expected_value} + + +@pytest.mark.parametrize( + "slot_mapping", + [ + {"type": 
"from_entity", "entity": "some_slot", "intent": "some_intent"}, + {"type": "from_intent", "intent": "some_intent", "value": "some_value"}, + {"type": "from_text", "intent": "other"}, + {"type": "from_text", "not_intent": "greet"}, + {"type": "from_trigger_intent", "intent": "greet", "value": "value"}, + ], +) +def test_extract_requested_slot_mapping_does_not_apply(slot_mapping: Dict): + form_name = "some_form" + entity_name = "some_slot" + form = FormAction(form_name, None) + + domain = Domain.from_dict({"forms": [{form_name: {entity_name: [slot_mapping]}}]}) + + tracker = DialogueStateTracker.from_events( + "default", + [ + SlotSet(REQUESTED_SLOT, "some_slot"), + UserUttered( + "bla", + intent={"name": "greet", "confidence": 1.0}, + entities=[{"entity": entity_name, "value": "some_value"}], + ), + ActionExecuted(ACTION_LISTEN_NAME), + ], + ) + + slot_values = form.extract_requested_slot(tracker, domain) + # check that the value was not extracted for incorrect intent + assert slot_values == {} + + +@pytest.mark.parametrize( + "trigger_slot_mapping, expected_value", + [ + ({"type": "from_trigger_intent", "intent": "greet", "value": "ten"}, "ten"), + ( + { + "type": "from_trigger_intent", + "intent": ["bye", "greet"], + "value": "tada", + }, + "tada", + ), + ], +) +async def test_trigger_slot_mapping_applies( + trigger_slot_mapping: Dict, expected_value: Text +): + form_name = "some_form" + entity_name = "some_slot" + slot_filled_by_trigger_mapping = "other_slot" + form = FormAction(form_name, None) + + domain = Domain.from_dict( + { + "forms": [ + { + form_name: { + entity_name: [ + { + "type": "from_entity", + "entity": entity_name, + "intent": "some_intent", + } + ], + slot_filled_by_trigger_mapping: [trigger_slot_mapping], + } + } + ] + } + ) + + tracker = DialogueStateTracker.from_events( + "default", + [ + SlotSet(REQUESTED_SLOT, "some_slot"), + UserUttered( + "bla", + intent={"name": "greet", "confidence": 1.0}, + entities=[{"entity": entity_name, "value": "some_value"}], + ), + ActionExecuted(ACTION_LISTEN_NAME), + ], + ) + + slot_values = form.extract_other_slots(tracker, domain) + assert slot_values == {slot_filled_by_trigger_mapping: expected_value} + + +@pytest.mark.parametrize( + "trigger_slot_mapping", + [ + ({"type": "from_trigger_intent", "intent": "bye", "value": "ten"}), + ({"type": "from_trigger_intent", "not_intent": ["greet"], "value": "tada"}), + ], +) +async def test_trigger_slot_mapping_does_not_apply(trigger_slot_mapping: Dict): + form_name = "some_form" + entity_name = "some_slot" + slot_filled_by_trigger_mapping = "other_slot" + form = FormAction(form_name, None) + + domain = Domain.from_dict( + { + "forms": [ + { + form_name: { + entity_name: [ + { + "type": "from_entity", + "entity": entity_name, + "intent": "some_intent", + } + ], + slot_filled_by_trigger_mapping: [trigger_slot_mapping], + } + } + ] + } + ) + + tracker = DialogueStateTracker.from_events( + "default", + [ + SlotSet(REQUESTED_SLOT, "some_slot"), + UserUttered( + "bla", + intent={"name": "greet", "confidence": 1.0}, + entities=[{"entity": entity_name, "value": "some_value"}], + ), + ActionExecuted(ACTION_LISTEN_NAME), + ], + ) + + slot_values = form.extract_other_slots(tracker, domain) + assert slot_values == {} + + +@pytest.mark.parametrize( + "mapping_not_intent, mapping_intent, mapping_role, mapping_group, entities, intent, expected_slot_values", + [ + ( + "some_intent", + None, + None, + None, + [{"entity": "some_entity", "value": "some_value"}], + "some_intent", + {}, + ), + ( + None, + 
"some_intent", + None, + None, + [{"entity": "some_entity", "value": "some_value"}], + "some_intent", + {"some_slot": "some_value"}, + ), + ( + "some_intent", + None, + None, + None, + [{"entity": "some_entity", "value": "some_value"}], + "some_other_intent", + {"some_slot": "some_value"}, + ), + ( + None, + None, + "some_role", + None, + [{"entity": "some_entity", "value": "some_value"}], + "some_intent", + {}, + ), + ( + None, + None, + "some_role", + None, + [{"entity": "some_entity", "value": "some_value", "role": "some_role"}], + "some_intent", + {"some_slot": "some_value"}, + ), + ( + None, + None, + None, + "some_group", + [{"entity": "some_entity", "value": "some_value"}], + "some_intent", + {}, + ), + ( + None, + None, + None, + "some_group", + [{"entity": "some_entity", "value": "some_value", "group": "some_group"}], + "some_intent", + {"some_slot": "some_value"}, + ), + ( + None, + None, + "some_role", + "some_group", + [ + { + "entity": "some_entity", + "value": "some_value", + "group": "some_group", + "role": "some_role", + } + ], + "some_intent", + {"some_slot": "some_value"}, + ), + ( + None, + None, + "some_role", + "some_group", + [{"entity": "some_entity", "value": "some_value", "role": "some_role"}], + "some_intent", + {}, + ), + ( + None, + None, + None, + None, + [ + { + "entity": "some_entity", + "value": "some_value", + "group": "some_group", + "role": "some_role", + } + ], + "some_intent", + {"some_slot": "some_value"}, + ), + ], +) +def test_extract_requested_slot_from_entity( + mapping_not_intent: Optional[Text], + mapping_intent: Optional[Text], + mapping_role: Optional[Text], + mapping_group: Optional[Text], + entities: List[Dict[Text, Any]], + intent: Text, + expected_slot_values: Dict[Text, Text], +): + """Test extraction of a slot value from entity with the different restrictions.""" + + form_name = "some form" + form = FormAction(form_name, None) + + mapping = form.from_entity( + entity="some_entity", + role=mapping_role, + group=mapping_group, + intent=mapping_intent, + not_intent=mapping_not_intent, + ) + domain = Domain.from_dict({"forms": [{form_name: {"some_slot": [mapping]}}]}) + + tracker = DialogueStateTracker.from_events( + "default", + [ + SlotSet(REQUESTED_SLOT, "some_slot"), + UserUttered( + "bla", intent={"name": intent, "confidence": 1.0}, entities=entities + ), + ], + ) + + slot_values = form.extract_requested_slot(tracker, domain) + assert slot_values == expected_slot_values + + +def test_invalid_slot_mapping(): + form_name = "my_form" + form = FormAction(form_name, None) + slot_name = "test" + tracker = DialogueStateTracker.from_events( + "sender", [SlotSet(REQUESTED_SLOT, slot_name)] + ) + + domain = Domain.from_dict( + {"forms": [{form_name: {slot_name: [{"type": "invalid"}]}}]} + ) + + with pytest.raises(ValueError): + form.extract_requested_slot(tracker, domain) + + +@pytest.mark.parametrize( + "some_other_slot_mapping, some_slot_mapping, entities, intent, expected_slot_values", + [ + ( + [ + { + "type": "from_entity", + "intent": "some_intent", + "entity": "some_entity", + "role": "some_role", + } + ], + [{"type": "from_entity", "intent": "some_intent", "entity": "some_entity"}], + [ + { + "entity": "some_entity", + "value": "some_value", + "role": "some_other_role", + } + ], + "some_intent", + {}, + ), + ( + [ + { + "type": "from_entity", + "intent": "some_intent", + "entity": "some_entity", + "role": "some_role", + } + ], + [{"type": "from_entity", "intent": "some_intent", "entity": "some_entity"}], + [{"entity": "some_entity", 
"value": "some_value", "role": "some_role"}], + "some_intent", + {"some_other_slot": "some_value"}, + ), + ( + [ + { + "type": "from_entity", + "intent": "some_intent", + "entity": "some_entity", + "group": "some_group", + } + ], + [{"type": "from_entity", "intent": "some_intent", "entity": "some_entity"}], + [ + { + "entity": "some_entity", + "value": "some_value", + "group": "some_other_group", + } + ], + "some_intent", + {}, + ), + ( + [ + { + "type": "from_entity", + "intent": "some_intent", + "entity": "some_entity", + "group": "some_group", + } + ], + [{"type": "from_entity", "intent": "some_intent", "entity": "some_entity"}], + [{"entity": "some_entity", "value": "some_value", "group": "some_group"}], + "some_intent", + {"some_other_slot": "some_value"}, + ), + ( + [ + { + "type": "from_entity", + "intent": "some_intent", + "entity": "some_entity", + "group": "some_group", + "role": "some_role", + } + ], + [{"type": "from_entity", "intent": "some_intent", "entity": "some_entity"}], + [ + { + "entity": "some_entity", + "value": "some_value", + "role": "some_role", + "group": "some_group", + } + ], + "some_intent", + {"some_other_slot": "some_value"}, + ), + ( + [{"type": "from_entity", "intent": "some_intent", "entity": "some_entity"}], + [ + { + "type": "from_entity", + "intent": "some_intent", + "entity": "some_other_entity", + } + ], + [{"entity": "some_entity", "value": "some_value"}], + "some_intent", + {}, + ), + ( + [ + { + "type": "from_entity", + "intent": "some_intent", + "entity": "some_entity", + "role": "some_role", + } + ], + [ + { + "type": "from_entity", + "intent": "some_intent", + "entity": "some_other_entity", + } + ], + [{"entity": "some_entity", "value": "some_value", "role": "some_role"}], + "some_intent", + {}, + ), + ], +) +def test_extract_other_slots_with_entity( + some_other_slot_mapping: List[Dict[Text, Any]], + some_slot_mapping: List[Dict[Text, Any]], + entities: List[Dict[Text, Any]], + intent: Text, + expected_slot_values: Dict[Text, Text], +): + """Test extraction of other not requested slots values from entities.""" + + form_name = "some_form" + form = FormAction(form_name, None) + + domain = Domain.from_dict( + { + "forms": [ + { + form_name: { + "some_other_slot": some_other_slot_mapping, + "some_slot": some_slot_mapping, + } + } + ] + } + ) + + tracker = DialogueStateTracker.from_events( + "default", + [ + SlotSet(REQUESTED_SLOT, "some_slot"), + UserUttered( + "bla", intent={"name": intent, "confidence": 1.0}, entities=entities + ), + ActionExecuted(ACTION_LISTEN_NAME), + ], + ) + + slot_values = form.extract_other_slots(tracker, domain) + # check that the value was extracted for non requested slot + assert slot_values == expected_slot_values + + +@pytest.mark.parametrize( + "domain, expected_action", + [ + ({}, "utter_ask_sun"), + ( + { + "actions": ["action_ask_my_form_sun", "action_ask_sun"], + "responses": {"utter_ask_my_form_sun": [{"text": "ask"}]}, + }, + "action_ask_my_form_sun", + ), + ( + { + "actions": ["action_ask_sun"], + "responses": {"utter_ask_my_form_sun": [{"text": "ask"}]}, + }, + "utter_ask_my_form_sun", + ), + ( + { + "actions": ["action_ask_sun"], + "responses": {"utter_ask_sun": [{"text": "hi"}]}, + }, + "action_ask_sun", + ), + ( + { + "actions": ["action_ask_my_form_sun"], + "responses": {"utter_ask_my_form_sun": [{"text": "hi"}]}, + }, + "action_ask_my_form_sun", + ), + ], +) +async def test_ask_for_slot( + domain: Dict, expected_action: Text, monkeypatch: MonkeyPatch +): + slot_name = "sun" + + action_from_name = 
Mock(return_value=action.ActionListen()) + monkeypatch.setattr(action, action.action_from_name.__name__, action_from_name) + + form = FormAction("my_form", None) + await form._ask_for_slot( + Domain.from_dict(domain), + None, + None, + slot_name, + DialogueStateTracker.from_events("dasd", []), + ) + + action_from_name.assert_called_once_with(expected_action, None, ANY) diff --git a/tests/core/actions/test_loops.py b/tests/core/actions/test_loops.py new file mode 100644 index 000000000000..2f2290e4afb2 --- /dev/null +++ b/tests/core/actions/test_loops.py @@ -0,0 +1,167 @@ +from typing import List, Any, Text + +import pytest +from rasa.core.actions.loops import LoopAction +from rasa.core.channels import CollectingOutputChannel +from rasa.core.domain import Domain +from rasa.core.events import ( + Event, + ActionExecutionRejected, + ActionExecuted, + ActiveLoop, + SlotSet, +) +from rasa.core.nlg import TemplatedNaturalLanguageGenerator +from rasa.core.trackers import DialogueStateTracker + + +async def test_whole_loop(): + expected_activation_events = [ + ActionExecutionRejected("tada"), + ActionExecuted("test"), + ] + + expected_do_events = [ActionExecuted("do")] + expected_deactivation_events = [SlotSet("deactivated")] + + form_name = "my form" + + class MyLoop(LoopAction): + def name(self) -> Text: + return form_name + + async def activate(self, *args: Any) -> List[Event]: + return expected_activation_events + + async def do(self, *args: Any) -> List[Event]: + events_so_far = args[-1] + assert events_so_far == [ActiveLoop(form_name), *expected_activation_events] + + return expected_do_events + + async def deactivate(self, *args) -> List[Event]: + events_so_far = args[-1] + assert events_so_far == [ + ActiveLoop(form_name), + *expected_activation_events, + *expected_do_events, + ActiveLoop(None), + ] + + return expected_deactivation_events + + async def is_done(self, *args) -> bool: + events_so_far = args[-1] + return events_so_far == [ + ActiveLoop(form_name), + *expected_activation_events, + *expected_do_events, + ] + + tracker = DialogueStateTracker.from_events("some sender", []) + domain = Domain.empty() + + action = MyLoop() + actual = await action.run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + + assert actual == [ + ActiveLoop(form_name), + *expected_activation_events, + *expected_do_events, + ActiveLoop(None), + *expected_deactivation_events, + ] + + +async def test_loop_without_deactivate(): + expected_activation_events = [ + ActionExecutionRejected("tada"), + ActionExecuted("test"), + ] + + expected_do_events = [ActionExecuted("do")] + form_name = "my form" + + class MyLoop(LoopAction): + def name(self) -> Text: + return form_name + + async def activate(self, *args: Any) -> List[Event]: + return expected_activation_events + + async def do(self, *args: Any) -> List[Event]: + return expected_do_events + + async def deactivate(self, *args) -> List[Event]: + raise ValueError("this shouldn't be called") + + async def is_done(self, *args) -> bool: + return False + + tracker = DialogueStateTracker.from_events("some sender", []) + domain = Domain.empty() + + action = MyLoop() + actual = await action.run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + + assert actual == [ + ActiveLoop(form_name), + *expected_activation_events, + *expected_do_events, + ] + + +async def test_loop_without_activate_and_without_deactivate(): + expected_do_events = 
[ActionExecuted("do")] + form_name = "my form" + + class MyLoop(LoopAction): + def name(self) -> Text: + return form_name + + async def activate(self, *args: Any) -> List[Event]: + raise ValueError("this shouldn't be called") + + async def do(self, *args: Any) -> List[Event]: + return expected_do_events + + async def deactivate(self, *args) -> List[Event]: + return [SlotSet("deactivated")] + + async def is_activated(self, *args: Any) -> bool: + return True + + async def is_done(self, *args) -> bool: + return False + + tracker = DialogueStateTracker.from_events("some sender", []) + domain = Domain.empty() + + action = MyLoop() + actual = await action.run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + + assert actual == [*expected_do_events] + + +async def test_raise_not_implemented_error(): + loop = LoopAction() + with pytest.raises(NotImplementedError): + await loop.do(None, None, None, None, []) + + with pytest.raises(NotImplementedError): + await loop.is_done(None, None, None, None, []) diff --git a/tests/core/actions/test_two_stage_fallback.py b/tests/core/actions/test_two_stage_fallback.py new file mode 100644 index 000000000000..e7899ddfdc0a --- /dev/null +++ b/tests/core/actions/test_two_stage_fallback.py @@ -0,0 +1,320 @@ +from typing import List, Text + +import pytest + +from rasa.constants import DEFAULT_NLU_FALLBACK_INTENT_NAME +from rasa.core.actions.action import ACTION_LISTEN_NAME +from rasa.core.actions.two_stage_fallback import ( + TwoStageFallbackAction, + ACTION_TWO_STAGE_FALLBACK_NAME, +) +from rasa.core.channels import CollectingOutputChannel +from rasa.core.constants import USER_INTENT_OUT_OF_SCOPE +from rasa.core.domain import Domain +from rasa.core.events import ( + ActionExecuted, + UserUttered, + ActiveLoop, + BotUttered, + UserUtteranceReverted, + Event, +) +from rasa.core.nlg import TemplatedNaturalLanguageGenerator +from rasa.core.trackers import DialogueStateTracker +from rasa.nlu.constants import INTENT_RANKING_KEY + + +def _message_requiring_fallback() -> List[Event]: + return [ + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered( + "hi", + {"name": DEFAULT_NLU_FALLBACK_INTENT_NAME}, + parse_data={ + INTENT_RANKING_KEY: [ + {"name": DEFAULT_NLU_FALLBACK_INTENT_NAME}, + {"name": "greet"}, + {"name": "bye"}, + ] + }, + ), + ] + + +def _two_stage_clarification_request() -> List[Event]: + return [ActionExecuted(ACTION_TWO_STAGE_FALLBACK_NAME), BotUttered("please affirm")] + + +async def test_ask_affirmation(): + tracker = DialogueStateTracker.from_events( + "some-sender", evts=_message_requiring_fallback() + ) + domain = Domain.empty() + action = TwoStageFallbackAction() + + events = await action.run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + + assert len(events) == 2 + assert events[0] == ActiveLoop(ACTION_TWO_STAGE_FALLBACK_NAME) + assert isinstance(events[1], BotUttered) + + +async def test_1st_affirmation_is_successful(): + tracker = DialogueStateTracker.from_events( + "some-sender", + evts=[ + # User sends message with low NLU confidence + *_message_requiring_fallback(), + ActiveLoop(ACTION_TWO_STAGE_FALLBACK_NAME), + # Action asks user to affirm + *_two_stage_clarification_request(), + ActionExecuted(ACTION_LISTEN_NAME), + # User affirms + UserUttered("hi", {"name": "greet", "confidence": 1.0}), + ], + ) + domain = Domain.empty() + action = TwoStageFallbackAction() + + events = await action.run( + CollectingOutputChannel(), + 
TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + + for events in events: + tracker.update(events, domain) + + applied_events = tracker.applied_events() + assert applied_events == [ + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("hi", {"name": "greet", "confidence": 1.0}), + ] + + +async def test_give_it_up_after_low_confidence_after_affirm_request(): + tracker = DialogueStateTracker.from_events( + "some-sender", + evts=[ + # User sends message with low NLU confidence + *_message_requiring_fallback(), + ActiveLoop(ACTION_TWO_STAGE_FALLBACK_NAME), + # Action asks user to affirm + *_two_stage_clarification_request(), + # User's affirms with low NLU confidence again + *_message_requiring_fallback(), + ], + ) + domain = Domain.empty() + action = TwoStageFallbackAction() + + events = await action.run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + + assert events == [ActiveLoop(None), UserUtteranceReverted()] + + +async def test_ask_rephrase_after_failed_affirmation(): + rephrase_text = "please rephrase" + tracker = DialogueStateTracker.from_events( + "some-sender", + evts=[ + # User sends message with low NLU confidence + *_message_requiring_fallback(), + ActiveLoop(ACTION_TWO_STAGE_FALLBACK_NAME), + # Action asks user to affirm + *_two_stage_clarification_request(), + ActionExecuted(ACTION_LISTEN_NAME), + # User denies suggested intents + UserUttered("hi", {"name": USER_INTENT_OUT_OF_SCOPE}), + ], + ) + + domain = Domain.from_yaml( + f""" + responses: + utter_ask_rephrase: + - {rephrase_text} + """ + ) + action = TwoStageFallbackAction() + + events = await action.run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + + assert len(events) == 1 + assert isinstance(events[0], BotUttered) + + bot_utterance = events[0] + assert isinstance(bot_utterance, BotUttered) + assert bot_utterance.text == rephrase_text + + +async def test_ask_rephrasing_successful(): + tracker = DialogueStateTracker.from_events( + "some-sender", + evts=[ + # User sends message with low NLU confidence + *_message_requiring_fallback(), + ActiveLoop(ACTION_TWO_STAGE_FALLBACK_NAME), + # Action asks user to affirm + *_two_stage_clarification_request(), + ActionExecuted(ACTION_LISTEN_NAME), + # User denies suggested intents + UserUttered("hi", {"name": USER_INTENT_OUT_OF_SCOPE}), + *_two_stage_clarification_request(), + # Action asks user to rephrase + ActionExecuted(ACTION_LISTEN_NAME), + # User rephrases successfully + UserUttered("hi", {"name": "greet"}), + ], + ) + domain = Domain.empty() + action = TwoStageFallbackAction() + + events = await action.run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + + for event in events: + tracker.update(event) + + applied_events = tracker.applied_events() + assert applied_events == [ + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("hi", {"name": "greet"}), + ] + + +async def test_ask_affirm_after_rephrasing(): + tracker = DialogueStateTracker.from_events( + "some-sender", + evts=[ + # User sends message with low NLU confidence + *_message_requiring_fallback(), + ActiveLoop(ACTION_TWO_STAGE_FALLBACK_NAME), + # Action asks user to affirm + *_two_stage_clarification_request(), + ActionExecuted(ACTION_LISTEN_NAME), + # User denies suggested intents + UserUttered("hi", {"name": USER_INTENT_OUT_OF_SCOPE}), + # Action asks user to rephrase + 
ActionExecuted(ACTION_TWO_STAGE_FALLBACK_NAME), + BotUttered("please rephrase"), + # User rephrased with low confidence + *_message_requiring_fallback(), + ], + ) + domain = Domain.empty() + action = TwoStageFallbackAction() + + events = await action.run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + + assert len(events) == 1 + assert isinstance(events[0], BotUttered) + + +async def test_2nd_affirm_successful(): + tracker = DialogueStateTracker.from_events( + "some-sender", + evts=[ + # User sends message with low NLU confidence + *_message_requiring_fallback(), + ActiveLoop(ACTION_TWO_STAGE_FALLBACK_NAME), + # Action asks user to affirm + *_two_stage_clarification_request(), + ActionExecuted(ACTION_LISTEN_NAME), + # User denies suggested intents + UserUttered("hi", {"name": USER_INTENT_OUT_OF_SCOPE}), + # Action asks user to rephrase + *_two_stage_clarification_request(), + # User rephrased with low confidence + *_message_requiring_fallback(), + *_two_stage_clarification_request(), + # Actions asks user to affirm for the last time + ActionExecuted(ACTION_LISTEN_NAME), + # User affirms successfully + UserUttered("hi", {"name": "greet"}), + ], + ) + domain = Domain.empty() + action = TwoStageFallbackAction() + + events = await action.run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + + for event in events: + tracker.update(event) + + applied_events = tracker.applied_events() + + assert applied_events == [ + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("hi", {"name": "greet"}), + ] + + +@pytest.mark.parametrize( + "intent_which_lets_action_give_up", + [USER_INTENT_OUT_OF_SCOPE, DEFAULT_NLU_FALLBACK_INTENT_NAME], +) +async def test_2nd_affirmation_failed(intent_which_lets_action_give_up: Text): + tracker = DialogueStateTracker.from_events( + "some-sender", + evts=[ + # User sends message with low NLU confidence + *_message_requiring_fallback(), + ActiveLoop(ACTION_TWO_STAGE_FALLBACK_NAME), + # Action asks user to affirm + *_two_stage_clarification_request(), + ActionExecuted(ACTION_LISTEN_NAME), + # User denies suggested intents + UserUttered("hi", {"name": USER_INTENT_OUT_OF_SCOPE}), + # Action asks user to rephrase + *_two_stage_clarification_request(), + # User rephrased with low confidence + *_message_requiring_fallback(), + # Actions asks user to affirm for the last time + *_two_stage_clarification_request(), + ActionExecuted(ACTION_LISTEN_NAME), + # User denies suggested intents for the second time + UserUttered("hi", {"name": intent_which_lets_action_give_up}), + ], + ) + domain = Domain.empty() + action = TwoStageFallbackAction() + + events = await action.run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + + assert events == [ActiveLoop(None), UserUtteranceReverted()] diff --git a/tests/core/channels/test_cmdline.py b/tests/core/channels/test_cmdline.py new file mode 100644 index 000000000000..7a3a9269b51e --- /dev/null +++ b/tests/core/channels/test_cmdline.py @@ -0,0 +1,109 @@ +from typing import List, Text + +from _pytest.capture import CaptureFixture +from _pytest.monkeypatch import MonkeyPatch +from prompt_toolkit import PromptSession, Application +from prompt_toolkit.input.defaults import create_pipe_input +from prompt_toolkit.output import DummyOutput +from rasa.core.channels.console import record_messages + +from aioresponses import aioresponses + +ENTER = "\x0a" + + +def 
mock_stdin(input_from_stdin: List[Text]): + inp = create_pipe_input() + + text = "" + for line in input_from_stdin: + text += line + ENTER + "\r" + + inp.send_text(text) + + prompt_session_init = PromptSession.__init__ + + def prompt_session_init_fake(self, *k, **kw): + prompt_session_init(self, input=inp, output=DummyOutput(), *k, **kw) + + PromptSession.__init__ = prompt_session_init_fake + + application_init = Application.__init__ + + def application_init_fake(self, *k, **kw): + kw.pop("input", None) + kw.pop("output", None) + application_init(self, input=inp, output=DummyOutput(), *k, **kw) + + Application.__init__ = application_init_fake + + return inp + + +async def test_record_messages(monkeypatch: MonkeyPatch, capsys: CaptureFixture): + input_output = [ + { + "in": "Give me a question!", + "out": [ + { + "buttons": [ + { + "title": "button 1 title", + "payload": "button 1 payload", + "details": "button 1 details", + } + ], + "text": "This is a button 1", + }, + { + "buttons": [ + { + "title": "button 2 title", + "payload": "button 2 payload", + "details": "button 2 details", + } + ], + "text": "This is a button 2", + }, + { + "buttons": [ + { + "title": "button 3 title", + "payload": "button 3 payload", + "details": "button 3 details", + } + ], + "text": "This is a button 3", + }, + ], + }, + {"in": ENTER, "out": [{"text": "You've pressed the button"}]}, + {"in": "Dummy message", "out": [{"text": "Dummy response"}]}, + ] + + inp = mock_stdin([m["in"] for m in input_output]) + + server_url = "http://example.com" + endpoint = f"{server_url}/webhooks/rest/webhook" + + with aioresponses() as mocked: + + for output in [m["out"] for m in input_output]: + if output: + mocked.post(url=endpoint, payload=output) + + num_of_messages = await record_messages( + "123", + server_url=server_url, + max_message_limit=len(input_output), + use_response_stream=False, + ) + + assert num_of_messages == len(input_output) + + captured = capsys.readouterr() + + assert "button 1 payload" in captured.out + assert "button 2 payload" in captured.out + + inp.close() diff --git a/tests/core/channels/test_hangouts.py b/tests/core/channels/test_hangouts.py new file mode 100644 index 000000000000..b2eb209fd348 --- /dev/null +++ b/tests/core/channels/test_hangouts.py @@ -0,0 +1,217 @@ +import json +import logging + +import pytest + +from rasa.core import utils + +logger = logging.getLogger(__name__) + + +def test_hangouts_channel(): + + from rasa.core.channels.hangouts import HangoutsInput + import rasa.core + + input_channel = HangoutsInput( + project_id="12345678901", + # intent name for bot added to direct message event + hangouts_user_added_intent_name="/added_dm", + # intent name for bot added to room event + hangouts_room_added_intent_name="/added_room", + # intent name for bot removed from space event + hangouts_removed_intent_name="/removed", + ) + + s = rasa.core.run.configure_app([input_channel], port=5004) + + routes_list = utils.list_routes(s) + print(routes_list) + assert routes_list.get("hangouts_webhook.health").startswith("/webhooks/hangouts") + assert routes_list.get("hangouts_webhook.receive").startswith( + "/webhooks/hangouts/webhook" + ) + + +def test_hangouts_extract_functions(): + + # from https://developers.google.com/hangouts/chat/reference/message-formats/events#added_to_space + ADDED_EVENT = { + "type": "ADDED_TO_SPACE", + "eventTime": "2017-03-02T19:02:59.910959Z", + "space": { + "name": "spaces/AAAAAAAAAAA", + "displayName": "Chuck Norris Discussion Room", + "type": "ROOM", + }, + "user": 
{ + "name": "users/12345678901234567890", + "displayName": "Chuck Norris", + "avatarUrl": "https://lh3.googleusercontent.com/.../photo.jpg", + "email": "chuck@example.com", + }, + } + + # from https://developers.google.com/hangouts/chat/reference/message-formats/events#removed_from_space + REMOVED_EVENT = { + "type": "REMOVED_FROM_SPACE", + "eventTime": "2017-03-02T19:02:59.910959Z", + "space": {"name": "spaces/AAAAAAAAAAA", "type": "DM"}, + "user": { + "name": "users/12345678901234567890", + "displayName": "Chuck Norris", + "avatarUrl": "https://lh3.googleusercontent.com/.../photo.jpg", + "email": "chuck@example.com", + }, + } + + # from https://developers.google.com/hangouts/chat/reference/message-formats/events#message + MESSAGE = { + "type": "MESSAGE", + "eventTime": "2017-03-02T19:02:59.910959Z", + "space": { + "name": "spaces/AAAAAAAAAAA", + "displayName": "Chuck Norris Discussion Room", + "type": "ROOM", + }, + "message": { + "name": "spaces/AAAAAAAAAAA/messages/CCCCCCCCCCC", + "sender": { + "name": "users/12345678901234567890", + "displayName": "Chuck Norris", + "avatarUrl": "https://lh3.googleusercontent.com/.../photo.jpg", + "email": "chuck@example.com", + }, + "createTime": "2017-03-02T19:02:59.910959Z", + "text": "@TestBot Violence is my last option.", + "argumentText": " Violence is my last option.", + "thread": {"name": "spaces/AAAAAAAAAAA/threads/BBBBBBBBBBB"}, + "annotations": [ + { + "length": 8, + "startIndex": 0, + "userMention": { + "type": "MENTION", + "user": { + "avatarUrl": "https://.../avatar.png", + "displayName": "TestBot", + "name": "users/1234567890987654321", + "type": "BOT", + }, + }, + "type": "USER_MENTION", + } + ], + }, + "user": { + "name": "users/12345678901234567890", + "displayName": "Chuck Norris", + "avatarUrl": "https://lh3.googleusercontent.com/.../photo.jpg", + "email": "chuck@example.com", + }, + } + + from rasa.core.channels.hangouts import HangoutsInput + import rasa.core + + input_channel = HangoutsInput( + project_id="12345678901", + # intent name for bot added to direct message event + hangouts_user_added_intent_name="/added_dm", + # intent name for bot added to room event + hangouts_room_added_intent_name="/added_room", + # intent name for bot removed from space event + hangouts_removed_intent_name="/removed", + ) + + app = rasa.core.run.configure_app([input_channel], port=5004) + + # This causes irritating error even though test passes... + # req, _ = app.test_client.post("/webhooks/hangouts/webhook", data=json.dumps(MESSAGE)) + + # ..therefore create Request object directly + from sanic.request import Request + + def create_req(app): + return Request( + b"http://127.0.0.1:42101/webhooks/hangouts/webhook", + None, + None, + "POST", + None, + app=app, + ) + + req = create_req(app) + req.body = bytes(json.dumps(MESSAGE), encoding="utf-8") + assert input_channel._extract_sender(req) == "Chuck Norris" + assert input_channel._extract_room(req) == "Chuck Norris Discussion Room" + assert input_channel._extract_message(req) == "@TestBot Violence is my last option." 
+ + req = create_req(app) + req.body = bytes(json.dumps(ADDED_EVENT), encoding="utf-8") + assert input_channel._extract_sender(req) == "Chuck Norris" + assert input_channel._extract_room(req) == "Chuck Norris Discussion Room" + assert input_channel._extract_message(req) == "/added_room" + + req = create_req(app) + req.body = bytes(json.dumps(REMOVED_EVENT), encoding="utf-8") + assert input_channel._extract_sender(req) == "Chuck Norris" + assert input_channel._extract_room(req) is None + assert input_channel._extract_message(req) == "/removed" + + +@pytest.mark.asyncio +async def test_hangouts_output_channel_functions(): + + from rasa.core.channels.hangouts import HangoutsOutput + + output_channel = HangoutsOutput() + + # with every call to _persist_message, the messages attribute (dict) is altered, + # as Hangouts always expects a single dict as response + + await output_channel.send_text_message(recipient_id="Chuck Norris", text="Test:") + + assert len(output_channel.messages) == 1 + assert output_channel.messages["text"] == "Test:" + + await output_channel.send_attachment( + recipient_id="Chuck Norris", attachment="Attachment" + ) + + assert len(output_channel.messages) == 1 + # two text messages are appended with space inbetween + assert output_channel.messages["text"] == "Test: Attachment" + + await output_channel.send_quick_replies( + recipient_id="Chuck Norris", + text="Test passing?", + quick_replies=[ + {"title": "Yes", "payload": "/confirm"}, + {"title": "No", "payload": "/deny"}, + ], + ) + assert len(output_channel.messages) == 1 + # for text and cards, text is turned to card format and two cards are returned + assert ( + output_channel.messages["cards"][1]["sections"][0]["widgets"][0][ + "textParagraph" + ]["text"] + == "Test passing?" + ) + assert ( + output_channel.messages["cards"][1]["sections"][0]["widgets"][1]["buttons"][0][ + "textButton" + ]["onClick"]["action"]["actionMethodName"] + == "/confirm" + ) + + await output_channel.send_image_url(recipient_id="Chuck Norris", image="test.png") + assert len(output_channel.messages) == 1 + assert ( + output_channel.messages["cards"][2]["sections"][0]["widgets"][0]["image"][ + "imageUrl" + ] + == "test.png" + ) diff --git a/tests/core/conftest.py b/tests/core/conftest.py index c7813e035240..b3e08caaa91f 100644 --- a/tests/core/conftest.py +++ b/tests/core/conftest.py @@ -1,45 +1,52 @@ import asyncio import os -from typing import Text -import matplotlib +from sanic.request import Request +import uuid +from datetime import datetime + +from typing import Text, Iterator + import pytest import rasa.utils.io from rasa.core.agent import Agent -from rasa.core.channels.channel import CollectingOutputChannel +from rasa.core.channels.channel import CollectingOutputChannel, OutputChannel from rasa.core.domain import Domain -from rasa.core.interpreter import RegexInterpreter +from rasa.core.events import ReminderScheduled, UserUttered, ActionExecuted from rasa.core.nlg import TemplatedNaturalLanguageGenerator -from rasa.core.policies.ensemble import PolicyEnsemble, SimplePolicyEnsemble -from rasa.core.policies.memoization import ( - AugmentedMemoizationPolicy, - MemoizationPolicy, - Policy, -) +from rasa.core.policies.ensemble import PolicyEnsemble +from rasa.core.policies.memoization import Policy from rasa.core.processor import MessageProcessor from rasa.core.slots import Slot -from rasa.core.tracker_store import InMemoryTrackerStore +from rasa.core.tracker_store import InMemoryTrackerStore, MongoTrackerStore from rasa.core.trackers import 
DialogueStateTracker -from rasa.train import train_async DEFAULT_DOMAIN_PATH_WITH_SLOTS = "data/test_domains/default_with_slots.yml" +DEFAULT_DOMAIN_PATH_WITH_SLOTS_AND_NO_ACTIONS = ( + "data/test_domains/default_with_slots_and_no_actions.yml" +) + DEFAULT_DOMAIN_PATH_WITH_MAPPING = "data/test_domains/default_with_mapping.yml" DEFAULT_STORIES_FILE = "data/test_stories/stories_defaultdomain.md" DEFAULT_STACK_CONFIG = "data/test_config/stack_config.yml" -DEFAULT_NLU_DATA = "examples/moodbot/data/nlu.md" +INCORRECT_NLU_DATA = "data/test/markdown_single_sections/incorrect_nlu_format.md" END_TO_END_STORY_FILE = "data/test_evaluations/end_to_end_story.md" E2E_STORY_FILE_UNKNOWN_ENTITY = "data/test_evaluations/story_unknown_entity.md" -MOODBOT_MODEL_PATH = "examples/moodbot/models/" +STORY_FILE_TRIPS_CIRCUIT_BREAKER = ( + "data/test_evaluations/stories_trip_circuit_breaker.md" +) -RESTAURANTBOT_PATH = "examples/restaurantbot/" +E2E_STORY_FILE_TRIPS_CIRCUIT_BREAKER = ( + "data/test_evaluations/end_to_end_trips_circuit_breaker.md" +) DEFAULT_ENDPOINTS_FILE = "data/test_endpoints/example_endpoints.yml" @@ -47,15 +54,14 @@ "data/test_dialogues/default.json", "data/test_dialogues/formbot.json", "data/test_dialogues/moodbot.json", - "data/test_dialogues/restaurantbot.json", ] EXAMPLE_DOMAINS = [ DEFAULT_DOMAIN_PATH_WITH_SLOTS, + DEFAULT_DOMAIN_PATH_WITH_SLOTS_AND_NO_ACTIONS, DEFAULT_DOMAIN_PATH_WITH_MAPPING, "examples/formbot/domain.yml", "examples/moodbot/domain.yml", - "examples/restaurantbot/domain.yml", ] @@ -70,7 +76,28 @@ def __init__(self, example_arg): pass -@pytest.fixture +class MockedMongoTrackerStore(MongoTrackerStore): + """In-memory mocked version of `MongoTrackerStore`.""" + + def __init__(self, _domain: Domain): + from mongomock import MongoClient + + self.db = MongoClient().rasa + self.collection = "conversations" + super(MongoTrackerStore, self).__init__(_domain, None) + + +# https://github.com/pytest-dev/pytest-asyncio/issues/68 +# this event_loop is used by pytest-asyncio, and redefining it +# is currently the only way of changing the scope of this fixture +@pytest.yield_fixture(scope="session") +def event_loop(request: Request) -> Iterator[asyncio.AbstractEventLoop]: + loop = asyncio.get_event_loop_policy().new_event_loop() + yield loop + loop.close() + + +@pytest.fixture(scope="session") def loop(): loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) @@ -96,6 +123,8 @@ def default_stack_config(): @pytest.fixture(scope="session") def default_nlu_data(): + from tests.conftest import DEFAULT_NLU_DATA + return DEFAULT_NLU_DATA @@ -104,53 +133,66 @@ def default_domain(): return Domain.load(DEFAULT_DOMAIN_PATH_WITH_SLOTS) -@pytest.fixture(scope="session") -async def default_agent(default_domain) -> Agent: - agent = Agent( - default_domain, - policies=[MemoizationPolicy()], - interpreter=RegexInterpreter(), - tracker_store=InMemoryTrackerStore(default_domain), - ) - training_data = await agent.load_data(DEFAULT_STORIES_FILE) - agent.train(training_data) - return agent - - -@pytest.fixture(scope="session") -def default_agent_path(default_agent, tmpdir_factory): - path = tmpdir_factory.mktemp("agent").strpath - default_agent.persist(path) - return path - - @pytest.fixture -def default_channel(): +def default_channel() -> OutputChannel: return CollectingOutputChannel() @pytest.fixture -async def default_processor(default_domain, default_nlg): - agent = Agent( - default_domain, - SimplePolicyEnsemble([AugmentedMemoizationPolicy()]), - interpreter=RegexInterpreter(), - ) - - 
training_data = await agent.load_data(DEFAULT_STORIES_FILE) - agent.train(training_data) - tracker_store = InMemoryTrackerStore(default_domain) +async def default_processor(default_agent: Agent) -> MessageProcessor: + tracker_store = InMemoryTrackerStore(default_agent.domain) return MessageProcessor( - agent.interpreter, - agent.policy_ensemble, - default_domain, + default_agent.interpreter, + default_agent.policy_ensemble, + default_agent.domain, tracker_store, - default_nlg, + TemplatedNaturalLanguageGenerator(default_agent.domain.templates), ) +@pytest.fixture +def tracker_with_six_scheduled_reminders( + default_processor: MessageProcessor, +) -> DialogueStateTracker: + reminders = [ + ReminderScheduled("greet", datetime.now(), kill_on_user_message=False), + ReminderScheduled( + intent="greet", + entities=[{"entity": "name", "value": "Jane Doe"}], + trigger_date_time=datetime.now(), + kill_on_user_message=False, + ), + ReminderScheduled( + intent="default", + entities=[{"entity": "name", "value": "Jane Doe"}], + trigger_date_time=datetime.now(), + kill_on_user_message=False, + ), + ReminderScheduled( + intent="greet", + entities=[{"entity": "name", "value": "Bruce Wayne"}], + trigger_date_time=datetime.now(), + kill_on_user_message=False, + ), + ReminderScheduled("default", datetime.now(), kill_on_user_message=False), + ReminderScheduled( + "default", datetime.now(), kill_on_user_message=False, name="special" + ), + ] + sender_id = uuid.uuid4().hex + tracker = default_processor.tracker_store.get_or_create_tracker(sender_id) + for reminder in reminders: + tracker.update(UserUttered("test")) + tracker.update(ActionExecuted("action_reminder_reminder")) + tracker.update(reminder) + + default_processor.tracker_store.save(tracker) + + return tracker + + @pytest.fixture(scope="session") -def moodbot_domain(trained_moodbot_path): +def moodbot_domain(): domain_path = os.path.join("examples", "moodbot", "domain.yml") return Domain.load(domain_path) @@ -162,34 +204,6 @@ def moodbot_metadata(unpacked_trained_moodbot_path): ) -@pytest.fixture() -async def trained_stack_model( - default_domain_path, default_stack_config, default_nlu_data, default_stories_file -): - trained_stack_model_path = await train_async( - domain=default_domain_path, - config=default_stack_config, - training_files=[default_nlu_data, default_stories_file], - ) - - return trained_stack_model_path - - -@pytest.fixture -async def prepared_agent(tmpdir_factory) -> Agent: - model_path = tmpdir_factory.mktemp("model").strpath - - agent = Agent( - "data/test_domains/default.yml", - policies=[AugmentedMemoizationPolicy(max_history=3)], - ) - - training_data = await agent.load_data(DEFAULT_STORIES_FILE) - agent.train(training_data) - agent.persist(model_path) - return agent - - @pytest.fixture def default_nlg(default_domain): return TemplatedNaturalLanguageGenerator(default_domain.templates) @@ -211,38 +225,15 @@ def project() -> Text: return directory -def train_model(project: Text, filename: Text = "test.tar.gz"): - from rasa.constants import ( - DEFAULT_CONFIG_PATH, - DEFAULT_DATA_PATH, - DEFAULT_DOMAIN_PATH, - DEFAULT_MODELS_PATH, - ) - import rasa.train - - output = os.path.join(project, DEFAULT_MODELS_PATH, filename) - domain = os.path.join(project, DEFAULT_DOMAIN_PATH) - config = os.path.join(project, DEFAULT_CONFIG_PATH) - training_files = os.path.join(project, DEFAULT_DATA_PATH) - - rasa.train(domain, config, training_files, output) - - return output - - -@pytest.fixture(scope="session") -def trained_model(project) -> Text: - 
return train_model(project) - - @pytest.fixture -async def restaurantbot(tmpdir_factory) -> Text: - model_path = tmpdir_factory.mktemp("model").strpath - restaurant_domain = os.path.join(RESTAURANTBOT_PATH, "domain.yml") - restaurant_config = os.path.join(RESTAURANTBOT_PATH, "config.yml") - restaurant_data = os.path.join(RESTAURANTBOT_PATH, "data/") - - agent = await train_async( - restaurant_domain, restaurant_config, restaurant_data, model_path +async def form_bot_agent(trained_async, tmpdir_factory) -> Agent: + zipped_model = await trained_async( + domain="examples/formbot/domain.yml", + config="examples/formbot/config.yml", + training_files=[ + "examples/formbot/data/rules.yml", + "examples/formbot/data/stories.yml", + ], ) - return agent + + return Agent.load_local_model(zipped_model) diff --git a/tests/core/policies/__init__.py b/tests/core/policies/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/core/policies/test_rule_policy.py b/tests/core/policies/test_rule_policy.py new file mode 100644 index 000000000000..cca44ad3ce6d --- /dev/null +++ b/tests/core/policies/test_rule_policy.py @@ -0,0 +1,1174 @@ +from typing import List, Text + +import pytest +from rasa.constants import DEFAULT_NLU_FALLBACK_INTENT_NAME + +from rasa.core import training +from rasa.core.actions.action import ( + ACTION_LISTEN_NAME, + ACTION_DEFAULT_FALLBACK_NAME, + ActionDefaultFallback, + ACTION_RESTART_NAME, + ACTION_BACK_NAME, + ACTION_SESSION_START_NAME, + RULE_SNIPPET_ACTION_NAME, +) +from rasa.core.channels import CollectingOutputChannel +from rasa.core.constants import ( + REQUESTED_SLOT, + USER_INTENT_RESTART, + USER_INTENT_BACK, + USER_INTENT_SESSION_START, +) +from rasa.core.domain import Domain +from rasa.core.events import ( + ActionExecuted, + UserUttered, + ActiveLoop, + SlotSet, + ActionExecutionRejected, + FormValidation, +) +from rasa.core.interpreter import RegexInterpreter +from rasa.core.nlg import TemplatedNaturalLanguageGenerator +from rasa.core.policies.rule_policy import RulePolicy +from rasa.core.trackers import DialogueStateTracker +from rasa.core.training.generator import TrackerWithCachedStates + +UTTER_GREET_ACTION = "utter_greet" +GREET_INTENT_NAME = "greet" +GREET_RULE = DialogueStateTracker.from_events( + "bla", + evts=[ + ActionExecuted(RULE_SNIPPET_ACTION_NAME), + ActionExecuted(ACTION_LISTEN_NAME), + # Greet is a FAQ here and gets triggered in any context + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ActionExecuted(UTTER_GREET_ACTION), + ActionExecuted(ACTION_LISTEN_NAME), + ], +) +GREET_RULE.is_rule_tracker = True + + +def _form_submit_rule( + domain: Domain, submit_action_name: Text, form_name: Text +) -> DialogueStateTracker: + return TrackerWithCachedStates.from_events( + "bla", + domain=domain, + slots=domain.slots, + evts=[ + ActiveLoop(form_name), + # Any events in between + ActionExecuted(RULE_SNIPPET_ACTION_NAME), + # Form runs and deactivates itself + ActionExecuted(form_name), + ActiveLoop(None), + SlotSet(REQUESTED_SLOT, None), + ActionExecuted(submit_action_name), + ActionExecuted(ACTION_LISTEN_NAME), + ], + is_rule_tracker=True, + ) + + +def _form_activation_rule( + domain: Domain, form_name: Text, activation_intent_name: Text +) -> DialogueStateTracker: + return TrackerWithCachedStates.from_events( + "bla", + domain=domain, + slots=domain.slots, + evts=[ + ActionExecuted(RULE_SNIPPET_ACTION_NAME), + # The intent `other_intent` activates the form + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": 
activation_intent_name}), + ActionExecuted(form_name), + ActiveLoop(form_name), + ActionExecuted(ACTION_LISTEN_NAME), + ], + is_rule_tracker=True, + ) + + +def test_rule_policy_has_max_history_none(): + policy = RulePolicy() + assert policy.featurizer.max_history is None + + +def test_faq_rule(): + domain = Domain.from_yaml( + f""" +intents: +- {GREET_INTENT_NAME} +actions: +- {UTTER_GREET_ACTION} + """ + ) + + policy = RulePolicy() + policy.train([GREET_RULE], domain, RegexInterpreter()) + # remove first ... action and utter_greet and last action_listen from greet rule + new_conversation = DialogueStateTracker.from_events( + "simple greet", + evts=[ + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ], + ) + action_probabilities = policy.predict_action_probabilities(new_conversation, domain) + + assert_predicted_action(action_probabilities, domain, UTTER_GREET_ACTION) + + +def assert_predicted_action( + action_probabilities: List[float], + domain: Domain, + expected_action_name: Text, + confidence: float = 1.0, +) -> None: + assert max(action_probabilities) == confidence + index_of_predicted_action = action_probabilities.index(max(action_probabilities)) + prediction_action_name = domain.action_names[index_of_predicted_action] + assert prediction_action_name == expected_action_name + + +async def test_predict_form_action_if_in_form(): + form_name = "some_form" + + domain = Domain.from_yaml( + f""" + intents: + - {GREET_INTENT_NAME} + actions: + - {UTTER_GREET_ACTION} + - some-action + slots: + {REQUESTED_SLOT}: + type: unfeaturized + forms: + - {form_name} +""" + ) + + policy = RulePolicy() + policy.train([GREET_RULE], domain, RegexInterpreter()) + + form_conversation = DialogueStateTracker.from_events( + "in a form", + evts=[ + # We are in an activate form + ActionExecuted(form_name), + ActiveLoop(form_name), + SlotSet(REQUESTED_SLOT, "some value"), + ActionExecuted(ACTION_LISTEN_NAME), + # User sends message as response to a requested slot + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ], + slots=domain.slots, + ) + + # RulePolicy triggers form again + action_probabilities = policy.predict_action_probabilities( + form_conversation, domain + ) + assert_predicted_action(action_probabilities, domain, form_name) + + +async def test_predict_form_action_if_multiple_turns(): + form_name = "some_form" + other_intent = "bye" + domain = Domain.from_yaml( + f""" + intents: + - {GREET_INTENT_NAME} + - {other_intent} + actions: + - {UTTER_GREET_ACTION} + - some-action + slots: + {REQUESTED_SLOT}: + type: unfeaturized + forms: + - {form_name} +""" + ) + + policy = RulePolicy() + policy.train([GREET_RULE], domain, RegexInterpreter()) + + form_conversation = DialogueStateTracker.from_events( + "in a form", + evts=[ + # We are in an active form + ActionExecuted(form_name), + ActiveLoop(form_name), + SlotSet(REQUESTED_SLOT, "some value"), + # User responds to slot request + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": GREET_INTENT_NAME}), + # Form validates input and requests another slot + ActionExecuted(form_name), + SlotSet(REQUESTED_SLOT, "some other"), + # User responds to 2nd slot request + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": other_intent}), + ], + slots=domain.slots, + ) + + # RulePolicy triggers form again + action_probabilities = policy.predict_action_probabilities( + form_conversation, domain + ) + assert_predicted_action(action_probabilities, domain, form_name) + + +async def 
test_predict_action_listen_after_form(): + form_name = "some_form" + + domain = Domain.from_yaml( + f""" + intents: + - {GREET_INTENT_NAME} + actions: + - {UTTER_GREET_ACTION} + - some-action + slots: + {REQUESTED_SLOT}: + type: unfeaturized + forms: + - {form_name} + """ + ) + + policy = RulePolicy() + policy.train([GREET_RULE], domain, RegexInterpreter()) + + form_conversation = DialogueStateTracker.from_events( + "in a form", + evts=[ + # We are in an activate form + ActionExecuted(form_name), + ActiveLoop(form_name), + SlotSet(REQUESTED_SLOT, "some value"), + ActionExecuted(ACTION_LISTEN_NAME), + # User sends message as response to a requested slot + UserUttered("haha", {"name": GREET_INTENT_NAME}), + # Form is running again + ActionExecuted(form_name), + ], + slots=domain.slots, + ) + + # RulePolicy predicts action listen + action_probabilities = policy.predict_action_probabilities( + form_conversation, domain + ) + assert_predicted_action(action_probabilities, domain, ACTION_LISTEN_NAME) + + +async def test_dont_predict_form_if_already_finished(): + form_name = "some_form" + + domain = Domain.from_yaml( + f""" + intents: + - {GREET_INTENT_NAME} + actions: + - {UTTER_GREET_ACTION} + - some-action + slots: + {REQUESTED_SLOT}: + type: unfeaturized + forms: + - {form_name} +""" + ) + + policy = RulePolicy() + policy.train([GREET_RULE], domain, RegexInterpreter()) + + form_conversation = DialogueStateTracker.from_events( + "in a form", + evts=[ + # We are in an activate form + ActionExecuted(form_name), + ActiveLoop(form_name), + SlotSet(REQUESTED_SLOT, "some value"), + ActionExecuted(ACTION_LISTEN_NAME), + # User sends message as response to a requested slot + UserUttered("haha", {"name": GREET_INTENT_NAME}), + # Form is happy and deactivates itself + ActionExecuted(form_name), + ActiveLoop(None), + SlotSet(REQUESTED_SLOT, None), + # User sends another message. Form is already done. 
Shouldn't get triggered + # again + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ], + slots=domain.slots, + ) + + # RulePolicy triggers form again + action_probabilities = policy.predict_action_probabilities( + form_conversation, domain + ) + assert_predicted_action(action_probabilities, domain, UTTER_GREET_ACTION) + + +async def test_form_unhappy_path(): + form_name = "some_form" + + domain = Domain.from_yaml( + f""" + intents: + - {GREET_INTENT_NAME} + actions: + - {UTTER_GREET_ACTION} + - some-action + slots: + {REQUESTED_SLOT}: + type: unfeaturized + forms: + - {form_name} + """ + ) + + policy = RulePolicy() + policy.train([GREET_RULE], domain, RegexInterpreter()) + + unhappy_form_conversation = DialogueStateTracker.from_events( + "in a form", + evts=[ + # We are in an active form + ActionExecuted(form_name), + ActiveLoop(form_name), + SlotSet(REQUESTED_SLOT, "some value"), + # User responds to slot request + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": GREET_INTENT_NAME}), + # Form isn't happy with the answer and rejects execution + ActionExecutionRejected(form_name), + ], + slots=domain.slots, + ) + + # RulePolicy doesn't trigger form but FAQ + action_probabilities = policy.predict_action_probabilities( + unhappy_form_conversation, domain + ) + + assert_predicted_action(action_probabilities, domain, UTTER_GREET_ACTION) + + +async def test_form_unhappy_path_from_general_rule(): + form_name = "some_form" + + domain = Domain.from_yaml( + f""" + intents: + - {GREET_INTENT_NAME} + actions: + - {UTTER_GREET_ACTION} + - some-action + slots: + {REQUESTED_SLOT}: + type: unfeaturized + forms: + - {form_name} + """ + ) + + policy = RulePolicy() + # RulePolicy should memorize that unhappy_rule overrides GREET_RULE + policy.train([GREET_RULE], domain, RegexInterpreter()) + + # Check that RulePolicy predicts action to handle unhappy path + conversation_events = [ + ActionExecuted(form_name), + ActiveLoop(form_name), + SlotSet(REQUESTED_SLOT, "some value"), + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ActionExecutionRejected(form_name), + ] + + action_probabilities = policy.predict_action_probabilities( + DialogueStateTracker.from_events( + "casd", evts=conversation_events, slots=domain.slots + ), + domain, + ) + # check that general rule action is predicted + assert_predicted_action(action_probabilities, domain, UTTER_GREET_ACTION) + + # Check that RulePolicy triggers form again after handling unhappy path + conversation_events.append(ActionExecuted(UTTER_GREET_ACTION)) + action_probabilities = policy.predict_action_probabilities( + DialogueStateTracker.from_events( + "casd", evts=conversation_events, slots=domain.slots + ), + domain, + ) + # check that action_listen from general rule is overwritten by form action + assert_predicted_action(action_probabilities, domain, form_name) + + +async def test_form_unhappy_path_from_in_form_rule(): + form_name = "some_form" + handle_rejection_action_name = "utter_handle_rejection" + + domain = Domain.from_yaml( + f""" + intents: + - {GREET_INTENT_NAME} + actions: + - {UTTER_GREET_ACTION} + - {handle_rejection_action_name} + - some-action + slots: + {REQUESTED_SLOT}: + type: unfeaturized + forms: + - {form_name} + """ + ) + + unhappy_rule = TrackerWithCachedStates.from_events( + "bla", + domain=domain, + slots=domain.slots, + evts=[ + # We are in an active form + ActiveLoop(form_name), + SlotSet(REQUESTED_SLOT, "bla"), + 
ActionExecuted(RULE_SNIPPET_ACTION_NAME), + ActionExecuted(ACTION_LISTEN_NAME), + # When a user says "hi", and the form is unhappy, + # we want to run a specific action + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ActionExecuted(handle_rejection_action_name), + ActionExecuted(form_name), + ActionExecuted(ACTION_LISTEN_NAME), + ], + is_rule_tracker=True, + ) + + policy = RulePolicy() + # RulePolicy should memorize that unhappy_rule overrides GREET_RULE + policy.train([GREET_RULE, unhappy_rule], domain, RegexInterpreter()) + + # Check that RulePolicy predicts action to handle unhappy path + conversation_events = [ + ActionExecuted(form_name), + ActiveLoop(form_name), + SlotSet(REQUESTED_SLOT, "some value"), + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ActionExecutionRejected(form_name), + ] + + action_probabilities = policy.predict_action_probabilities( + DialogueStateTracker.from_events( + "casd", evts=conversation_events, slots=domain.slots + ), + domain, + ) + assert_predicted_action(action_probabilities, domain, handle_rejection_action_name) + + # Check that RulePolicy triggers form again after handling unhappy path + conversation_events.append(ActionExecuted(handle_rejection_action_name)) + action_probabilities = policy.predict_action_probabilities( + DialogueStateTracker.from_events( + "casd", evts=conversation_events, slots=domain.slots + ), + domain, + ) + assert_predicted_action(action_probabilities, domain, form_name) + + +async def test_form_unhappy_path_from_story(): + form_name = "some_form" + handle_rejection_action_name = "utter_handle_rejection" + + domain = Domain.from_yaml( + f""" + intents: + - {GREET_INTENT_NAME} + actions: + - {UTTER_GREET_ACTION} + - {handle_rejection_action_name} + - some-action + slots: + {REQUESTED_SLOT}: + type: unfeaturized + forms: + - {form_name} + """ + ) + + unhappy_story = TrackerWithCachedStates.from_events( + "bla", + domain=domain, + slots=domain.slots, + evts=[ + # We are in an active form + ActionExecuted(form_name), + ActiveLoop(form_name), + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ActionExecuted(UTTER_GREET_ACTION), + # After our bot says "hi", we want to run a specific action + ActionExecuted(handle_rejection_action_name), + ActionExecuted(form_name), + ActionExecuted(ACTION_LISTEN_NAME), + ], + ) + + policy = RulePolicy() + policy.train([GREET_RULE, unhappy_story], domain, RegexInterpreter()) + + # Check that RulePolicy predicts action to handle unhappy path + conversation_events = [ + ActionExecuted(form_name), + ActiveLoop(form_name), + SlotSet(REQUESTED_SLOT, "some value"), + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ActionExecutionRejected(form_name), + ] + + action_probabilities = policy.predict_action_probabilities( + DialogueStateTracker.from_events( + "casd", evts=conversation_events, slots=domain.slots + ), + domain, + ) + assert_predicted_action(action_probabilities, domain, UTTER_GREET_ACTION) + + # Check that RulePolicy doesn't trigger form or action_listen + # after handling unhappy path + conversation_events.append(ActionExecuted(handle_rejection_action_name)) + action_probabilities = policy.predict_action_probabilities( + DialogueStateTracker.from_events( + "casd", evts=conversation_events, slots=domain.slots + ), + domain, + ) + assert max(action_probabilities) == policy._core_fallback_threshold + + +async def test_form_unhappy_path_no_validation_from_rule(): + form_name = "some_form" + 
handle_rejection_action_name = "utter_handle_rejection" + + domain = Domain.from_yaml( + f""" + intents: + - {GREET_INTENT_NAME} + actions: + - {UTTER_GREET_ACTION} + - {handle_rejection_action_name} + - some-action + slots: + {REQUESTED_SLOT}: + type: unfeaturized + forms: + - {form_name} + """ + ) + + unhappy_rule = TrackerWithCachedStates.from_events( + "bla", + domain=domain, + slots=domain.slots, + evts=[ + # We are in an active form + ActiveLoop(form_name), + SlotSet(REQUESTED_SLOT, "bla"), + ActionExecuted(RULE_SNIPPET_ACTION_NAME), + ActionExecuted(ACTION_LISTEN_NAME), + # When a user says "hi", and the form is unhappy, + # we want to run a specific action + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ActionExecuted(handle_rejection_action_name), + # Next user utterance is an answer to the previous question + # and shouldn't be validated by the form + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ActionExecuted(form_name), + ActionExecuted(ACTION_LISTEN_NAME), + ], + is_rule_tracker=True, + ) + + policy = RulePolicy() + # RulePolicy should memorize that unhappy_rule overrides GREET_RULE + policy.train([GREET_RULE, unhappy_rule], domain, RegexInterpreter()) + + # Check that RulePolicy predicts action to handle unhappy path + conversation_events = [ + ActionExecuted(form_name), + ActiveLoop(form_name), + SlotSet(REQUESTED_SLOT, "some value"), + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ActionExecutionRejected(form_name), + ] + + action_probabilities = policy.predict_action_probabilities( + DialogueStateTracker.from_events( + "casd", evts=conversation_events, slots=domain.slots + ), + domain, + ) + assert_predicted_action(action_probabilities, domain, handle_rejection_action_name) + + # Check that RulePolicy predicts action_listen + conversation_events.append(ActionExecuted(handle_rejection_action_name)) + action_probabilities = policy.predict_action_probabilities( + DialogueStateTracker.from_events( + "casd", evts=conversation_events, slots=domain.slots + ), + domain, + ) + assert_predicted_action(action_probabilities, domain, ACTION_LISTEN_NAME) + + # Check that RulePolicy triggers form again after handling unhappy path + conversation_events.append(ActionExecuted(ACTION_LISTEN_NAME)) + tracker = DialogueStateTracker.from_events( + "casd", evts=conversation_events, slots=domain.slots + ) + action_probabilities = policy.predict_action_probabilities(tracker, domain) + assert_predicted_action(action_probabilities, domain, form_name) + # check that RulePolicy added FormValidation False event based on the training rule + assert tracker.events[-1] == FormValidation(False) + + +async def test_form_unhappy_path_no_validation_from_story(): + form_name = "some_form" + handle_rejection_action_name = "utter_handle_rejection" + + domain = Domain.from_yaml( + f""" + intents: + - {GREET_INTENT_NAME} + actions: + - {UTTER_GREET_ACTION} + - {handle_rejection_action_name} + - some-action + slots: + {REQUESTED_SLOT}: + type: unfeaturized + forms: + - {form_name} + """ + ) + + unhappy_story = TrackerWithCachedStates.from_events( + "bla", + domain=domain, + slots=domain.slots, + evts=[ + # We are in an active form + ActionExecuted(form_name), + ActiveLoop(form_name), + # When a user says "hi", and the form is unhappy, + # we want to run a specific action + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ActionExecuted(handle_rejection_action_name), + ActionExecuted(ACTION_LISTEN_NAME), + # Next user 
utterance is an answer to the previous question + # and shouldn't be validated by the form + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ActionExecuted(form_name), + ActionExecuted(ACTION_LISTEN_NAME), + ], + ) + + policy = RulePolicy() + policy.train([unhappy_story], domain, RegexInterpreter()) + + # Check that RulePolicy predicts no validation to handle unhappy path + conversation_events = [ + ActionExecuted(form_name), + ActiveLoop(form_name), + SlotSet(REQUESTED_SLOT, "some value"), + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ActionExecutionRejected(form_name), + ActionExecuted(handle_rejection_action_name), + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ] + + tracker = DialogueStateTracker.from_events( + "casd", evts=conversation_events, slots=domain.slots + ) + action_probabilities = policy.predict_action_probabilities(tracker, domain) + # there is no rule for next action + assert max(action_probabilities) == policy._core_fallback_threshold + # check that RulePolicy added FormValidation False event based on the training story + assert tracker.events[-1] == FormValidation(False) + + +async def test_form_unhappy_path_without_rule(): + form_name = "some_form" + other_intent = "bye" + domain = Domain.from_yaml( + f""" + intents: + - {GREET_INTENT_NAME} + - {other_intent} + actions: + - {UTTER_GREET_ACTION} + - some-action + slots: + {REQUESTED_SLOT}: + type: unfeaturized + forms: + - {form_name} + """ + ) + + policy = RulePolicy() + policy.train([GREET_RULE], domain, RegexInterpreter()) + + conversation_events = [ + ActionExecuted(form_name), + ActiveLoop(form_name), + SlotSet(REQUESTED_SLOT, "some value"), + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": other_intent}), + ActiveLoop(form_name), + ActionExecutionRejected(form_name), + ] + + # Unhappy path is not handled. No rule matches. 
Let's hope ML fixes our problems 🤞 + action_probabilities = policy.predict_action_probabilities( + DialogueStateTracker.from_events( + "casd", evts=conversation_events, slots=domain.slots + ), + domain, + ) + + assert max(action_probabilities) == policy._core_fallback_threshold + + +async def test_form_activation_rule(): + form_name = "some_form" + other_intent = "bye" + domain = Domain.from_yaml( + f""" + intents: + - {GREET_INTENT_NAME} + - {other_intent} + actions: + - {UTTER_GREET_ACTION} + - some-action + slots: + {REQUESTED_SLOT}: + type: unfeaturized + forms: + - {form_name} + """ + ) + + form_activation_rule = _form_activation_rule(domain, form_name, other_intent) + policy = RulePolicy() + policy.train([GREET_RULE, form_activation_rule], domain, RegexInterpreter()) + + conversation_events = [ + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": other_intent}), + ] + + # RulePolicy correctly predicts the form action + action_probabilities = policy.predict_action_probabilities( + DialogueStateTracker.from_events( + "casd", evts=conversation_events, slots=domain.slots + ), + domain, + ) + + assert_predicted_action(action_probabilities, domain, form_name) + + +async def test_failing_form_activation_due_to_no_rule(): + form_name = "some_form" + other_intent = "bye" + domain = Domain.from_yaml( + f""" + intents: + - {GREET_INTENT_NAME} + - {other_intent} + actions: + - {UTTER_GREET_ACTION} + - some-action + slots: + {REQUESTED_SLOT}: + type: unfeaturized + forms: + - {form_name} + """ + ) + + policy = RulePolicy() + policy.train([GREET_RULE], domain, RegexInterpreter()) + + conversation_events = [ + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": other_intent}), + ] + + # RulePolicy has no matching rule since no rule for form activation is given + action_probabilities = policy.predict_action_probabilities( + DialogueStateTracker.from_events( + "casd", evts=conversation_events, slots=domain.slots + ), + domain, + ) + + assert max(action_probabilities) == policy._core_fallback_threshold + + +def test_form_submit_rule(): + form_name = "some_form" + submit_action_name = "utter_submit" + domain = Domain.from_yaml( + f""" + intents: + - {GREET_INTENT_NAME} + actions: + - {UTTER_GREET_ACTION} + - some-action + - {submit_action_name} + slots: + {REQUESTED_SLOT}: + type: unfeaturized + forms: + - {form_name} + """ + ) + + form_submit_rule = _form_submit_rule(domain, submit_action_name, form_name) + + policy = RulePolicy() + policy.train([GREET_RULE, form_submit_rule], domain, RegexInterpreter()) + + form_conversation = DialogueStateTracker.from_events( + "in a form", + evts=[ + # Form was activated + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ActionExecuted(form_name), + ActiveLoop(form_name), + SlotSet(REQUESTED_SLOT, "some value"), + ActionExecuted(ACTION_LISTEN_NAME), + # User responds and fills requested slot + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ActionExecuted(form_name), + # Form get's deactivated + ActiveLoop(None), + SlotSet(REQUESTED_SLOT, None), + ], + slots=domain.slots, + ) + + # RulePolicy predicts action which handles submit + action_probabilities = policy.predict_action_probabilities( + form_conversation, domain + ) + assert_predicted_action(action_probabilities, domain, submit_action_name) + + +def test_immediate_submit(): + form_name = "some_form" + submit_action_name = "utter_submit" + entity = "some_entity" + slot = "some_slot" + domain = Domain.from_yaml( + f""" + intents: + - 
{GREET_INTENT_NAME} + actions: + - {UTTER_GREET_ACTION} + - some-action + - {submit_action_name} + slots: + {REQUESTED_SLOT}: + type: unfeaturized + {slot}: + type: unfeaturized + forms: + - {form_name} + entities: + - {entity} + """ + ) + + form_activation_rule = _form_activation_rule(domain, form_name, GREET_INTENT_NAME) + form_submit_rule = _form_submit_rule(domain, submit_action_name, form_name) + + policy = RulePolicy() + policy.train( + [GREET_RULE, form_activation_rule, form_submit_rule], domain, RegexInterpreter() + ) + + form_conversation = DialogueStateTracker.from_events( + "in a form", + evts=[ + # Form was activated + ActionExecuted(ACTION_LISTEN_NAME), + # The same intent which activates the form also deactivates it + UserUttered( + "haha", + {"name": GREET_INTENT_NAME}, + entities=[{"entity": entity, "value": "Bruce Wayne"}], + ), + SlotSet(slot, "Bruce"), + ActionExecuted(form_name), + SlotSet("bla", "bla"), + ActiveLoop(None), + SlotSet(REQUESTED_SLOT, None), + ], + slots=domain.slots, + ) + + # RulePolicy predicts action which handles submit + action_probabilities = policy.predict_action_probabilities( + form_conversation, domain + ) + assert_predicted_action(action_probabilities, domain, submit_action_name) + + +@pytest.fixture(scope="session") +def trained_rule_policy_domain() -> Domain: + return Domain.load("examples/rules/domain.yml") + + +@pytest.fixture(scope="session") +async def trained_rule_policy(trained_rule_policy_domain: Domain) -> RulePolicy: + trackers = await training.load_data( + "examples/rules/data/rules.yml", trained_rule_policy_domain + ) + + rule_policy = RulePolicy() + rule_policy.train(trackers, trained_rule_policy_domain, RegexInterpreter()) + + return rule_policy + + +async def test_rule_policy_slot_filling_from_text( + trained_rule_policy: RulePolicy, trained_rule_policy_domain: Domain +): + form_conversation = DialogueStateTracker.from_events( + "in a form", + evts=[ + ActionExecuted(ACTION_LISTEN_NAME), + # User responds and fills requested slot + UserUttered("/activate_q_form", {"name": "activate_q_form"}), + ActionExecuted("loop_q_form"), + ActiveLoop("loop_q_form"), + SlotSet(REQUESTED_SLOT, "some_slot"), + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("/bla", {"name": GREET_INTENT_NAME}), + ActionExecuted("loop_q_form"), + SlotSet("some_slot", "/bla"), + ActiveLoop(None), + SlotSet(REQUESTED_SLOT, None), + ], + slots=trained_rule_policy_domain.slots, + ) + + # RulePolicy predicts action which handles submit + action_probabilities = trained_rule_policy.predict_action_probabilities( + form_conversation, trained_rule_policy_domain + ) + assert_predicted_action( + action_probabilities, trained_rule_policy_domain, "utter_stop" + ) + + +async def test_one_stage_fallback_rule(): + domain = Domain.from_yaml( + f""" + intents: + - {GREET_INTENT_NAME} + - {DEFAULT_NLU_FALLBACK_INTENT_NAME} + actions: + - {UTTER_GREET_ACTION} + """ + ) + + fallback_recover_rule = TrackerWithCachedStates.from_events( + "bla", + domain=domain, + slots=domain.slots, + evts=[ + ActionExecuted(RULE_SNIPPET_ACTION_NAME), + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": DEFAULT_NLU_FALLBACK_INTENT_NAME}), + ActionExecuted(ACTION_DEFAULT_FALLBACK_NAME), + ActionExecuted(ACTION_LISTEN_NAME), + ], + is_rule_tracker=True, + ) + + greet_rule_which_only_applies_at_start = TrackerWithCachedStates.from_events( + "bla", + domain=domain, + evts=[ + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": GREET_INTENT_NAME}), + 
ActionExecuted(UTTER_GREET_ACTION), + ActionExecuted(ACTION_LISTEN_NAME), + ], + is_rule_tracker=True, + ) + policy = RulePolicy() + policy.train( + [greet_rule_which_only_applies_at_start, fallback_recover_rule], + domain, + RegexInterpreter(), + ) + + # RulePolicy predicts fallback action + conversation_events = [ + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("dasdakl;fkasd", {"name": DEFAULT_NLU_FALLBACK_INTENT_NAME}), + ] + tracker = DialogueStateTracker.from_events( + "casd", evts=conversation_events, slots=domain.slots + ) + action_probabilities = policy.predict_action_probabilities(tracker, domain) + assert_predicted_action(action_probabilities, domain, ACTION_DEFAULT_FALLBACK_NAME) + + # Fallback action reverts fallback events, next action is `ACTION_LISTEN` + conversation_events += await ActionDefaultFallback().run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator(domain.templates), + tracker, + domain, + ) + + # Rasa is back on track when user rephrased intent + conversation_events += [ + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ] + tracker = DialogueStateTracker.from_events( + "casd", evts=conversation_events, slots=domain.slots + ) + + action_probabilities = policy.predict_action_probabilities(tracker, domain) + assert_predicted_action(action_probabilities, domain, UTTER_GREET_ACTION) + + +@pytest.mark.parametrize( + "intent_name, expected_action_name", + [ + (USER_INTENT_RESTART, ACTION_RESTART_NAME), + (USER_INTENT_BACK, ACTION_BACK_NAME), + (USER_INTENT_SESSION_START, ACTION_SESSION_START_NAME), + ], +) +def test_default_actions(intent_name: Text, expected_action_name: Text): + domain = Domain.from_yaml( + f""" +intents: +- {GREET_INTENT_NAME} +actions: +- {UTTER_GREET_ACTION} + """ + ) + policy = RulePolicy() + policy.train([GREET_RULE], domain, RegexInterpreter()) + new_conversation = DialogueStateTracker.from_events( + "bla2", + evts=[ + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": GREET_INTENT_NAME}), + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": intent_name}), + ], + ) + action_probabilities = policy.predict_action_probabilities(new_conversation, domain) + + assert_predicted_action(action_probabilities, domain, expected_action_name) + + +@pytest.mark.parametrize( + "rule_policy, expected_confidence, expected_prediction", + [ + (RulePolicy(), 0.3, ACTION_DEFAULT_FALLBACK_NAME), + ( + RulePolicy( + core_fallback_threshold=0.1, + core_fallback_action_name="my_core_fallback", + ), + 0.1, + "my_core_fallback", + ), + ], +) +def test_predict_core_fallback( + rule_policy: RulePolicy, expected_confidence: float, expected_prediction: Text +): + other_intent = "other" + domain = Domain.from_yaml( + f""" + intents: + - {GREET_INTENT_NAME} + - {other_intent} + actions: + - {UTTER_GREET_ACTION} + - my_core_fallback + """ + ) + rule_policy.train([GREET_RULE], domain, RegexInterpreter()) + + new_conversation = DialogueStateTracker.from_events( + "bla2", + evts=[ + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": other_intent}), + ], + ) + + action_probabilities = rule_policy.predict_action_probabilities( + new_conversation, domain + ) + + assert_predicted_action( + action_probabilities, domain, expected_prediction, expected_confidence + ) + + +def test_predict_nothing_if_fallback_disabled(): + other_intent = "other" + domain = Domain.from_yaml( + f""" + intents: + - {GREET_INTENT_NAME} + - {other_intent} + actions: + - {UTTER_GREET_ACTION} + """ + ) + 
policy = RulePolicy(enable_fallback_prediction=False) + policy.train([GREET_RULE], domain, RegexInterpreter()) + new_conversation = DialogueStateTracker.from_events( + "bla2", + evts=[ + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered("haha", {"name": other_intent}), + ], + ) + action_probabilities = policy.predict_action_probabilities(new_conversation, domain) + + assert max(action_probabilities) == 0 diff --git a/tests/core/test_actions.py b/tests/core/test_actions.py index 493cba135450..1d33f49828da 100644 --- a/tests/core/test_actions.py +++ b/tests/core/test_actions.py @@ -1,3 +1,5 @@ +from typing import List + import pytest from aioresponses import aioresponses @@ -12,6 +14,8 @@ ACTION_LISTEN_NAME, ACTION_RESTART_NAME, ACTION_REVERT_FALLBACK_EVENTS_NAME, + ACTION_SESSION_START_NAME, + RULE_SNIPPET_ACTION_NAME, ActionBack, ActionDefaultAskAffirmation, ActionDefaultAskRephrase, @@ -22,14 +26,28 @@ ActionUtterTemplate, ActionRetrieveResponse, RemoteAction, + ActionSessionStart, +) +from rasa.core.actions.forms import FormAction +from rasa.core.actions.two_stage_fallback import ACTION_TWO_STAGE_FALLBACK_NAME +from rasa.core.channels import CollectingOutputChannel +from rasa.core.domain import Domain, SessionConfig +from rasa.core.events import ( + Restarted, + SlotSet, + UserUtteranceReverted, + BotUttered, + ActiveLoop, + SessionStarted, + ActionExecuted, + Event, + UserUttered, ) -from rasa.core.domain import Domain, InvalidDomain -from rasa.core.events import Restarted, SlotSet, UserUtteranceReverted, BotUttered from rasa.core.nlg.template import TemplatedNaturalLanguageGenerator -from rasa.core.trackers import DialogueStateTracker +from rasa.core.constants import USER_INTENT_SESSION_START +from rasa.core.trackers import DialogueStateTracker, ACTIVE_LOOP_KEY from rasa.utils.endpoints import ClientResponseError, EndpointConfig from tests.utilities import json_of_latest_request, latest_request -from rasa.core.constants import UTTER_PREFIX, RESPOND_PREFIX @pytest.fixture(scope="module") @@ -93,23 +111,26 @@ def test_domain_action_instantiation(): slots=[], templates={}, action_names=["my_module.ActionTest", "utter_test", "respond_test"], - form_names=[], + forms=[], ) instantiated_actions = domain.actions(None) - assert len(instantiated_actions) == 11 + assert len(instantiated_actions) == 14 assert instantiated_actions[0].name() == ACTION_LISTEN_NAME assert instantiated_actions[1].name() == ACTION_RESTART_NAME - assert instantiated_actions[2].name() == ACTION_DEFAULT_FALLBACK_NAME - assert instantiated_actions[3].name() == ACTION_DEACTIVATE_FORM_NAME - assert instantiated_actions[4].name() == ACTION_REVERT_FALLBACK_EVENTS_NAME - assert instantiated_actions[5].name() == (ACTION_DEFAULT_ASK_AFFIRMATION_NAME) - assert instantiated_actions[6].name() == (ACTION_DEFAULT_ASK_REPHRASE_NAME) - assert instantiated_actions[7].name() == ACTION_BACK_NAME - assert instantiated_actions[8].name() == "my_module.ActionTest" - assert instantiated_actions[9].name() == "utter_test" - assert instantiated_actions[10].name() == "respond_test" + assert instantiated_actions[2].name() == ACTION_SESSION_START_NAME + assert instantiated_actions[3].name() == ACTION_DEFAULT_FALLBACK_NAME + assert instantiated_actions[4].name() == ACTION_DEACTIVATE_FORM_NAME + assert instantiated_actions[5].name() == ACTION_REVERT_FALLBACK_EVENTS_NAME + assert instantiated_actions[6].name() == ACTION_DEFAULT_ASK_AFFIRMATION_NAME + assert instantiated_actions[7].name() == ACTION_DEFAULT_ASK_REPHRASE_NAME + assert 
instantiated_actions[8].name() == ACTION_TWO_STAGE_FALLBACK_NAME + assert instantiated_actions[9].name() == ACTION_BACK_NAME + assert instantiated_actions[10].name() == RULE_SNIPPET_ACTION_NAME + assert instantiated_actions[11].name() == "my_module.ActionTest" + assert instantiated_actions[12].name() == "utter_test" + assert instantiated_actions[13].name() == "respond_test" async def test_remote_action_runs( @@ -144,9 +165,9 @@ async def test_remote_action_runs( "intent": {}, "text": None, "message_id": None, - "metadata": None, + "metadata": {}, }, - "active_form": {}, + ACTIVE_LOOP_KEY: {}, "latest_action_name": None, "sender_id": "my-sender", "paused": False, @@ -168,7 +189,11 @@ async def test_remote_action_logs_events( response = { "events": [{"event": "slot", "value": "rasa", "name": "name"}], "responses": [ - {"text": "test text", "buttons": [{"title": "cheap", "payload": "cheap"}]}, + { + "text": "test text", + "template": None, + "buttons": [{"title": "cheap", "payload": "cheap"}], + }, {"template": "utter_greet"}, ], } @@ -194,9 +219,9 @@ async def test_remote_action_logs_events( "intent": {}, "text": None, "message_id": None, - "metadata": None, + "metadata": {}, }, - "active_form": {}, + ACTIVE_LOOP_KEY: {}, "latest_action_name": None, "sender_id": "my-sender", "paused": False, @@ -212,10 +237,60 @@ async def test_remote_action_logs_events( assert events[0] == BotUttered( "test text", {"buttons": [{"title": "cheap", "payload": "cheap"}]} ) - assert events[1] == BotUttered("hey there None!") + assert events[1] == BotUttered( + "hey there None!", metadata={"template_name": "utter_greet"} + ) assert events[2] == SlotSet("name", "rasa") +async def test_remote_action_utterances_with_none_values( + default_channel, default_tracker, default_domain +): + endpoint = EndpointConfig("https://example.com/webhooks/actions") + remote_action = action.RemoteAction("my_action", endpoint) + + response = { + "events": [ + {"event": "form", "name": "restaurant_form", "timestamp": None}, + { + "event": "slot", + "timestamp": None, + "name": "requested_slot", + "value": "cuisine", + }, + ], + "responses": [ + { + "text": None, + "buttons": None, + "elements": [], + "custom": None, + "template": "utter_ask_cuisine", + "image": None, + "attachment": None, + } + ], + } + + nlg = TemplatedNaturalLanguageGenerator( + {"utter_ask_cuisine": [{"text": "what dou want to eat?"}]} + ) + with aioresponses() as mocked: + mocked.post("https://example.com/webhooks/actions", payload=response) + + events = await remote_action.run( + default_channel, nlg, default_tracker, default_domain + ) + + assert events == [ + BotUttered( + "what dou want to eat?", metadata={"template_name": "utter_ask_cuisine"} + ), + ActiveLoop("restaurant_form"), + SlotSet("requested_slot", "cuisine"), + ] + + async def test_remote_action_without_endpoint( default_channel, default_nlg, default_tracker, default_domain ): @@ -288,7 +363,12 @@ async def test_action_utter_retrieved_response( default_tracker.latest_message = UserMessage( "Who are you?", parse_data={ - "response_selector": {"chitchat": {"response": {"name": "I am a bot."}}} + "response_selector": { + "chitchat": { + "response": {"text": "I am a bot."}, + "full_retrieval_intent": "chitchat/ask_name", + } + } }, ) events = await ActionRetrieveResponse(action_name).run( @@ -298,6 +378,9 @@ async def test_action_utter_retrieved_response( assert events[0].as_dict().get("text") == BotUttered("I am a bot.").as_dict().get( "text" ) + assert ( + 
events[0].as_dict().get("metadata").get("template_name") == "chitchat/ask_name" + ) async def test_action_utter_default_retrieved_response( @@ -309,7 +392,12 @@ async def test_action_utter_default_retrieved_response( default_tracker.latest_message = UserMessage( "Who are you?", parse_data={ - "response_selector": {"default": {"response": {"name": "I am a bot."}}} + "response_selector": { + "default": { + "response": {"text": "I am a bot."}, + "full_retrieval_intent": "chitchat/ask_name", + } + } }, ) events = await ActionRetrieveResponse(action_name).run( @@ -330,7 +418,12 @@ async def test_action_utter_retrieved_empty_response( default_tracker.latest_message = UserMessage( "Who are you?", parse_data={ - "response_selector": {"dummy": {"response": {"name": "I am a bot."}}} + "response_selector": { + "dummy": { + "response": {"text": "I am a bot."}, + "full_retrieval_intent": "chitchat/ask_name", + } + } }, ) events = await ActionRetrieveResponse(action_name).run( @@ -347,7 +440,11 @@ async def test_action_utter_template( default_channel, default_nlg, default_tracker, default_domain ) - assert events == [BotUttered("this is a default channel")] + assert events == [ + BotUttered( + "this is a default channel", metadata={"template_name": "utter_channel"} + ) + ] async def test_action_utter_template_unknown_template( @@ -376,6 +473,7 @@ async def test_action_utter_template_with_buttons( {"payload": "button2", "title": "button2"}, ] }, + metadata={"template_name": "utter_buttons"}, ) ] @@ -404,7 +502,10 @@ async def test_action_utter_template_channel_specific( ) assert events == [ - BotUttered("you're talking to me on slack!", metadata={"channel": "slack"}) + BotUttered( + "you're talking to me on slack!", + metadata={"channel": "slack", "template_name": "utter_channel"}, + ) ] @@ -416,7 +517,7 @@ async def test_action_back( ) assert events == [ - BotUttered("backing up..."), + BotUttered("backing up...", metadata={"template_name": "utter_back"}), UserUtteranceReverted(), UserUtteranceReverted(), ] @@ -429,7 +530,95 @@ async def test_action_restart( default_channel, template_nlg, template_sender_tracker, default_domain ) - assert events == [BotUttered("congrats, you've restarted me!"), Restarted()] + assert events == [ + BotUttered( + "congrats, you've restarted me!", + metadata={"template_name": "utter_restart"}, + ), + Restarted(), + ] + + +async def test_action_session_start_without_slots( + default_channel: CollectingOutputChannel, + template_nlg: TemplatedNaturalLanguageGenerator, + template_sender_tracker: DialogueStateTracker, + default_domain: Domain, +): + events = await ActionSessionStart().run( + default_channel, template_nlg, template_sender_tracker, default_domain + ) + assert events == [SessionStarted(), ActionExecuted(ACTION_LISTEN_NAME)] + + +@pytest.mark.parametrize( + "session_config, expected_events", + [ + ( + SessionConfig(123, True), + [ + SessionStarted(), + SlotSet("my_slot", "value"), + SlotSet("another-slot", "value2"), + ActionExecuted(action_name=ACTION_LISTEN_NAME), + ], + ), + ( + SessionConfig(123, False), + [SessionStarted(), ActionExecuted(action_name=ACTION_LISTEN_NAME)], + ), + ], +) +async def test_action_session_start_with_slots( + default_channel: CollectingOutputChannel, + template_nlg: TemplatedNaturalLanguageGenerator, + template_sender_tracker: DialogueStateTracker, + default_domain: Domain, + session_config: SessionConfig, + expected_events: List[Event], +): + # set a few slots on tracker + slot_set_event_1 = SlotSet("my_slot", "value") + 
slot_set_event_2 = SlotSet("another-slot", "value2") + for event in [slot_set_event_1, slot_set_event_2]: + template_sender_tracker.update(event) + + default_domain.session_config = session_config + + events = await ActionSessionStart().run( + default_channel, template_nlg, template_sender_tracker, default_domain + ) + + assert events == expected_events + + # make sure that the list of events has ascending timestamps + assert sorted(events, key=lambda x: x.timestamp) == events + + +async def test_applied_events_after_action_session_start( + default_channel: CollectingOutputChannel, + template_nlg: TemplatedNaturalLanguageGenerator, +): + slot_set = SlotSet("my_slot", "value") + events = [ + slot_set, + ActionExecuted(ACTION_LISTEN_NAME), + # User triggers a restart manually by triggering the intent + UserUttered( + text=f"/{USER_INTENT_SESSION_START}", + intent={"name": USER_INTENT_SESSION_START}, + ), + ] + tracker = DialogueStateTracker.from_events("🕵️‍♀️", events) + + # Mapping Policy kicks in and runs the session restart action + events = await ActionSessionStart().run( + default_channel, template_nlg, tracker, Domain.empty() + ) + for event in events: + tracker.update(event) + + assert tracker.applied_events() == [slot_set, ActionExecuted(ACTION_LISTEN_NAME)] async def test_action_default_fallback( @@ -440,7 +629,10 @@ async def test_action_default_fallback( ) assert events == [ - BotUttered("sorry, I didn't get that, can you rephrase it?"), + BotUttered( + "sorry, I didn't get that, can you rephrase it?", + metadata={"template_name": "utter_default"}, + ), UserUtteranceReverted(), ] @@ -461,6 +653,7 @@ async def test_action_default_ask_affirmation( {"title": "No", "payload": "/out_of_scope"}, ] }, + {"template_name": "action_default_ask_affirmation"}, ) ] @@ -472,4 +665,53 @@ async def test_action_default_ask_rephrase( default_channel, template_nlg, template_sender_tracker, default_domain ) - assert events == [BotUttered("can you rephrase that?")] + assert events == [ + BotUttered( + "can you rephrase that?", metadata={"template_name": "utter_ask_rephrase"} + ) + ] + + +def test_get_form_action(): + form_action_name = "my_business_logic" + domain = Domain.from_yaml( + f""" + actions: + - my_action + forms: + - {form_action_name}: + my_slot: + - type: from_text + """ + ) + + actual = domain.action_for_name(form_action_name, None) + assert isinstance(actual, FormAction) + + +def test_get_form_action_without_slot_mapping(): + form_action_name = "my_business_logic" + domain = Domain.from_yaml( + f""" + actions: + - my_action + forms: + - {form_action_name} + """ + ) + + actual = domain.action_for_name(form_action_name, None) + assert isinstance(actual, RemoteAction) + + +def test_get_form_action_if_not_in_forms(): + form_action_name = "my_business_logic" + domain = Domain.from_yaml( + """ + actions: + - my_action + """ + ) + + with pytest.raises(NameError): + assert not domain.action_for_name(form_action_name, None) diff --git a/tests/core/test_agent.py b/tests/core/test_agent.py index 0b9492eb18b9..c1d7dcb0cd20 100644 --- a/tests/core/test_agent.py +++ b/tests/core/test_agent.py @@ -1,39 +1,38 @@ import asyncio -from typing import Text +from typing import Any, Dict, Text, List, Callable, Optional +from unittest.mock import Mock import pytest -from async_generator import async_generator, yield_ +from _pytest.logging import LogCaptureFixture +from _pytest.monkeypatch import MonkeyPatch +from pytest_sanic.utils import TestClient from sanic import Sanic, response +from sanic.request import 
Request +from sanic.response import StreamingHTTPResponse -import rasa.utils.io import rasa.core -from rasa.core import config, jobs, utils +from rasa.core.policies.form_policy import FormPolicy +from rasa.core.policies.rule_policy import RulePolicy +from rasa.core.policies.ted_policy import TEDPolicy +from rasa.core.policies.mapping_policy import MappingPolicy +import rasa.utils.io +from rasa.core import jobs, utils from rasa.core.agent import Agent, load_agent -from rasa.core.domain import Domain, InvalidDomain from rasa.core.channels.channel import UserMessage +from rasa.core.domain import Domain, InvalidDomain from rasa.core.interpreter import INTENT_MESSAGE_PREFIX -from rasa.core.policies.ensemble import PolicyEnsemble -from rasa.core.policies.memoization import AugmentedMemoizationPolicy +from rasa.core.policies.ensemble import PolicyEnsemble, SimplePolicyEnsemble +from rasa.core.policies.memoization import AugmentedMemoizationPolicy, MemoizationPolicy from rasa.utils.endpoints import EndpointConfig - from tests.core.conftest import DEFAULT_DOMAIN_PATH_WITH_SLOTS -@pytest.fixture(scope="session") -def loop(): - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - loop = rasa.utils.io.enable_async_loop_debugging(loop) - yield loop - loop.close() - - -def model_server_app(model_path: Text, model_hash: Text = "somehash"): +def model_server_app(model_path: Text, model_hash: Text = "somehash") -> Sanic: app = Sanic(__name__) app.number_of_model_requests = 0 @app.route("/model", methods=["GET"]) - async def model(request): + async def model(request: Request) -> StreamingHTTPResponse: """Simple HTTP model server responding with a trained model.""" if model_hash == request.headers.get("If-None-Match"): @@ -50,18 +49,16 @@ async def model(request): return app -@pytest.fixture -@async_generator -async def model_server(test_server, trained_moodbot_path): - server = await test_server( - model_server_app(trained_moodbot_path, model_hash="somehash") - ) - await yield_(server) # python 3.5 compatibility - await server.close() +@pytest.fixture() +def model_server( + loop: asyncio.AbstractEventLoop, sanic_client: Callable, trained_moodbot_path: Text +) -> TestClient: + app = model_server_app(trained_moodbot_path, model_hash="somehash") + return loop.run_until_complete(sanic_client(app)) -async def test_training_data_is_reproducible(tmpdir, default_domain): - training_data_file = "examples/moodbot/data/stories.md" +async def test_training_data_is_reproducible(): + training_data_file = "examples/moodbot/data/stories.yml" agent = Agent( "examples/moodbot/domain.yml", policies=[AugmentedMemoizationPolicy()] ) @@ -75,30 +72,25 @@ async def test_training_data_is_reproducible(tmpdir, default_domain): assert str(x.as_dialogue()) == str(same_training_data[i].as_dialogue()) -async def test_agent_train(tmpdir, default_domain): - training_data_file = "examples/moodbot/data/stories.md" - agent = Agent( - "examples/moodbot/domain.yml", policies=[AugmentedMemoizationPolicy()] - ) - - training_data = await agent.load_data(training_data_file) - - agent.train(training_data) - agent.persist(tmpdir.strpath) - - loaded = Agent.load(tmpdir.strpath) +async def test_agent_train(trained_moodbot_path: Text): + moodbot_domain = Domain.load("examples/moodbot/domain.yml") + loaded = Agent.load(trained_moodbot_path) # test domain - assert loaded.domain.action_names == agent.domain.action_names - assert loaded.domain.intents == agent.domain.intents - assert loaded.domain.entities == agent.domain.entities - assert 
loaded.domain.templates == agent.domain.templates - assert [s.name for s in loaded.domain.slots] == [s.name for s in agent.domain.slots] + assert loaded.domain.action_names == moodbot_domain.action_names + assert loaded.domain.intents == moodbot_domain.intents + assert loaded.domain.entities == moodbot_domain.entities + assert loaded.domain.templates == moodbot_domain.templates + assert [s.name for s in loaded.domain.slots] == [ + s.name for s in moodbot_domain.slots + ] # test policies - assert isinstance(loaded.policy_ensemble, type(agent.policy_ensemble)) + assert isinstance(loaded.policy_ensemble, SimplePolicyEnsemble) assert [type(p) for p in loaded.policy_ensemble.policies] == [ - type(p) for p in agent.policy_ensemble.policies + TEDPolicy, + MemoizationPolicy, + RulePolicy, ] @@ -128,13 +120,13 @@ async def test_agent_train(tmpdir, default_domain): ], ) async def test_agent_parse_message_using_nlu_interpreter( - default_agent, text_message_data, expected + default_agent: Agent, text_message_data: Text, expected: Dict[Text, Any] ): result = await default_agent.parse_message_using_nlu_interpreter(text_message_data) assert result == expected -async def test_agent_handle_text(default_agent): +async def test_agent_handle_text(default_agent: Agent): text = INTENT_MESSAGE_PREFIX + 'greet{"name":"Rasa"}' result = await default_agent.handle_text(text, sender_id="test_agent_handle_text") assert result == [ @@ -142,7 +134,7 @@ async def test_agent_handle_text(default_agent): ] -async def test_agent_handle_message(default_agent): +async def test_agent_handle_message(default_agent: Agent): text = INTENT_MESSAGE_PREFIX + 'greet{"name":"Rasa"}' message = UserMessage(text, sender_id="test_agent_handle_message") result = await default_agent.handle_message(message) @@ -151,8 +143,8 @@ async def test_agent_handle_message(default_agent): ] -def test_agent_wrong_use_of_load(tmpdir, default_domain): - training_data_file = "examples/moodbot/data/stories.md" +def test_agent_wrong_use_of_load(): + training_data_file = "examples/moodbot/data/stories.yml" agent = Agent( "examples/moodbot/domain.yml", policies=[AugmentedMemoizationPolicy()] ) @@ -164,7 +156,7 @@ def test_agent_wrong_use_of_load(tmpdir, default_domain): async def test_agent_with_model_server_in_thread( - model_server, moodbot_domain, moodbot_metadata + model_server: TestClient, moodbot_domain: Domain, moodbot_metadata: Any ): model_endpoint_config = EndpointConfig.from_dict( {"url": model_server.make_url("/model"), "wait_time_between_pulls": 2} @@ -175,7 +167,7 @@ async def test_agent_with_model_server_in_thread( agent, model_server=model_endpoint_config ) - await asyncio.sleep(3) + await asyncio.sleep(5) assert agent.fingerprint == "somehash" assert hash(agent.domain) == hash(moodbot_domain) @@ -189,8 +181,9 @@ async def test_agent_with_model_server_in_thread( jobs.kill_scheduler() -async def test_wait_time_between_pulls_without_interval(model_server, monkeypatch): - +async def test_wait_time_between_pulls_without_interval( + model_server: TestClient, monkeypatch: MonkeyPatch +): monkeypatch.setattr( "rasa.core.agent.schedule_model_pulling", lambda *args: 1 / 0 ) # will raise an exception @@ -200,13 +193,34 @@ async def test_wait_time_between_pulls_without_interval(model_server, monkeypatc ) agent = Agent() - # schould not call schedule_model_pulling, if it does, this will raise + # should not call schedule_model_pulling, if it does, this will raise await rasa.core.agent.load_from_server(agent, model_server=model_endpoint_config) - 
jobs.kill_scheduler() -async def test_load_agent(trained_model): - agent = await load_agent(model_path=trained_model) +async def test_pull_model_with_invalid_domain( + model_server: TestClient, monkeypatch: MonkeyPatch, caplog: LogCaptureFixture +): + # mock `Domain.load()` as if the domain contains invalid YAML + error_message = "domain is invalid" + mock_load = Mock(side_effect=InvalidDomain(error_message)) + + monkeypatch.setattr(Domain, "load", mock_load) + model_endpoint_config = EndpointConfig.from_dict( + {"url": model_server.make_url("/model"), "wait_time_between_pulls": None} + ) + + agent = Agent() + await rasa.core.agent.load_from_server(agent, model_server=model_endpoint_config) + + # `Domain.load()` was called + mock_load.assert_called_once() + + # error was logged + assert error_message in caplog.text + + +async def test_load_agent(trained_rasa_model: Text): + agent = await load_agent(model_path=trained_rasa_model) assert agent.tracker_store is not None assert agent.interpreter is not None @@ -214,18 +228,32 @@ async def test_load_agent(trained_model): @pytest.mark.parametrize( - "domain, policy_config", - [({"forms": ["restaurant_form"]}, {"policies": [{"name": "MemoizationPolicy"}]})], + "policy_config", [{"policies": [{"name": "MemoizationPolicy"}]}] ) -def test_form_without_form_policy(domain, policy_config): +def test_form_without_form_policy(policy_config: Dict[Text, List[Text]]): with pytest.raises(InvalidDomain) as execinfo: Agent( - domain=Domain.from_dict(domain), + domain=Domain.from_dict({"forms": ["restaurant_form"]}), policies=PolicyEnsemble.from_dict(policy_config), ) assert "haven't added the FormPolicy" in str(execinfo.value) +@pytest.mark.parametrize( + "policy_config", + [ + {"policies": [{"name": FormPolicy.__name__}]}, + {"policies": [{"name": RulePolicy.__name__}]}, + ], +) +def test_forms_with_suited_policy(policy_config: Dict[Text, List[Text]]): + # Doesn't raise + Agent( + domain=Domain.from_dict({"forms": ["restaurant_form"]}), + policies=PolicyEnsemble.from_dict(policy_config), + ) + + @pytest.mark.parametrize( "domain, policy_config", [ @@ -238,7 +266,9 @@ def test_form_without_form_policy(domain, policy_config): ) ], ) -def test_trigger_without_mapping_policy(domain, policy_config): +def test_trigger_without_mapping_policy( + domain: Dict[Text, Any], policy_config: Dict[Text, Any] +): with pytest.raises(InvalidDomain) as execinfo: Agent( domain=Domain.from_dict(domain), @@ -249,30 +279,104 @@ def test_trigger_without_mapping_policy(domain, policy_config): @pytest.mark.parametrize( "domain, policy_config", - [({"intents": ["affirm"]}, {"policies": [{"name": "TwoStageFallbackPolicy"}]})], + [ + ( + {"intents": ["affirm"]}, + { + "policies": [ + { + "name": "TwoStageFallbackPolicy", + "deny_suggestion_intent_name": "deny", + } + ] + }, + ) + ], ) -def test_two_stage_fallback_without_deny_suggestion(domain, policy_config): +def test_two_stage_fallback_without_deny_suggestion( + domain: Dict[Text, Any], policy_config: Dict[Text, Any] +): with pytest.raises(InvalidDomain) as execinfo: Agent( domain=Domain.from_dict(domain), policies=PolicyEnsemble.from_dict(policy_config), ) - assert "The intent 'out_of_scope' must be present" in str(execinfo.value) + assert "The intent 'deny' must be present" in str(execinfo.value) + + +@pytest.mark.parametrize( + "domain, policy_config", + [ + ( + {"actions": ["other-action"]}, + { + "policies": [ + {"name": "RulePolicy", "core_fallback_action_name": "my_fallback"} + ] + }, + ) + ], +) +def 
test_rule_policy_without_fallback_action_present( + domain: Dict[Text, Any], policy_config: Dict[Text, Any] +): + with pytest.raises(InvalidDomain) as execinfo: + Agent( + domain=Domain.from_dict(domain), + policies=PolicyEnsemble.from_dict(policy_config), + ) + + assert RulePolicy.__name__ in execinfo.value.message + + +@pytest.mark.parametrize( + "domain, policy_config", + [ + ( + {"actions": ["other-action"]}, + { + "policies": [ + { + "name": "RulePolicy", + "core_fallback_action_name": "my_fallback", + "enable_fallback_prediction": False, + } + ] + }, + ), + ( + {"actions": ["my-action"]}, + { + "policies": [ + {"name": "RulePolicy", "core_fallback_action_name": "my-action"} + ] + }, + ), + ({}, {"policies": [{"name": "MemoizationPolicy"}]}), + ], +) +def test_rule_policy_valid(domain: Dict[Text, Any], policy_config: Dict[Text, Any]): + # no exception should be thrown + Agent( + domain=Domain.from_dict(domain), + policies=PolicyEnsemble.from_dict(policy_config), + ) -async def test_agent_update_model_none_domain(trained_model): - agent = await load_agent(model_path=trained_model) +async def test_agent_update_model_none_domain(trained_rasa_model: Text): + agent = await load_agent(model_path=trained_rasa_model) agent.update_model( None, None, agent.fingerprint, agent.interpreter, agent.model_directory ) + assert agent.domain is not None sender_id = "test_sender_id" message = UserMessage("hello", sender_id=sender_id) await agent.handle_message(message) tracker = agent.tracker_store.get_or_create_tracker(sender_id) # UserUttered event was added to tracker, with correct intent data - assert tracker.events[1].intent["name"] == "greet" + assert tracker.events[3].intent["name"] == "greet" async def test_load_agent_on_not_existing_path(): @@ -290,6 +394,6 @@ async def test_load_agent_on_not_existing_path(): None, ], ) -async def test_agent_load_on_invalid_model_path(model_path): +async def test_agent_load_on_invalid_model_path(model_path: Optional[Text]): with pytest.raises(ValueError): Agent.load(model_path) diff --git a/tests/core/test_broker.py b/tests/core/test_broker.py index aeb0e6be9f3e..05484146c49c 100644 --- a/tests/core/test_broker.py +++ b/tests/core/test_broker.py @@ -1,10 +1,19 @@ import json +import logging +from pathlib import Path -import rasa.core.brokers.utils as broker_utils -from rasa.core.brokers.file_producer import FileProducer -from rasa.core.brokers.kafka import KafkaProducer -from rasa.core.brokers.pika import PikaProducer -from rasa.core.brokers.sql import SQLProducer +from typing import Union, Text, List, Optional, Type + +import pytest +from _pytest.logging import LogCaptureFixture + +from _pytest.monkeypatch import MonkeyPatch + +from rasa.core.brokers.broker import EventBroker +from rasa.core.brokers.file import FileEventBroker +from rasa.core.brokers.kafka import KafkaEventBroker +from rasa.core.brokers.pika import PikaEventBroker, DEFAULT_QUEUE_NAME +from rasa.core.brokers.sql import SQLEventBroker from rasa.core.events import Event, Restarted, SlotSet, UserUttered from rasa.utils.endpoints import EndpointConfig, read_endpoint_config from tests.core.conftest import DEFAULT_ENDPOINTS_FILE @@ -20,18 +29,69 @@ def test_pika_broker_from_config(): cfg = read_endpoint_config( "data/test_endpoints/event_brokers/pika_endpoint.yml", "event_broker" ) - actual = broker_utils.from_endpoint_config(cfg) + actual = EventBroker.create(cfg) - assert isinstance(actual, PikaProducer) + assert isinstance(actual, PikaEventBroker) assert actual.host == "localhost" assert 
actual.username == "username" - assert actual.queue == "queue" + assert actual.queues == ["queue-1"] + + +# noinspection PyProtectedMember +def test_pika_message_property_app_id(monkeypatch: MonkeyPatch): + # patch PikaEventBroker so it doesn't try to connect to RabbitMQ on init + monkeypatch.setattr(PikaEventBroker, "_run_pika", lambda _: None) + pika_producer = PikaEventBroker("", "", "") + + # unset RASA_ENVIRONMENT env var results in empty App ID + monkeypatch.delenv("RASA_ENVIRONMENT", raising=False) + assert not pika_producer._get_message_properties().app_id + + # setting it to some value results in that value as the App ID + rasa_environment = "some-test-environment" + monkeypatch.setenv("RASA_ENVIRONMENT", rasa_environment) + assert pika_producer._get_message_properties().app_id == rasa_environment + + +@pytest.mark.parametrize( + "queue_arg,queues_arg,expected,warning", + [ + # default case + (None, ["q1"], ["q1"], None), + # only provide `queue` + ("q1", None, ["q1"], FutureWarning), + # supplying a list for `queue` works too + (["q1", "q2"], None, ["q1", "q2"], FutureWarning), + # `queues` arg supplied, takes precedence + ("q1", "q2", ["q2"], FutureWarning), + # same, but with a list + ("q1", ["q2", "q3"], ["q2", "q3"], FutureWarning), + # only supplying `queues` works, and queues is a string + (None, "q1", ["q1"], None), + # no queues provided. Use default queue and print warning. + (None, None, [DEFAULT_QUEUE_NAME], UserWarning), + ], +) +def test_pika_queues_from_args( + queue_arg: Union[Text, List[Text], None], + queues_arg: Union[Text, List[Text], None], + expected: List[Text], + warning: Optional[Type[Warning]], + monkeypatch: MonkeyPatch, +): + # patch PikaEventBroker so it doesn't try to connect to RabbitMQ on init + monkeypatch.setattr(PikaEventBroker, "_run_pika", lambda _: None) + + with pytest.warns(warning): + pika_producer = PikaEventBroker("", "", "", queues=queues_arg, queue=queue_arg) + + assert pika_producer.queues == expected def test_no_broker_in_config(): cfg = read_endpoint_config(DEFAULT_ENDPOINTS_FILE, "event_broker") - actual = broker_utils.from_endpoint_config(cfg) + actual = EventBroker.create(cfg) assert actual is None @@ -40,9 +100,9 @@ def test_sql_broker_from_config(): cfg = read_endpoint_config( "data/test_endpoints/event_brokers/sql_endpoint.yml", "event_broker" ) - actual = broker_utils.from_endpoint_config(cfg) + actual = EventBroker.create(cfg) - assert isinstance(actual, SQLProducer) + assert isinstance(actual, SQLEventBroker) assert actual.engine.name == "sqlite" @@ -50,7 +110,9 @@ def test_sql_broker_logs_to_sql_db(): cfg = read_endpoint_config( "data/test_endpoints/event_brokers/sql_endpoint.yml", "event_broker" ) - actual = broker_utils.from_endpoint_config(cfg) + actual = EventBroker.create(cfg) + + assert isinstance(actual, SQLEventBroker) for e in TEST_EVENTS: actual.publish(e.as_dict()) @@ -68,17 +130,17 @@ def test_file_broker_from_config(): cfg = read_endpoint_config( "data/test_endpoints/event_brokers/file_endpoint.yml", "event_broker" ) - actual = broker_utils.from_endpoint_config(cfg) + actual = EventBroker.create(cfg) - assert isinstance(actual, FileProducer) + assert isinstance(actual, FileEventBroker) assert actual.path == "rasa_event.log" -def test_file_broker_logs_to_file(tmpdir): - fname = tmpdir.join("events.log").strpath +def test_file_broker_logs_to_file(tmp_path: Path): + log_file_path = str(tmp_path / "events.log") - actual = broker_utils.from_endpoint_config( - EndpointConfig(**{"type": "file", "path": fname}) + 
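+    # the file broker is expected to append each published event as one JSON object per line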
actual = EventBroker.create( + EndpointConfig(**{"type": "file", "path": log_file_path}) ) for e in TEST_EVENTS: @@ -86,18 +148,18 @@ def test_file_broker_logs_to_file(tmpdir): # reading the events from the file one event per line recovered = [] - with open(fname, "r") as f: - for l in f: - recovered.append(Event.from_parameters(json.loads(l))) + with open(log_file_path, "r") as log_file: + for line in log_file: + recovered.append(Event.from_parameters(json.loads(line))) assert recovered == TEST_EVENTS -def test_file_broker_properly_logs_newlines(tmpdir): - fname = tmpdir.join("events.log").strpath +def test_file_broker_properly_logs_newlines(tmp_path): + log_file_path = str(tmp_path / "events.log") - actual = broker_utils.from_endpoint_config( - EndpointConfig(**{"type": "file", "path": fname}) + actual = EventBroker.create( + EndpointConfig(**{"type": "file", "path": log_file_path}) ) event_with_newline = UserUttered("hello \n there") @@ -106,30 +168,30 @@ def test_file_broker_properly_logs_newlines(tmpdir): # reading the events from the file one event per line recovered = [] - with open(fname, "r") as f: - for l in f: - recovered.append(Event.from_parameters(json.loads(l))) + with open(log_file_path, "r") as log_file: + for line in log_file: + recovered.append(Event.from_parameters(json.loads(line))) assert recovered == [event_with_newline] def test_load_custom_broker_name(): - config = EndpointConfig(**{"type": "rasa.core.brokers.file_producer.FileProducer"}) - assert broker_utils.from_endpoint_config(config) + config = EndpointConfig(**{"type": "rasa.core.brokers.file.FileEventBroker"}) + assert EventBroker.create(config) def test_load_non_existent_custom_broker_name(): config = EndpointConfig(**{"type": "rasa.core.brokers.my.MyProducer"}) - assert broker_utils.from_endpoint_config(config) is None + assert EventBroker.create(config) is None def test_kafka_broker_from_config(): endpoints_path = "data/test_endpoints/event_brokers/kafka_plaintext_endpoint.yml" cfg = read_endpoint_config(endpoints_path, "event_broker") - actual = KafkaProducer.from_endpoint_config(cfg) + actual = KafkaEventBroker.from_endpoint_config(cfg) - expected = KafkaProducer( + expected = KafkaEventBroker( "localhost", "username", "password", @@ -141,3 +203,27 @@ def test_kafka_broker_from_config(): assert actual.sasl_username == expected.sasl_username assert actual.sasl_password == expected.sasl_password assert actual.topic == expected.topic + + +def test_no_pika_logs_if_no_debug_mode(caplog: LogCaptureFixture): + from rasa.core.brokers import pika + + with caplog.at_level(logging.INFO): + with pytest.raises(Exception): + pika.initialise_pika_connection( + "localhost", "user", "password", connection_attempts=1 + ) + + assert len(caplog.records) == 0 + + +def test_pika_logs_in_debug_mode(caplog: LogCaptureFixture, monkeypatch: MonkeyPatch): + from rasa.core.brokers import pika + + with caplog.at_level(logging.DEBUG): + with pytest.raises(Exception): + pika.initialise_pika_connection( + "localhost", "user", "password", connection_attempts=1 + ) + + assert len(caplog.records) > 0 diff --git a/tests/core/test_channels.py b/tests/core/test_channels.py index b582376f2112..3944219d09ee 100644 --- a/tests/core/test_channels.py +++ b/tests/core/test_channels.py @@ -1,27 +1,65 @@ import json import logging -from unittest.mock import patch, MagicMock +from typing import Dict +from unittest.mock import patch, MagicMock, Mock import pytest -import responses +from _pytest.monkeypatch import MonkeyPatch +from aiohttp import 
ClientTimeout from aioresponses import aioresponses from sanic import Sanic import rasa.core.run from rasa.core import utils +from rasa.core.channels import RasaChatInput, console from rasa.core.channels.channel import UserMessage +from rasa.core.channels.rasa_chat import ( + JWT_USERNAME_KEY, + CONVERSATION_ID_KEY, + INTERACTIVE_LEARNING_PERMISSION, +) from rasa.core.channels.telegram import TelegramOutput from rasa.utils.endpoints import EndpointConfig from tests.core import utilities -from tests.core.conftest import MOODBOT_MODEL_PATH # this is needed so that the tests included as code examples look better from tests.utilities import json_of_latest_request, latest_request -MODEL_PATH = MOODBOT_MODEL_PATH logger = logging.getLogger(__name__) +SLACK_TEST_ATTACHMENT = { + "fallback": "Financial Advisor Summary", + "color": "#36a64f", + "author_name": "ABE", + "title": "Financial Advisor Summary", + "title_link": "http://tenfactorialrocks.com", + "image_url": "https://r.com/cancel/r12", + "thumb_url": "https://r.com/cancel/r12", + "actions": [ + { + "type": "button", + "text": "\ud83d\udcc8 Dashboard", + "url": "https://r.com/cancel/r12", + "style": "primary", + }, + { + "type": "button", + "text": "\ud83d\udccb Download XL", + "url": "https://r.com/cancel/r12", + "style": "danger", + }, + { + "type": "button", + "text": "\ud83d\udce7 E-Mail", + "url": "https://r.com/cancel/r12", + "style": "danger", + }, + ], + "footer": "Powered by 1010rocks", + "ts": 1531889719, +} + def fake_sanic_run(*args, **kwargs): """Used to replace `run` method of a Sanic server to avoid hanging.""" @@ -50,6 +88,9 @@ def fake_send_message(*args, **kwargs): async def test_send_response(default_channel, default_tracker): text_only_message = {"text": "hey"} + multiline_text_message = { + "text": "This message should come first: \n\nThis is message two \nThis as well\n\n" + } image_only_message = {"image": "https://i.imgur.com/nGF1K8f.jpg"} text_and_image_message = { "text": "look at this", @@ -61,6 +102,9 @@ async def test_send_response(default_channel, default_tracker): } await default_channel.send_response(default_tracker.sender_id, text_only_message) + await default_channel.send_response( + default_tracker.sender_id, multiline_text_message + ) await default_channel.send_response(default_tracker.sender_id, image_only_message) await default_channel.send_response( default_tracker.sender_id, text_and_image_message @@ -68,25 +112,35 @@ async def test_send_response(default_channel, default_tracker): await default_channel.send_response(default_tracker.sender_id, custom_json_message) collected = default_channel.messages - assert len(collected) == 6 + assert len(collected) == 8 # text only message assert collected[0] == {"recipient_id": "my-sender", "text": "hey"} - # image only message + # multiline text message, should split on '\n\n' assert collected[1] == { "recipient_id": "my-sender", - "image": "https://i.imgur.com/nGF1K8f.jpg", + "text": "This message should come first: ", + } + assert collected[2] == { + "recipient_id": "my-sender", + "text": "This is message two \nThis as well", } - # text & image combined - will result in two messages - assert collected[2] == {"recipient_id": "my-sender", "text": "look at this"} + # image only message assert collected[3] == { "recipient_id": "my-sender", - "image": "https://i.imgur.com/T5xVo.jpg", + "image": "https://i.imgur.com/nGF1K8f.jpg", } + + # text & image combined - will result in two messages assert collected[4] == {"recipient_id": "my-sender", "text": "look at this"} 
assert collected[5] == { + "recipient_id": "my-sender", + "image": "https://i.imgur.com/T5xVo.jpg", + } + assert collected[6] == {"recipient_id": "my-sender", "text": "look at this"} + assert collected[7] == { "recipient_id": "my-sender", "custom": {"some_random_arg": "value", "another_arg": "value2"}, } @@ -106,7 +160,9 @@ async def test_console_input(): ) await console.record_messages( - server_url="https://example.com", max_message_limit=3 + server_url="https://example.com", + max_message_limit=3, + sender_id="default", ) r = latest_request( @@ -201,13 +257,11 @@ def test_mattermost_channel(): input_channel = MattermostInput( # this is the url of the api for your mattermost instance url="http://chat.example.com/api/v4", - # the name of your team for mattermost - team="community", - # the username of your bot user that will post - user="user@email.com", - # messages - pw="password" + # the bot token of the bot account that will post messages + token="xxxxx", # the password of your bot user that will post messages + # the webhook-url your bot should listen for messages + webhook_url="YOUR_WEBHOOK_URL", ) s = rasa.core.run.configure_app([input_channel], port=5004) @@ -409,6 +463,247 @@ async def test_callback_calls_endpoint(): assert text["text"] == "Hi there!" +def test_botframework_attachments(): + from rasa.core.channels.botframework import BotFrameworkInput, BotFramework + from copy import deepcopy + + ch = BotFrameworkInput("app_id", "app_pass") + + payload = { + "type": "message", + "id": "123", + "channelId": "msteams", + "serviceUrl": "https://smba.trafficmanager.net/emea/", + "from": {"id": "12:123", "name": "Rasa", "aadObjectId": "123"}, + "conversation": { + "conversationType": "personal", + "tenantId": "123", + "id": "a:123", + }, + "recipient": {"id": "12:123", "name": "Rasa chat"}, + } + assert ch.add_attachments_to_metadata(payload, None) is None + + attachments = [ + { + "contentType": "application/vnd.microsoft.teams.file.download.info", + "content": { + "downloadUrl": "https://test.sharepoint.com/personal/rasa/123", + "uniqueId": "123", + "fileType": "csv", + }, + "contentUrl": "https://test.sharepoint.com/personal/rasa/123", + "name": "rasa-test.csv", + } + ] + payload["attachments"] = attachments + + assert ch.add_attachments_to_metadata(payload, None) == {"attachments": attachments} + + metadata = {"test": 1, "bigger_test": {"key": "value"}} + updated_metadata = deepcopy(metadata) + updated_metadata.update({"attachments": attachments}) + + assert ch.add_attachments_to_metadata(payload, metadata) == updated_metadata + + +def test_slack_metadata(): + from rasa.core.channels.slack import SlackInput + + user = "user1" + channel = "channel1" + authed_users = ["XXXXXXX", "YYYYYYY", "ZZZZZZZ"] + ts = "1579802617.000800" + header = {"content-type": "application/json"} + direct_message_event = { + "authed_users": authed_users, + "event": { + "client_msg_id": "XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", + "type": "message", + "text": "hello world", + "user": user, + "ts": ts, + "team": "XXXXXXXXX", + "blocks": [ + { + "type": "rich_text", + "block_id": "XXXXX", + "elements": [ + { + "type": "rich_text_section", + "elements": [{"type": "text", "text": "hi"}], + } + ], + } + ], + "channel": channel, + "event_ts": "1579802617.000800", + "channel_type": "im", + }, + } + + input_channel = SlackInput( + slack_token="YOUR_SLACK_TOKEN", slack_channel="YOUR_SLACK_CHANNEL" + ) + + r = Mock() + r.json = direct_message_event + r.headers = header + metadata = input_channel.get_metadata(request=r) + 
assert metadata["out_channel"] == channel + assert metadata["users"] == authed_users + assert metadata["thread_id"] == ts + + +def test_slack_form_metadata(): + from rasa.core.channels.slack import SlackInput + + user = "user1" + channel = "channel1" + authed_user = "XXXXXXX" + ts = "1579802617.000800" + header = {"content-type": "application/x-www-form-urlencoded"} + payload = { + "type": "block_actions", + "user": {"id": authed_user, "username": user, "name": "name"}, + "channel": {"id": channel}, + "message": { + "type": "message", + "text": "text", + "user": authed_user, + "ts": ts, + "blocks": [ + { + "type": "actions", + "block_id": "XXXXX", + "elements": [ + { + "type": "button", + "action_id": "XXXXX", + "text": {"type": "plain_text", "text": "text"}, + "value": "value", + } + ], + } + ], + }, + } + form_event = {"payload": [json.dumps(payload)]} + + input_channel = SlackInput( + slack_token="YOUR_SLACK_TOKEN", slack_channel="YOUR_SLACK_CHANNEL" + ) + + r = Mock() + r.form = form_event + r.headers = header + metadata = input_channel.get_metadata(request=r) + assert metadata["out_channel"] == channel + assert metadata["users"] == authed_user + assert metadata["thread_id"] == ts + + +def test_slack_metadata_missing_keys(): + from rasa.core.channels.slack import SlackInput + from sanic.request import Request + + channel = "channel1" + ts = "1579802617.000800" + header = {"content-type": "application/json"} + direct_message_event = { + "event": { + "client_msg_id": "XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", + "type": "message", + "text": "hello world", + "ts": ts, + "team": "XXXXXXXXX", + "blocks": [ + { + "type": "rich_text", + "block_id": "XXXXX", + "elements": [ + { + "type": "rich_text_section", + "elements": [{"type": "text", "text": "hi"}], + } + ], + } + ], + "channel": channel, + "event_ts": "1579802617.000800", + "channel_type": "im", + } + } + + input_channel = SlackInput( + slack_token="YOUR_SLACK_TOKEN", slack_channel="YOUR_SLACK_CHANNEL" + ) + + r = Mock() + r.json = direct_message_event + r.headers = header + metadata = input_channel.get_metadata(request=r) + assert metadata["users"] is None + assert metadata["out_channel"] == channel + assert metadata["thread_id"] == ts + + +def test_slack_form_metadata_missing_keys(): + from rasa.core.channels.slack import SlackInput + + channel = "channel1" + ts = "1579802617.000800" + header = {"content-type": "application/x-www-form-urlencoded"} + payload = { + "type": "block_actions", + "channel": {"id": channel}, + "message": { + "type": "message", + "text": "text", + "ts": ts, + "blocks": [ + { + "type": "actions", + "block_id": "XXXXX", + "elements": [ + { + "type": "button", + "action_id": "XXXXX", + "text": {"type": "plain_text", "text": "text"}, + "value": "value", + } + ], + } + ], + }, + } + form_event = {"payload": [json.dumps(payload)]} + + input_channel = SlackInput( + slack_token="YOUR_SLACK_TOKEN", slack_channel="YOUR_SLACK_CHANNEL" + ) + + r = Mock() + r.form = form_event + r.headers = header + metadata = input_channel.get_metadata(request=r) + assert metadata["users"] is None + assert metadata["out_channel"] == channel + assert metadata["thread_id"] == ts + + +def test_slack_no_metadata(): + from rasa.core.channels.slack import SlackInput + + input_channel = SlackInput( + slack_token="YOUR_SLACK_TOKEN", slack_channel="YOUR_SLACK_CHANNEL" + ) + + r = Mock() + metadata = input_channel.get_metadata(request=r) + assert metadata == {} + + def test_slack_message_sanitization(): from rasa.core.channels.slack import SlackInput @@ 
-416,8 +711,10 @@ def test_slack_message_sanitization(): target_message_1 = "You can sit here if you want" target_message_2 = "Hey, you can sit here if you want !" target_message_3 = "Hey, you can sit here if you want!" + target_message_4 = "convert garbled url to vicdb-f.net" + target_message_5 = "convert multiple garbled url to vicdb-f.net. Also eemdb-p.net" - uid_token = "<@{}>".format(test_uid) + uid_token = f"<@{test_uid}>" raw_messages = [ test.format(uid=uid_token) for test in [ @@ -431,6 +728,8 @@ def test_slack_message_sanitization(): "You can sit here{uid}if you want", "Hey {uid}, you can sit here if you want{uid}!", "Hey{uid} , you can sit here if you want {uid}!", + "convert garbled url to <http://vicdb-f.net|vicdb-f.net>", + "convert multiple garbled url to <http://vicdb-f.net|vicdb-f.net>. Also <http://eemdb-p.net|eemdb-p.net>", ] ] @@ -441,6 +740,8 @@ def test_slack_message_sanitization(): target_message_1, target_message_2, target_message_3, + target_message_4, + target_message_5, ] sanitized_messages = [ @@ -477,6 +778,15 @@ def test_slack_init_two_parameters(): assert ch.slack_channel == "test" +def test_slack_init_three_parameters(): + from rasa.core.channels.slack import SlackInput + + ch = SlackInput("xoxb-test", "test", use_threads=True) + assert ch.slack_token == "xoxb-test" + assert ch.slack_channel == "test" + assert ch.use_threads is True + + def test_is_slack_message_none(): from rasa.core.channels.slack import SlackInput @@ -520,7 +830,7 @@ def test_slackbot_init_one_parameter(): from rasa.core.channels.slack import SlackBot ch = SlackBot("DummyToken") - assert ch.token == "DummyToken" + assert ch.client.token == "DummyToken" assert ch.slack_channel is None @@ -528,176 +838,403 @@ def test_slackbot_init_two_parameter(): from rasa.core.channels.slack import SlackBot bot = SlackBot("DummyToken", "General") - assert bot.token == "DummyToken" + assert bot.client.token == "DummyToken" + assert bot.slack_channel == "General" + + +def test_slackbot_init_three_parameter(): + from rasa.core.channels.slack import SlackBot + + bot = SlackBot("DummyToken", "General", thread_id="DummyThread") + assert bot.client.token == "DummyToken" assert bot.slack_channel == "General" + assert bot.thread_id == "DummyThread" # Use monkeypatch for sending attachments, images and plain text. 
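+# The Slack bot tests below stub Slack's `chat.postMessage` endpoint with `aioresponses`
+# and inspect the JSON body of the latest request instead of relying on the old `responses` mocks.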
@pytest.mark.filterwarnings("ignore:unclosed.*:ResourceWarning") -@responses.activate +@pytest.mark.asyncio async def test_slackbot_send_attachment_only(): from rasa.core.channels.slack import SlackBot - responses.add( - responses.POST, - "https://slack.com/api/chat.postMessage", - body='{"ok":true,"purpose":"Testing bots"}', - ) + with aioresponses() as mocked: + mocked.post( + "https://www.slack.com/api/chat.postMessage", + payload={"ok": True, "purpose": "Testing bots"}, + ) - bot = SlackBot("DummyToken", "General") - attachment = { - "fallback": "Financial Advisor Summary", - "color": "#36a64f", - "author_name": "ABE", - "title": "Financial Advisor Summary", - "title_link": "http://tenfactorialrocks.com", - "image_url": "https://r.com/cancel/r12", - "thumb_url": "https://r.com/cancel/r12", - "actions": [ - { - "type": "button", - "text": "\ud83d\udcc8 Dashboard", - "url": "https://r.com/cancel/r12", - "style": "primary", - }, - { - "type": "button", - "text": "\ud83d\udccb Download XL", - "url": "https://r.com/cancel/r12", - "style": "danger", - }, - { - "type": "button", - "text": "\ud83d\udce7 E-Mail", - "url": "https://r.com/cancel/r12", - "style": "danger", - }, - ], - "footer": "Powered by 1010rocks", - "ts": 1531889719, - } + bot = SlackBot("DummyToken", "General") + attachment = SLACK_TEST_ATTACHMENT - await bot.send_attachment("ID", attachment) + await bot.send_attachment("ID", attachment) - r = responses.calls[-1] + r = latest_request(mocked, "POST", "https://www.slack.com/api/chat.postMessage") - assert r.request.body == { - "channel": ["General"], - "as_user": ["True"], - "text": ["Attachment"], - "attachments": [json.dumps([attachment])], - } + assert r + + request_params = json_of_latest_request(r) + + assert request_params == { + "channel": "General", + "as_user": True, + "attachments": [attachment], + } @pytest.mark.filterwarnings("ignore:unclosed.*:ResourceWarning") -@responses.activate -async def test_slackbot_send_attachment_withtext(): +@pytest.mark.asyncio +async def test_slackbot_send_attachment_only_threaded(): from rasa.core.channels.slack import SlackBot - responses.add( - responses.POST, - "https://slack.com/api/chat.postMessage", - body='{"ok":true,"purpose":"Testing bots"}', - ) + with aioresponses() as mocked: + mocked.post( + "https://www.slack.com/api/chat.postMessage", + payload={"ok": True, "purpose": "Testing bots"}, + ) - bot = SlackBot("DummyToken", "General") - attachment = { - "fallback": "Financial Advisor Summary", - "color": "#36a64f", - "author_name": "ABE", - "title": "Financial Advisor Summary", - "title_link": "http://tenfactorialrocks.com", - "text": "Here is the summary:", - "image_url": "https://r.com/cancel/r12", - "thumb_url": "https://r.com/cancel/r12", - "actions": [ - { - "type": "button", - "text": "\ud83d\udcc8 Dashboard", - "url": "https://r.com/cancel/r12", - "style": "primary", - }, - { - "type": "button", - "text": "\ud83d\udccb XL", - "url": "https://r.com/cancel/r12", - "style": "danger", - }, - { - "type": "button", - "text": "\ud83d\udce7 E-Mail", - "url": "https://r.com/cancel/r123", - "style": "danger", - }, - ], - "footer": "Powered by 1010rocks", - "ts": 1531889719, - } + bot = SlackBot("DummyToken", "General", thread_id="DummyThread") + attachment = SLACK_TEST_ATTACHMENT - await bot.send_attachment("ID", attachment) + await bot.send_attachment("ID", attachment) - r = responses.calls[-1] + r = latest_request(mocked, "POST", "https://www.slack.com/api/chat.postMessage") - assert r.request.body == { - "channel": 
["General"], - "as_user": ["True"], - "text": ["Here is the summary:"], - "attachments": [json.dumps([attachment])], - } + assert r + + request_params = json_of_latest_request(r) + + assert request_params == { + "channel": "General", + "as_user": True, + "attachments": [attachment], + "thread_ts": "DummyThread", + } + + +@pytest.mark.filterwarnings("ignore:unclosed.*:ResourceWarning") +@pytest.mark.asyncio +async def test_slackbot_send_attachment_with_text(): + from rasa.core.channels.slack import SlackBot + + with aioresponses() as mocked: + mocked.post( + "https://www.slack.com/api/chat.postMessage", + payload={"ok": True, "purpose": "Testing bots"}, + ) + + bot = SlackBot("DummyToken", "General") + attachment = SLACK_TEST_ATTACHMENT + attachment["text"] = "Here is the summary:" + + await bot.send_attachment("ID", attachment) + + r = latest_request(mocked, "POST", "https://www.slack.com/api/chat.postMessage") + + assert r + + request_params = json_of_latest_request(r) + + assert request_params == { + "channel": "General", + "as_user": True, + "attachments": [attachment], + } + + +@pytest.mark.filterwarnings("ignore:unclosed.*:ResourceWarning") +@pytest.mark.asyncio +async def test_slackbot_send_attachment_with_text_threaded(): + from rasa.core.channels.slack import SlackBot + + with aioresponses() as mocked: + mocked.post( + "https://www.slack.com/api/chat.postMessage", + payload={"ok": True, "purpose": "Testing bots"}, + ) + + bot = SlackBot("DummyToken", "General", thread_id="DummyThread") + attachment = SLACK_TEST_ATTACHMENT + attachment["text"] = "Here is the summary:" + + await bot.send_attachment("ID", attachment) + + r = latest_request(mocked, "POST", "https://www.slack.com/api/chat.postMessage") + + assert r + + request_params = json_of_latest_request(r) + + assert request_params == { + "channel": "General", + "as_user": True, + "attachments": [attachment], + "thread_ts": "DummyThread", + } @pytest.mark.filterwarnings("ignore:unclosed.*:ResourceWarning") -@responses.activate +@pytest.mark.asyncio async def test_slackbot_send_image_url(): from rasa.core.channels.slack import SlackBot - responses.add( - responses.POST, - "https://slack.com/api/chat.postMessage", - body='{"ok":true,"purpose":"Testing bots"}', - ) + with aioresponses() as mocked: + mocked.post( + "https://www.slack.com/api/chat.postMessage", + payload={"ok": True, "purpose": "Testing bots"}, + ) - bot = SlackBot("DummyToken", "General") - url = "http://www.rasa.net" - await bot.send_image_url("ID", url) + bot = SlackBot("DummyToken", "General") + url = "http://www.rasa.net" + await bot.send_image_url("ID", url) + + r = latest_request(mocked, "POST", "https://www.slack.com/api/chat.postMessage") - r = responses.calls[-1] + assert r - assert r.request.body["as_user"] == ["True"] - assert r.request.body["channel"] == ["General"] - assert len(r.request.body["blocks"]) == 1 - assert '"type": "image"' in r.request.body["blocks"][0] - assert '"alt_text": "http://www.rasa.net"' in r.request.body["blocks"][0] - assert '"image_url": "http://www.rasa.net"' in r.request.body["blocks"][0] + request_params = json_of_latest_request(r) + + assert request_params["as_user"] is True + assert request_params["channel"] == "General" + assert len(request_params["blocks"]) == 1 + assert request_params["blocks"][0].get("type") == "image" + assert request_params["blocks"][0].get("alt_text") == "http://www.rasa.net" + assert request_params["blocks"][0].get("image_url") == "http://www.rasa.net" 
@pytest.mark.filterwarnings("ignore:unclosed.*:ResourceWarning") -@responses.activate +@pytest.mark.asyncio +async def test_slackbot_send_image_url_threaded(): + from rasa.core.channels.slack import SlackBot + + with aioresponses() as mocked: + mocked.post( + "https://www.slack.com/api/chat.postMessage", + payload={"ok": True, "purpose": "Testing bots"}, + ) + + bot = SlackBot("DummyToken", "General", thread_id="DummyThread") + url = "http://www.rasa.net" + await bot.send_image_url("ID", url) + + r = latest_request(mocked, "POST", "https://www.slack.com/api/chat.postMessage") + + assert r + + request_params = json_of_latest_request(r) + + assert request_params["as_user"] is True + assert request_params["channel"] == "General" + assert request_params["thread_ts"] == "DummyThread" + assert len(request_params["blocks"]) == 1 + assert request_params["blocks"][0].get("type") == "image" + assert request_params["blocks"][0].get("alt_text") == "http://www.rasa.net" + assert request_params["blocks"][0].get("image_url") == "http://www.rasa.net" + + +@pytest.mark.filterwarnings("ignore:unclosed.*:ResourceWarning") +@pytest.mark.asyncio async def test_slackbot_send_text(): from rasa.core.channels.slack import SlackBot - responses.add( - responses.POST, - "https://slack.com/api/chat.postMessage", - body='{"ok":true,"purpose":"Testing bots"}', - ) + with aioresponses() as mocked: + mocked.post( + "https://www.slack.com/api/chat.postMessage", + payload={"ok": True, "purpose": "Testing bots"}, + ) - bot = SlackBot("DummyToken", "General") - await bot.send_text_message("ID", "my message") + bot = SlackBot("DummyToken", "General") + await bot.send_text_message("ID", "my message") - r = responses.calls[-1] + r = latest_request(mocked, "POST", "https://www.slack.com/api/chat.postMessage") - assert r.parsed_body == { - "as_user": ["True"], - "channel": ["General"], - "text": ["my message"], - "type": ["mrkdwn"], - } + assert r + + request_params = json_of_latest_request(r) + + assert request_params == { + "as_user": True, + "channel": "General", + "text": "my message", + "type": "mrkdwn", + } + + +@pytest.mark.filterwarnings("ignore:unclosed.*:ResourceWarning") +@pytest.mark.asyncio +async def test_slackbot_send_text_threaded(): + from rasa.core.channels.slack import SlackBot + + with aioresponses() as mocked: + mocked.post( + "https://www.slack.com/api/chat.postMessage", + payload={"ok": True, "purpose": "Testing bots"}, + ) + + bot = SlackBot("DummyToken", "General", thread_id="DummyThread") + await bot.send_text_message("ID", "my message") + + r = latest_request(mocked, "POST", "https://www.slack.com/api/chat.postMessage") + + assert r + + request_params = json_of_latest_request(r) + + assert request_params == { + "as_user": True, + "channel": "General", + "text": "my message", + "type": "mrkdwn", + "thread_ts": "DummyThread", + } + + +@pytest.mark.filterwarnings("ignore:unclosed.*:ResourceWarning") +@pytest.mark.asyncio +async def test_slackbot_send_text_with_buttons(): + from rasa.core.channels.slack import SlackBot + + with aioresponses() as mocked: + mocked.post( + "https://www.slack.com/api/chat.postMessage", + payload={"ok": True, "purpose": "Testing bots"}, + ) + + bot = SlackBot("DummyToken", "General") + buttons = [{"title": "title", "payload": "payload"}] + + await bot.send_text_with_buttons("ID", "my message", buttons) + + r = latest_request(mocked, "POST", "https://www.slack.com/api/chat.postMessage") + + assert r + + request_params = json_of_latest_request(r) + + text_block = { + "type": 
"section", + "text": {"type": "plain_text", "text": "my message"}, + } + button_block = { + "type": "actions", + "elements": [ + { + "type": "button", + "text": {"type": "plain_text", "text": "title"}, + "value": "payload", + } + ], + } + assert request_params == { + "as_user": True, + "channel": "General", + "text": "my message", + "blocks": [text_block, button_block], + } + + +@pytest.mark.filterwarnings("ignore:unclosed.*:ResourceWarning") +@pytest.mark.asyncio +async def test_slackbot_send_text_with_buttons_threaded(): + from rasa.core.channels.slack import SlackBot + + with aioresponses() as mocked: + mocked.post( + "https://www.slack.com/api/chat.postMessage", + payload={"ok": True, "purpose": "Testing bots"}, + ) + + bot = SlackBot("DummyToken", "General", thread_id="DummyThread") + buttons = [{"title": "title", "payload": "payload"}] + + await bot.send_text_with_buttons("ID", "my message", buttons) + + r = latest_request(mocked, "POST", "https://www.slack.com/api/chat.postMessage") + + assert r + + request_params = json_of_latest_request(r) + + text_block = { + "type": "section", + "text": {"type": "plain_text", "text": "my message"}, + } + button_block = { + "type": "actions", + "elements": [ + { + "type": "button", + "text": {"type": "plain_text", "text": "title"}, + "value": "payload", + } + ], + } + assert request_params == { + "as_user": True, + "channel": "General", + "text": "my message", + "blocks": [text_block, button_block], + "thread_ts": "DummyThread", + } + + +@pytest.mark.filterwarnings("ignore:unclosed.*:ResourceWarning") +@pytest.mark.asyncio +async def test_slackbot_send_custom_json(): + from rasa.core.channels.slack import SlackBot + + with aioresponses() as mocked: + mocked.post( + "https://www.slack.com/api/chat.postMessage", + payload={"ok": True, "purpose": "Testing bots"}, + ) + + bot = SlackBot("DummyToken", "General") + await bot.send_custom_json("ID", {"test_key": "test_value"}) + + r = latest_request(mocked, "POST", "https://www.slack.com/api/chat.postMessage") + + assert r + + request_params = json_of_latest_request(r) + + assert request_params == { + "as_user": True, + "channel": "General", + "test_key": "test_value", + } + + +@pytest.mark.filterwarnings("ignore:unclosed.*:ResourceWarning") +@pytest.mark.asyncio +async def test_slackbot_send_custom_json_threaded(): + from rasa.core.channels.slack import SlackBot + + with aioresponses() as mocked: + mocked.post( + "https://www.slack.com/api/chat.postMessage", + payload={"ok": True, "purpose": "Testing bots"}, + ) + + bot = SlackBot("DummyToken", "General", thread_id="DummyThread") + await bot.send_custom_json("ID", {"test_key": "test_value"}) + + r = latest_request(mocked, "POST", "https://www.slack.com/api/chat.postMessage") + + assert r + + request_params = json_of_latest_request(r) + + assert request_params == { + "as_user": True, + "channel": "General", + "thread_ts": "DummyThread", + "test_key": "test_value", + } @pytest.mark.filterwarnings("ignore:unclosed.*:ResourceWarning") def test_channel_inheritance(): - from rasa.core.channels.channel import RestInput + from rasa.core.channels import RestInput from rasa.core.channels.rasa_chat import RasaChatInput rasa_input = RasaChatInput("https://example.com") @@ -753,7 +1290,7 @@ def test_newsline_strip(): def test_register_channel_without_route(): """Check we properly connect the input channel blueprint if route is None""" - from rasa.core.channels.channel import RestInput + from rasa.core.channels import RestInput import rasa.core input_channel = 
RestInput() @@ -766,7 +1303,7 @@ def test_register_channel_without_route(): def test_channel_registration_with_absolute_url_prefix_overwrites_route(): - from rasa.core.channels.channel import RestInput + from rasa.core.channels import RestInput import rasa.core input_channel = RestInput() @@ -795,7 +1332,7 @@ def test_channel_registration_with_absolute_url_prefix_overwrites_route(): ], ) def test_extract_input_channel(test_input, expected): - from rasa.core.channels.channel import RestInput + from rasa.core.channels import RestInput input_channel = RestInput() @@ -822,3 +1359,47 @@ async def test_rasa_chat_input(): await rasa_chat_input._fetch_public_key() assert rasa_chat_input.jwt_key == public_key assert rasa_chat_input.jwt_algorithm == jwt_algorithm + + +@pytest.mark.parametrize( + "jwt, message", + [ + ({JWT_USERNAME_KEY: "abc"}, {CONVERSATION_ID_KEY: "abc"}), + ( + { + JWT_USERNAME_KEY: "abc", + "scopes": ["a", "b", INTERACTIVE_LEARNING_PERMISSION], + }, + {CONVERSATION_ID_KEY: "test"}, + ), + ], +) +def test_has_user_permission_to_send_messages_to_conversation(jwt: Dict, message: Dict): + assert RasaChatInput._has_user_permission_to_send_messages_to_conversation( + jwt, message + ) + + +@pytest.mark.parametrize( + "jwt, message", + [ + ({JWT_USERNAME_KEY: "abc"}, {CONVERSATION_ID_KEY: "xyz"}), + ( + {JWT_USERNAME_KEY: "abc", "scopes": ["a", "b"]}, + {CONVERSATION_ID_KEY: "test"}, + ), + ], +) +def test_has_user_permission_to_send_messages_to_conversation_without_permission( + jwt: Dict, message: Dict +): + assert not RasaChatInput._has_user_permission_to_send_messages_to_conversation( + jwt, message + ) + + +def test_set_console_stream_reading_timeout(monkeypatch: MonkeyPatch): + expected = 100 + monkeypatch.setenv(console.STREAM_READING_TIMEOUT_ENV, str(100)) + + assert console._get_stream_reading_timeout() == ClientTimeout(expected) diff --git a/tests/core/test_config.py b/tests/core/test_config.py index cb9cba4af9a3..4f795d56cc3b 100644 --- a/tests/core/test_config.py +++ b/tests/core/test_config.py @@ -4,7 +4,6 @@ from tests.core.conftest import ExamplePolicy from rasa.core.config import load from rasa.core.policies.memoization import MemoizationPolicy -from rasa.core.policies.keras_policy import KerasPolicy from rasa.core.policies.fallback import FallbackPolicy from rasa.core.policies.form_policy import FormPolicy from rasa.core.policies.ensemble import PolicyEnsemble @@ -27,18 +26,6 @@ def check_memoization(p): assert p.max_history == 5 assert p.priority == 3 - def check_keras(p): - featurizer = p.featurizer - state_featurizer = featurizer.state_featurizer - # Assert policy - assert p.epochs == 50 - # Assert featurizer - assert isinstance(featurizer, MaxHistoryTrackerFeaturizer) - assert featurizer.max_history == 5 - # Assert state_featurizer - assert isinstance(state_featurizer, BinarySingleStateFeaturizer) - assert p.priority == 4 - def check_fallback(p): assert p.fallback_action_name == "action_default_fallback" assert p.nlu_threshold == 0.7 @@ -50,18 +37,6 @@ def check_form(p): ensemble_dict = { "policies": [ - { - "epochs": 50, - "name": "KerasPolicy", - "priority": 4, - "featurizer": [ - { - "max_history": 5, - "name": "MaxHistoryTrackerFeaturizer", - "state_featurizer": [{"name": "BinarySingleStateFeaturizer"}], - } - ], - }, {"max_history": 5, "priority": 3, "name": "MemoizationPolicy"}, { "core_threshold": 0.7, @@ -76,7 +51,7 @@ def check_form(p): ensemble = PolicyEnsemble.from_dict(ensemble_dict) # Check if all policies are present - assert len(ensemble) == 4 + 
assert len(ensemble) == 3 # MemoizationPolicy is parent of FormPolicy assert any( [ @@ -84,7 +59,6 @@ def check_form(p): for p in ensemble ] ) - assert any([isinstance(p, KerasPolicy) for p in ensemble]) assert any([isinstance(p, FallbackPolicy) for p in ensemble]) assert any([isinstance(p, FormPolicy) for p in ensemble]) @@ -95,7 +69,5 @@ def check_form(p): check_form(policy) else: check_memoization(policy) - elif isinstance(policy, KerasPolicy): - check_keras(policy) elif isinstance(policy, FallbackPolicy): check_fallback(policy) diff --git a/tests/core/test_data.py b/tests/core/test_data.py index c367cd760b66..7c1b2405c162 100644 --- a/tests/core/test_data.py +++ b/tests/core/test_data.py @@ -3,20 +3,24 @@ import tempfile import pytest +from pathlib import Path import rasa.data as data -from tests.core.conftest import DEFAULT_STORIES_FILE, DEFAULT_NLU_DATA +from tests.conftest import DEFAULT_NLU_DATA +from tests.core.conftest import DEFAULT_STORIES_FILE from rasa.nlu.training_data import load_data from rasa.nlu.utils import json_to_string +from rasa.utils import io def test_get_core_directory(project): data_dir = os.path.join(project, "data") core_directory = data.get_core_directory([data_dir]) - stories = os.listdir(core_directory) + core_files = os.listdir(core_directory) - assert len(stories) == 1 - assert stories[0].endswith("stories.md") + assert len(core_files) == 2 + assert any(file.endswith("stories.yml") for file in core_files) + assert any(file.endswith("rules.yml") for file in core_files) def test_get_nlu_directory(project): @@ -26,11 +30,11 @@ def test_get_nlu_directory(project): nlu_files = os.listdir(nlu_directory) assert len(nlu_files) == 1 - assert nlu_files[0].endswith("nlu.md") + assert nlu_files[0].endswith("nlu.yml") def test_get_nlu_file(project): - data_file = os.path.join(project, "data/nlu.md") + data_file = os.path.join(project, "data/nlu.yml") nlu_directory = data.get_nlu_directory(data_file) nlu_files = os.listdir(nlu_directory) @@ -38,7 +42,7 @@ def test_get_nlu_file(project): original = load_data(data_file) copied = load_data(nlu_directory) - assert nlu_files[0].endswith("nlu.md") + assert nlu_files[0].endswith("nlu.yml") assert original.intent_examples == copied.intent_examples @@ -47,10 +51,11 @@ def test_get_core_nlu_files(project): core_files, nlu_files = data.get_core_nlu_files([data_dir]) assert len(nlu_files) == 1 - assert list(nlu_files)[0].endswith("nlu.md") + assert list(nlu_files)[0].endswith("nlu.yml") - assert len(core_files) == 1 - assert list(core_files)[0].endswith("stories.md") + assert len(core_files) == 2 + assert any(file.endswith("stories.yml") for file in core_files) + assert any(file.endswith("rules.yml") for file in core_files) def test_get_core_nlu_directories(project): @@ -60,12 +65,13 @@ def test_get_core_nlu_directories(project): nlu_files = os.listdir(nlu_directory) assert len(nlu_files) == 1 - assert nlu_files[0].endswith("nlu.md") + assert nlu_files[0].endswith("nlu.yml") - stories = os.listdir(core_directory) + core_files = os.listdir(core_directory) - assert len(stories) == 1 - assert stories[0].endswith("stories.md") + assert len(core_files) == 2 + assert any(file.endswith("stories.yml") for file in core_files) + assert any(file.endswith("rules.yml") for file in core_files) def test_get_core_nlu_directories_with_none(): @@ -86,8 +92,8 @@ def test_same_file_names_get_resolved(tmpdir): shutil.copy2(DEFAULT_STORIES_FILE, data_dir_one) shutil.copy2(DEFAULT_STORIES_FILE, data_dir_two) - nlu_dir_one = 
os.path.join(tmpdir.join("one").join("nlu.md").strpath) - nlu_dir_two = os.path.join(tmpdir.join("two").join("nlu.md").strpath) + nlu_dir_one = os.path.join(tmpdir.join("one").join("nlu.yml").strpath) + nlu_dir_two = os.path.join(tmpdir.join("two").join("nlu.yml").strpath) shutil.copy2(DEFAULT_NLU_DATA, nlu_dir_one) shutil.copy2(DEFAULT_NLU_DATA, nlu_dir_two) @@ -96,7 +102,7 @@ def test_same_file_names_get_resolved(tmpdir): nlu_files = os.listdir(nlu_directory) assert len(nlu_files) == 2 - assert all([f.endswith("nlu.md") for f in nlu_files]) + assert all([f.endswith("nlu.yml") for f in nlu_files]) stories = os.listdir(core_directory) @@ -109,47 +115,55 @@ def test_same_file_names_get_resolved(tmpdir): [ ( "dialogflow", - { + [ + "data/examples/dialogflow/agent.json", "data/examples/dialogflow/entities/cuisine.json", - "data/examples/dialogflow/intents/affirm.json", - "data/examples/dialogflow/entities/location_entries_es.json", - "data/examples/dialogflow/intents/affirm_usersays_en.json", - "data/examples/dialogflow/intents/hi_usersays_es.json", + "data/examples/dialogflow/entities/cuisine_entries_en.json", "data/examples/dialogflow/entities/cuisine_entries_es.json", - "data/examples/dialogflow/intents/inform_usersays_en.json", - "data/examples/dialogflow/intents/hi.json", - "data/examples/dialogflow/intents/goodbye_usersays_en.json", - "data/examples/dialogflow/agent.json", - "data/examples/dialogflow/intents/hi_usersays_en.json", "data/examples/dialogflow/entities/location.json", - "data/examples/dialogflow/intents/affirm_usersays_es.json", - "data/examples/dialogflow/entities/cuisine_entries_en.json", - "data/examples/dialogflow/package.json", + "data/examples/dialogflow/entities/location_entries_en.json", + "data/examples/dialogflow/entities/location_entries_es.json", "data/examples/dialogflow/intents/Default Fallback Intent.json", - "data/examples/dialogflow/intents/goodbye_usersays_es.json", + "data/examples/dialogflow/intents/affirm.json", + "data/examples/dialogflow/intents/affirm_usersays_en.json", + "data/examples/dialogflow/intents/affirm_usersays_es.json", "data/examples/dialogflow/intents/goodbye.json", - "data/examples/dialogflow/entities/location_entries_en.json", + "data/examples/dialogflow/intents/goodbye_usersays_en.json", + "data/examples/dialogflow/intents/goodbye_usersays_es.json", + "data/examples/dialogflow/intents/hi.json", + "data/examples/dialogflow/intents/hi_usersays_en.json", + "data/examples/dialogflow/intents/hi_usersays_es.json", "data/examples/dialogflow/intents/inform.json", + "data/examples/dialogflow/intents/inform_usersays_en.json", "data/examples/dialogflow/intents/inform_usersays_es.json", - }, + "data/examples/dialogflow/package.json", + ], + ), + ( + "luis", + [ + "data/examples/luis/demo-restaurants_v2.json", + "data/examples/luis/demo-restaurants_v4.json", + "data/examples/luis/demo-restaurants_v5.json", + ], ), - ("luis", {"data/examples/luis/demo-restaurants.json"}), ( "rasa", - { + [ + "data/examples/rasa/demo-rasa-multi-intent.md", + "data/examples/rasa/demo-rasa-responses.md", "data/examples/rasa/demo-rasa.json", "data/examples/rasa/demo-rasa.md", - "data/examples/rasa/demo-rasa-responses.md", - }, + ], ), - ("wit", {"data/examples/wit/demo-flights.json"}), + ("wit", ["data/examples/wit/demo-flights.json"]), ], ) def test_find_nlu_files_with_different_formats(test_input, expected): examples_dir = "data/examples" data_dir = os.path.join(examples_dir, test_input) core_files, nlu_files = data.get_core_nlu_files([data_dir]) - assert nlu_files == 
expected + assert [Path(f) for f in nlu_files] == [Path(f) for f in expected] def test_is_nlu_file_with_json(): @@ -163,8 +177,8 @@ def test_is_nlu_file_with_json(): directory = tempfile.mkdtemp() file = os.path.join(directory, "test.json") - with open(file, "w", encoding="utf-8") as f: - f.write(json_to_string(test)) + + io.write_text_file(json_to_string(test), file) assert data.is_nlu_file(file) @@ -172,7 +186,12 @@ def test_is_nlu_file_with_json(): def test_is_not_nlu_file_with_json(): directory = tempfile.mkdtemp() file = os.path.join(directory, "test.json") - with open(file, "w", encoding="utf-8") as f: - f.write('{"test": "a"}') + io.write_text_file('{"test": "a"}', file) assert not data.is_nlu_file(file) + + +def test_get_story_file_with_yaml(): + examples_dir = "data/test_yaml_stories" + core_files, nlu_files = data.get_core_nlu_files([examples_dir]) + assert core_files diff --git a/tests/core/test_dialogues.py b/tests/core/test_dialogues.py index bc8ba5b07f81..a3a8c09c051a 100644 --- a/tests/core/test_dialogues.py +++ b/tests/core/test_dialogues.py @@ -4,10 +4,15 @@ import pytest import rasa.utils.io +from rasa.core.conversation import Dialogue from rasa.core.domain import Domain from rasa.core.tracker_store import InMemoryTrackerStore +from tests.core.conftest import ( + DEFAULT_DOMAIN_PATH_WITH_SLOTS_AND_NO_ACTIONS, + TEST_DIALOGUES, + EXAMPLE_DOMAINS, +) from tests.core.utilities import tracker_from_dialogue_file -from tests.core.conftest import TEST_DIALOGUES, EXAMPLE_DOMAINS @pytest.mark.parametrize("filename", TEST_DIALOGUES) @@ -30,9 +35,18 @@ def test_inmemory_tracker_store(pair): assert restored == tracker -def test_tracker_restaurant(): - domain = Domain.load("examples/restaurantbot/domain.yml") - filename = "data/test_dialogues/restaurantbot.json" +def test_tracker_default(): + domain = Domain.load(DEFAULT_DOMAIN_PATH_WITH_SLOTS_AND_NO_ACTIONS) + filename = "data/test_dialogues/default.json" tracker = tracker_from_dialogue_file(filename, domain) - assert tracker.get_slot("price") == "lo" - assert tracker.get_slot("name") is None # slot doesn't exist! + assert tracker.get_slot("name") == "Peter" + assert tracker.get_slot("price") is None # slot doesn't exist! 
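+# The next test round-trips a tracker through `InMemoryTrackerStore.serialise_tracker`
+# and `Dialogue.from_parameters` and expects an identical dialogue back.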
+ + +def test_dialogue_from_parameters(): + domain = Domain.load(DEFAULT_DOMAIN_PATH_WITH_SLOTS_AND_NO_ACTIONS) + filename = "data/test_dialogues/default.json" + tracker = tracker_from_dialogue_file(filename, domain) + serialised_dialogue = InMemoryTrackerStore.serialise_tracker(tracker) + deserialised_dialogue = Dialogue.from_parameters(json.loads(serialised_dialogue)) + assert tracker.as_dialogue().as_dict() == deserialised_dialogue.as_dict() diff --git a/tests/core/test_domain.py b/tests/core/test_domain.py index 047cf906e98b..2820af9976c7 100644 --- a/tests/core/test_domain.py +++ b/tests/core/test_domain.py @@ -1,20 +1,30 @@ +import copy import json +from pathlib import Path +from typing import Dict import pytest from _pytest.tmpdir import TempdirFactory +from rasa.constants import DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES from rasa.core.constants import ( DEFAULT_KNOWLEDGE_BASE_ACTION, SLOT_LISTED_ITEMS, SLOT_LAST_OBJECT, SLOT_LAST_OBJECT_TYPE, + DEFAULT_INTENTS, ) +from rasa.core.domain import USED_ENTITIES_KEY, USE_ENTITIES_KEY, IGNORE_ENTITIES_KEY from rasa.core import training, utils -from rasa.core.domain import Domain, InvalidDomain +from rasa.core.domain import Domain, InvalidDomain, SessionConfig from rasa.core.featurizers import MaxHistoryTrackerFeaturizer from rasa.core.slots import TextSlot, UnfeaturizedSlot -from tests.core import utilities -from tests.core.conftest import DEFAULT_DOMAIN_PATH_WITH_SLOTS, DEFAULT_STORIES_FILE +from tests.core.conftest import ( + DEFAULT_DOMAIN_PATH_WITH_SLOTS, + DEFAULT_DOMAIN_PATH_WITH_SLOTS_AND_NO_ACTIONS, + DEFAULT_STORIES_FILE, +) +from rasa.utils import io as io_utils async def test_create_train_data_no_history(default_domain): @@ -155,8 +165,17 @@ def test_domain_from_template(): domain = Domain.load(domain_file) assert not domain.is_empty() - assert len(domain.intents) == 10 - assert len(domain.action_names) == 11 + assert len(domain.intents) == 10 + len(DEFAULT_INTENTS) + assert len(domain.action_names) == 15 + + +def test_avoid_action_repetition(): + domain = Domain.load(DEFAULT_DOMAIN_PATH_WITH_SLOTS) + domain_with_no_actions = Domain.load(DEFAULT_DOMAIN_PATH_WITH_SLOTS_AND_NO_ACTIONS) + + assert not domain.is_empty() and not domain_with_no_actions.is_empty() + assert len(domain.intents) == len(domain_with_no_actions.intents) + assert len(domain.action_names) == len(domain_with_no_actions.action_names) def test_utter_templates(): @@ -172,21 +191,18 @@ def test_utter_templates(): assert domain.random_template_for("utter_greet") == expected_template -def test_custom_slot_type(tmpdir): - domain_path = utilities.write_text_to_file( - tmpdir, - "domain.yml", +def test_custom_slot_type(tmpdir: Path): + domain_path = str(tmpdir / "domain.yml") + io_utils.write_text_file( """ slots: custom: type: tests.core.conftest.CustomSlot - templates: + responses: utter_greet: - - text: hey there! - - actions: - - utter_greet """, + - text: hey there! """, + domain_path, ) Domain.load(domain_path) @@ -199,70 +215,139 @@ def test_custom_slot_type(tmpdir): custom: type: tests.core.conftest.Unknown - templates: + responses: utter_greet: - - text: hey there! - - actions: - - utter_greet""", + - text: hey there!""", """ slots: custom: type: blubblubblub - templates: + responses: utter_greet: - - text: hey there! 
- - actions: - - utter_greet""", + - text: hey there!""", ], ) def test_domain_fails_on_unknown_custom_slot_type(tmpdir, domain_unkown_slot_type): - domain_path = utilities.write_text_to_file( - tmpdir, "domain.yml", domain_unkown_slot_type - ) + domain_path = str(tmpdir / "domain.yml") + io_utils.write_text_file(domain_unkown_slot_type, domain_path) with pytest.raises(ValueError): Domain.load(domain_path) +def test_domain_to_dict(): + test_yaml = """ + actions: + - action_save_world + config: + store_entities_as_slots: true + entities: [] + forms: [] + intents: [] + responses: + utter_greet: + - text: hey there! + session_config: + carry_over_slots_to_new_session: true + session_expiration_time: 60 + slots: {}""" + + domain_as_dict = Domain.from_yaml(test_yaml).as_dict() + + assert domain_as_dict == { + "actions": ["action_save_world"], + "config": {"store_entities_as_slots": True}, + "entities": [], + "forms": [], + "intents": [], + "responses": {"utter_greet": [{"text": "hey there!"}]}, + "session_config": { + "carry_over_slots_to_new_session": True, + "session_expiration_time": 60, + }, + "slots": {}, + } + + def test_domain_to_yaml(): - test_yaml = """actions: + test_yaml = f""" +%YAML 1.2 +--- +actions: +- action_save_world +config: + store_entities_as_slots: true +entities: [] +forms: [] +intents: [] +responses: + utter_greet: + - text: hey there! +session_config: + carry_over_slots_to_new_session: true + session_expiration_time: {DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES} +slots: {{}}""" + + domain = Domain.from_yaml(test_yaml) + + actual_yaml = domain.as_yaml() + + assert actual_yaml.strip() == test_yaml.strip() + + +def test_domain_to_yaml_deprecated_templates(): + test_yaml = f"""actions: - utter_greet config: store_entities_as_slots: true entities: [] forms: [] intents: [] -slots: {} templates: utter_greet: - - text: hey there!""" + - text: hey there! +session_config: + carry_over_slots_to_new_session: true + session_expiration_time: {DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES} +slots: {{}}""" + + target_yaml = f"""actions: +- utter_greet +config: + store_entities_as_slots: true +entities: [] +forms: [] +intents: [] +responses: + utter_greet: + - text: hey there! +session_config: + carry_over_slots_to_new_session: true + session_expiration_time: {DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES} +slots: {{}}""" domain = Domain.from_yaml(test_yaml) # python 3 and 2 are different here, python 3 will have a leading set # of --- at the beginning of the yml - assert domain.as_yaml().strip().endswith(test_yaml.strip()) + assert domain.as_yaml().strip().endswith(target_yaml.strip()) assert Domain.from_yaml(domain.as_yaml()) is not None def test_merge_yaml_domains(): - test_yaml_1 = """actions: -- utter_greet -config: + test_yaml_1 = """config: store_entities_as_slots: true entities: [] intents: [] slots: {} -templates: +responses: utter_greet: - text: hey there!""" - test_yaml_2 = """actions: -- utter_greet -- utter_goodbye -config: + test_yaml_2 = """config: store_entities_as_slots: false +session_config: + session_expiration_time: 20 + carry_over_slots: true entities: - cuisine intents: @@ -270,7 +355,9 @@ def test_merge_yaml_domains(): slots: cuisine: type: text -templates: +responses: + utter_goodbye: + - text: bye! 
utter_greet: - text: hey you!""" @@ -280,36 +367,92 @@ def test_merge_yaml_domains(): # single attribute should be taken from domain_1 assert domain.store_entities_as_slots # conflicts should be taken from domain_1 - assert domain.templates == {"utter_greet": [{"text": "hey there!"}]} + assert domain.templates == { + "utter_greet": [{"text": "hey there!"}], + "utter_goodbye": [{"text": "bye!"}], + } # lists should be deduplicated and merged - assert domain.intents == ["greet"] + assert domain.intents == sorted(["greet", *DEFAULT_INTENTS]) assert domain.entities == ["cuisine"] assert isinstance(domain.slots[0], TextSlot) assert domain.slots[0].name == "cuisine" assert sorted(domain.user_actions) == sorted(["utter_greet", "utter_goodbye"]) + assert domain.session_config == SessionConfig(20, True) domain = domain_1.merge(domain_2, override=True) # single attribute should be taken from domain_2 assert not domain.store_entities_as_slots # conflicts should take value from domain_2 - assert domain.templates == {"utter_greet": [{"text": "hey you!"}]} + assert domain.templates == { + "utter_greet": [{"text": "hey you!"}], + "utter_goodbye": [{"text": "bye!"}], + } + assert domain.session_config == SessionConfig(20, True) + + +def test_merge_session_config_if_first_is_not_default(): + yaml1 = """ +session_config: + session_expiration_time: 20 + carry_over_slots: true""" + + yaml2 = """ + session_config: + session_expiration_time: 40 + carry_over_slots: true + """ + + domain1 = Domain.from_yaml(yaml1) + domain2 = Domain.from_yaml(yaml2) + + merged = domain1.merge(domain2) + assert merged.session_config == SessionConfig(20, True) + + merged = domain1.merge(domain2, override=True) + assert merged.session_config == SessionConfig(40, True) + + +def test_merge_domain_with_forms(): + test_yaml_1 = """ + forms: + # Old style form definitions (before RulePolicy) + - my_form + - my_form2 + """ + + test_yaml_2 = """ + forms: + - my_form3: + slot1: + type: from_text + """ + + domain_1 = Domain.from_yaml(test_yaml_1) + domain_2 = Domain.from_yaml(test_yaml_2) + domain = domain_1.merge(domain_2) + + expected_number_of_forms = 3 + assert len(domain.form_names) == expected_number_of_forms + assert len(domain.forms) == expected_number_of_forms @pytest.mark.parametrize( - "intents, intent_properties", + "intents, entities, intent_properties", [ ( ["greet", "goodbye"], + ["entity", "other", "third"], { - "greet": {"use_entities": True, "ignore_entities": []}, - "goodbye": {"use_entities": True, "ignore_entities": []}, + "greet": {USED_ENTITIES_KEY: ["entity", "other", "third"]}, + "goodbye": {USED_ENTITIES_KEY: ["entity", "other", "third"]}, }, ), ( - [{"greet": {"use_entities": []}}, "goodbye"], + [{"greet": {USE_ENTITIES_KEY: []}}, "goodbye"], + ["entity", "other", "third"], { - "greet": {"use_entities": [], "ignore_entities": []}, - "goodbye": {"use_entities": True, "ignore_entities": []}, + "greet": {USED_ENTITIES_KEY: []}, + "goodbye": {USED_ENTITIES_KEY: ["entity", "other", "third"]}, }, ), ( @@ -317,67 +460,58 @@ def test_merge_yaml_domains(): { "greet": { "triggers": "utter_goodbye", - "use_entities": ["entity"], - "ignore_entities": ["other"], + USE_ENTITIES_KEY: ["entity"], + IGNORE_ENTITIES_KEY: ["other"], } }, "goodbye", ], + ["entity", "other", "third"], { - "greet": { - "triggers": "utter_goodbye", - "use_entities": ["entity"], - "ignore_entities": ["other"], - }, - "goodbye": {"use_entities": True, "ignore_entities": []}, + "greet": {"triggers": "utter_goodbye", USED_ENTITIES_KEY: ["entity"]}, + 
"goodbye": {USED_ENTITIES_KEY: ["entity", "other", "third"]}, }, ), ( [ - {"greet": {"triggers": "utter_goodbye", "use_entities": None}}, - {"goodbye": {"use_entities": [], "ignore_entities": []}}, + {"greet": {"triggers": "utter_goodbye", USE_ENTITIES_KEY: None}}, + {"goodbye": {USE_ENTITIES_KEY: [], IGNORE_ENTITIES_KEY: []}}, ], + ["entity", "other", "third"], { - "greet": { - "use_entities": [], - "ignore_entities": [], - "triggers": "utter_goodbye", - }, - "goodbye": {"use_entities": [], "ignore_entities": []}, + "greet": {USED_ENTITIES_KEY: [], "triggers": "utter_goodbye"}, + "goodbye": {USED_ENTITIES_KEY: []}, }, ), ], ) -def test_collect_intent_properties(intents, intent_properties): - assert Domain.collect_intent_properties(intents) == intent_properties +def test_collect_intent_properties(intents, entities, intent_properties): + Domain._add_default_intents(intent_properties, entities) + + assert Domain.collect_intent_properties(intents, entities) == intent_properties def test_load_domain_from_directory_tree(tmpdir_factory: TempdirFactory): root = tmpdir_factory.mktemp("Parent Bot") root_domain = {"actions": ["utter_root", "utter_root2"]} - utils.dump_obj_as_yaml_to_file(root / "domain.yml", root_domain) + utils.dump_obj_as_yaml_to_file(root / "domain_pt1.yml", root_domain) subdirectory_1 = root / "Skill 1" subdirectory_1.mkdir() skill_1_domain = {"actions": ["utter_skill_1"]} - utils.dump_obj_as_yaml_to_file(subdirectory_1 / "domain.yml", skill_1_domain) + utils.dump_obj_as_yaml_to_file(subdirectory_1 / "domain_pt2.yml", skill_1_domain) subdirectory_2 = root / "Skill 2" subdirectory_2.mkdir() skill_2_domain = {"actions": ["utter_skill_2"]} - utils.dump_obj_as_yaml_to_file(subdirectory_2 / "domain.yml", skill_2_domain) + utils.dump_obj_as_yaml_to_file(subdirectory_2 / "domain_pt3.yml", skill_2_domain) subsubdirectory = subdirectory_2 / "Skill 2-1" subsubdirectory.mkdir() skill_2_1_domain = {"actions": ["utter_subskill", "utter_root"]} # Check if loading from `.yaml` also works - utils.dump_obj_as_yaml_to_file(subsubdirectory / "domain.yaml", skill_2_1_domain) - - subsubdirectory_2 = subdirectory_2 / "Skill 2-2" - subsubdirectory_2.mkdir() - excluded_domain = {"actions": ["should not be loaded"]} utils.dump_obj_as_yaml_to_file( - subsubdirectory_2 / "other_name.yaml", excluded_domain + subsubdirectory / "domain_pt4.yaml", skill_2_1_domain ) actual = Domain.load(str(root)) @@ -458,7 +592,7 @@ def test_check_domain_sanity_on_invalid_domain(): slots=[], templates={}, action_names=["random_name", "random_name"], - form_names=[], + forms=[], ) with pytest.raises(InvalidDomain): @@ -468,7 +602,7 @@ def test_check_domain_sanity_on_invalid_domain(): slots=[TextSlot("random_name"), TextSlot("random_name")], templates={}, action_names=[], - form_names=[], + forms=[], ) with pytest.raises(InvalidDomain): @@ -478,7 +612,7 @@ def test_check_domain_sanity_on_invalid_domain(): slots=[], templates={}, action_names=[], - form_names=[], + forms=[], ) with pytest.raises(InvalidDomain): @@ -488,7 +622,7 @@ def test_check_domain_sanity_on_invalid_domain(): slots=[], templates={}, action_names=[], - form_names=["random_name", "random_name"], + forms=["random_name", "random_name"], ) @@ -514,22 +648,83 @@ def test_is_empty(): assert Domain.empty().is_empty() -def test_clean_domain(): +def test_transform_intents_for_file_default(): + domain_path = "data/test_domains/default_unfeaturized_entities.yml" + domain = Domain.load(domain_path) + transformed = domain._transform_intents_for_file() + + expected = [ + 
{"greet": {USE_ENTITIES_KEY: ["name"]}}, + {"default": {IGNORE_ENTITIES_KEY: ["unrelated_recognized_entity"]}}, + {"goodbye": {USE_ENTITIES_KEY: []}}, + {"thank": {USE_ENTITIES_KEY: []}}, + {"ask": {USE_ENTITIES_KEY: True}}, + {"why": {USE_ENTITIES_KEY: []}}, + {"pure_intent": {USE_ENTITIES_KEY: True}}, + ] + + assert transformed == expected + + +def test_transform_intents_for_file_with_mapping(): + domain_path = "data/test_domains/default_with_mapping.yml" + domain = Domain.load(domain_path) + transformed = domain._transform_intents_for_file() + + expected = [ + {"greet": {"triggers": "utter_greet", USE_ENTITIES_KEY: True}}, + {"default": {"triggers": "utter_default", USE_ENTITIES_KEY: True}}, + {"goodbye": {USE_ENTITIES_KEY: True}}, + ] + + assert transformed == expected + + +def test_clean_domain_for_file(): domain_path = "data/test_domains/default_unfeaturized_entities.yml" cleaned = Domain.load(domain_path).cleaned_domain() expected = { "intents": [ - {"greet": {"use_entities": ["name"]}}, - {"default": {"ignore_entities": ["unrelated_recognized_entity"]}}, - {"goodbye": {"use_entities": []}}, - {"thank": {"use_entities": []}}, + {"greet": {USE_ENTITIES_KEY: ["name"]}}, + {"default": {IGNORE_ENTITIES_KEY: ["unrelated_recognized_entity"]}}, + {"goodbye": {USE_ENTITIES_KEY: []}}, + {"thank": {USE_ENTITIES_KEY: []}}, "ask", - {"why": {"use_entities": []}}, + {"why": {USE_ENTITIES_KEY: []}}, "pure_intent", ], "entities": ["name", "other", "unrelated_recognized_entity"], - "templates": { + "responses": { + "utter_greet": [{"text": "hey there!"}], + "utter_goodbye": [{"text": "goodbye :("}], + "utter_default": [{"text": "default message"}], + }, + "session_config": { + "carry_over_slots_to_new_session": True, + "session_expiration_time": DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES, + }, + } + + assert cleaned == expected + + +def test_clean_domain_deprecated_templates(): + domain_path = "data/test_domains/default_deprecated_templates.yml" + cleaned = Domain.load(domain_path).cleaned_domain() + + expected = { + "intents": [ + {"greet": {USE_ENTITIES_KEY: ["name"]}}, + {"default": {IGNORE_ENTITIES_KEY: ["unrelated_recognized_entity"]}}, + {"goodbye": {USE_ENTITIES_KEY: []}}, + {"thank": {USE_ENTITIES_KEY: []}}, + "ask", + {"why": {USE_ENTITIES_KEY: []}}, + "pure_intent", + ], + "entities": ["name", "other", "unrelated_recognized_entity"], + "responses": { "utter_greet": [{"text": "hey there!"}], "utter_goodbye": [{"text": "goodbye :("}], "utter_default": [{"text": "default message"}], @@ -544,8 +739,6 @@ def test_clean_domain(): def test_add_knowledge_base_slots(default_domain): - import copy - # don't modify default domain as it is used in other tests test_domain = copy.deepcopy(default_domain) @@ -564,3 +757,137 @@ def test_add_knowledge_base_slots(default_domain): assert SLOT_LISTED_ITEMS in slot_names assert SLOT_LAST_OBJECT in slot_names assert SLOT_LAST_OBJECT_TYPE in slot_names + + +@pytest.mark.parametrize( + "input_domain, expected_session_expiration_time, expected_carry_over_slots", + [ + ( + f"""session_config: + session_expiration_time: {DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES} + carry_over_slots_to_new_session: true""", + DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES, + True, + ), + ("", DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES, True), + ( + """session_config: + carry_over_slots_to_new_session: false""", + DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES, + False, + ), + ( + """session_config: + session_expiration_time: 20.2 + carry_over_slots_to_new_session: False""", + 20.2, + 
False, + ), + ("""session_config: {}""", DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES, True), + ], +) +def test_session_config( + input_domain, + expected_session_expiration_time: float, + expected_carry_over_slots: bool, +): + domain = Domain.from_yaml(input_domain) + assert ( + domain.session_config.session_expiration_time + == expected_session_expiration_time + ) + assert domain.session_config.carry_over_slots == expected_carry_over_slots + + +def test_domain_as_dict_with_session_config(): + session_config = SessionConfig(123, False) + domain = Domain.empty() + domain.session_config = session_config + + serialized = domain.as_dict() + deserialized = Domain.from_dict(serialized) + + assert deserialized.session_config == session_config + + +@pytest.mark.parametrize( + "session_config, enabled", + [ + (SessionConfig(0, True), False), + (SessionConfig(1, True), True), + (SessionConfig(-1, False), False), + ], +) +def test_are_sessions_enabled(session_config: SessionConfig, enabled: bool): + assert session_config.are_sessions_enabled() == enabled + + +def test_domain_utterance_actions_deprecated_templates(): + new_yaml = f"""config: + store_entities_as_slots: true +entities: [] +forms: [] +intents: [] +templates: + utter_greet: + - text: hey there! + utter_goodbye: + - text: bye! +session_config: + carry_over_slots_to_new_session: true + session_expiration_time: {DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES} +slots: {{}}""" + + old_yaml = f"""config: + store_entities_as_slots: true +entities: [] +forms: [] +intents: [] +responses: + utter_greet: + - text: hey there! + utter_goodbye: + - text: bye! +session_config: + carry_over_slots_to_new_session: true + session_expiration_time: {DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES} +slots: {{}}""" + + old_domain = Domain.from_yaml(old_yaml) + new_domain = Domain.from_yaml(new_yaml) + assert hash(old_domain) == hash(new_domain) + + +def test_domain_from_dict_does_not_change_input(): + input_before = { + "intents": [ + {"greet": {USE_ENTITIES_KEY: ["name"]}}, + {"default": {IGNORE_ENTITIES_KEY: ["unrelated_recognized_entity"]}}, + {"goodbye": {USE_ENTITIES_KEY: None}}, + {"thank": {USE_ENTITIES_KEY: False}}, + {"ask": {USE_ENTITIES_KEY: True}}, + {"why": {USE_ENTITIES_KEY: []}}, + "pure_intent", + ], + "entities": ["name", "unrelated_recognized_entity", "other"], + "slots": {"name": {"type": "text"}}, + "responses": { + "utter_greet": [{"text": "hey there {name}!"}], + "utter_goodbye": [{"text": "goodbye 😢"}, {"text": "bye bye 😢"}], + "utter_default": [{"text": "default message"}], + }, + } + + input_after = copy.deepcopy(input_before) + Domain.from_dict(input_after) + + assert input_after == input_before + + +@pytest.mark.parametrize( + "domain", [{}, {"intents": DEFAULT_INTENTS}, {"intents": [DEFAULT_INTENTS[0]]}] +) +def test_add_default_intents(domain: Dict): + domain = Domain.from_dict(domain) + + assert all(intent_name in domain.intents for intent_name in DEFAULT_INTENTS) diff --git a/tests/core/test_dsl.py b/tests/core/test_dsl.py index 7b17848b79c0..f04055439e42 100644 --- a/tests/core/test_dsl.py +++ b/tests/core/test_dsl.py @@ -1,366 +1,127 @@ -import os - -import json -from collections import Counter - -import numpy as np - -from rasa.core import training -from rasa.core.interpreter import RegexInterpreter -from rasa.core.training.dsl import StoryFileReader -from rasa.core.domain import Domain -from rasa.core.trackers import DialogueStateTracker -from rasa.core.events import ( - UserUttered, - ActionExecuted, - ActionExecutionRejected, - Form, 
- FormValidation, +from typing import Text, Dict + +import pytest + +from rasa.core.events import UserUttered +from rasa.core.training.dsl import EndToEndReader + + +@pytest.mark.parametrize( + "line, expected", + [ + (" greet: hi", {"intent": "greet", "true_intent": "greet", "text": "hi"}), + ( + " greet: /greet", + { + "intent": "greet", + "true_intent": "greet", + "text": "/greet", + "entities": [], + }, + ), + ( + 'greet: /greet{"test": "test"}', + { + "intent": "greet", + "entities": [ + {"entity": "test", "start": 6, "end": 22, "value": "test"} + ], + "true_intent": "greet", + "text": '/greet{"test": "test"}', + }, + ), + ( + 'greet{"test": "test"}: /greet{"test": "test"}', + { + "intent": "greet", + "entities": [ + {"entity": "test", "start": 6, "end": 22, "value": "test"} + ], + "true_intent": "greet", + "text": '/greet{"test": "test"}', + }, + ), + ( + "mood_great: [great](feeling)", + { + "intent": "mood_great", + "entities": [ + {"start": 0, "end": 5, "value": "great", "entity": "feeling"} + ], + "true_intent": "mood_great", + "text": "great", + }, + ), + ( + 'form: greet{"test": "test"}: /greet{"test": "test"}', + { + "intent": "greet", + "entities": [ + {"end": 22, "entity": "test", "start": 6, "value": "test"} + ], + "true_intent": "greet", + "text": '/greet{"test": "test"}', + }, + ), + ], ) -from rasa.core.training.structures import Story -from rasa.core.featurizers import ( - MaxHistoryTrackerFeaturizer, - BinarySingleStateFeaturizer, +def test_e2e_parsing(line: Text, expected: Dict): + reader = EndToEndReader() + actual = reader._parse_item(line) + + assert actual.as_dict() == expected + + +@pytest.mark.parametrize( + "parse_data, expected_story_string", + [ + ( + { + "text": "/simple", + "parse_data": { + "intent": {"confidence": 1.0, "name": "simple"}, + "entities": [ + {"start": 0, "end": 5, "value": "great", "entity": "feeling"} + ], + }, + }, + "simple: /simple", + ), + ( + { + "text": "great", + "parse_data": { + "intent": {"confidence": 1.0, "name": "simple"}, + "entities": [ + {"start": 0, "end": 5, "value": "great", "entity": "feeling"} + ], + }, + }, + "simple: [great](feeling)", + ), + ( + { + "text": "great", + "parse_data": { + "intent": {"confidence": 1.0, "name": "simple"}, + "entities": [], + }, + }, + "simple: great", + ), + ], ) +def test_user_uttered_to_e2e(parse_data: Dict, expected_story_string: Text): + event = UserUttered.from_story_string("user", parse_data)[0] - -async def test_can_read_test_story(default_domain): - trackers = await training.load_data( - "data/test_stories/stories.md", - default_domain, - use_story_concatenation=False, - tracker_limit=1000, - remove_duplicates=False, - ) - assert len(trackers) == 7 - # this should be the story simple_story_with_only_end -> show_it_all - # the generated stories are in a non stable order - therefore we need to - # do some trickery to find the one we want to test - tracker = [t for t in trackers if len(t.events) == 5][0] - assert tracker.events[0] == ActionExecuted("action_listen") - assert tracker.events[1] == UserUttered( - "simple", - intent={"name": "simple", "confidence": 1.0}, - parse_data={ - "text": "/simple", - "intent_ranking": [{"confidence": 1.0, "name": "simple"}], - "intent": {"confidence": 1.0, "name": "simple"}, - "entities": [], - }, - ) - assert tracker.events[2] == ActionExecuted("utter_default") - assert tracker.events[3] == ActionExecuted("utter_greet") - assert tracker.events[4] == ActionExecuted("action_listen") - - -async def 
test_can_read_test_story_with_checkpoint_after_or(default_domain): - trackers = await training.load_data( - "data/test_stories/stories_checkpoint_after_or.md", - default_domain, - use_story_concatenation=False, - tracker_limit=1000, - remove_duplicates=False, - ) - # there should be only 2 trackers - assert len(trackers) == 2 - - -async def test_persist_and_read_test_story_graph(tmpdir, default_domain): - graph = await training.extract_story_graph( - "data/test_stories/stories.md", default_domain - ) - out_path = tmpdir.join("persisted_story.md") - with open(out_path.strpath, "w", encoding="utf-8") as f: - f.write(graph.as_story_string()) - - recovered_trackers = await training.load_data( - out_path.strpath, - default_domain, - use_story_concatenation=False, - tracker_limit=1000, - remove_duplicates=False, - ) - existing_trackers = await training.load_data( - "data/test_stories/stories.md", - default_domain, - use_story_concatenation=False, - tracker_limit=1000, - remove_duplicates=False, - ) - - existing_stories = {t.export_stories() for t in existing_trackers} - for t in recovered_trackers: - story_str = t.export_stories() - assert story_str in existing_stories - existing_stories.discard(story_str) - - -async def test_persist_and_read_test_story(tmpdir, default_domain): - graph = await training.extract_story_graph( - "data/test_stories/stories.md", default_domain - ) - out_path = tmpdir.join("persisted_story.md") - Story(graph.story_steps).dump_to_file(out_path.strpath) - - recovered_trackers = await training.load_data( - out_path.strpath, - default_domain, - use_story_concatenation=False, - tracker_limit=1000, - remove_duplicates=False, - ) - existing_trackers = await training.load_data( - "data/test_stories/stories.md", - default_domain, - use_story_concatenation=False, - tracker_limit=1000, - remove_duplicates=False, - ) - existing_stories = {t.export_stories() for t in existing_trackers} - for t in recovered_trackers: - story_str = t.export_stories() - assert story_str in existing_stories - existing_stories.discard(story_str) - - -async def test_persist_form_story(tmpdir): - domain = Domain.load("data/test_domains/form.yml") - - tracker = DialogueStateTracker("", domain.slots) - - story = ( - "* greet\n" - " - utter_greet\n" - "* start_form\n" - " - some_form\n" - ' - form{"name": "some_form"}\n' - "* default\n" - " - utter_default\n" - " - some_form\n" - "* stop\n" - " - utter_ask_continue\n" - "* affirm\n" - " - some_form\n" - "* stop\n" - " - utter_ask_continue\n" - " - action_listen\n" - "* form: inform\n" - " - some_form\n" - ' - form{"name": null}\n' - "* goodbye\n" - " - utter_goodbye\n" - ) - - # simulate talking to the form - events = [ - UserUttered(intent={"name": "greet"}), - ActionExecuted("utter_greet"), - ActionExecuted("action_listen"), - # start the form - UserUttered(intent={"name": "start_form"}), - ActionExecuted("some_form"), - Form("some_form"), - ActionExecuted("action_listen"), - # out of form input - UserUttered(intent={"name": "default"}), - ActionExecutionRejected("some_form"), - ActionExecuted("utter_default"), - ActionExecuted("some_form"), - ActionExecuted("action_listen"), - # out of form input - UserUttered(intent={"name": "stop"}), - ActionExecutionRejected("some_form"), - ActionExecuted("utter_ask_continue"), - ActionExecuted("action_listen"), - # out of form input but continue with the form - UserUttered(intent={"name": "affirm"}), - FormValidation(False), - ActionExecuted("some_form"), - ActionExecuted("action_listen"), - # out of form input - 
UserUttered(intent={"name": "stop"}), - ActionExecutionRejected("some_form"), - ActionExecuted("utter_ask_continue"), - ActionExecuted("action_listen"), - # form input - UserUttered(intent={"name": "inform"}), - FormValidation(True), - ActionExecuted("some_form"), - ActionExecuted("action_listen"), - Form(None), - UserUttered(intent={"name": "goodbye"}), - ActionExecuted("utter_goodbye"), - ActionExecuted("action_listen"), - ] - [tracker.update(e) for e in events] - - assert story in tracker.export_stories() - - -async def test_read_story_file_with_cycles(tmpdir, default_domain): - graph = await training.extract_story_graph( - "data/test_stories/stories_with_cycle.md", default_domain - ) - - assert len(graph.story_steps) == 5 - - graph_without_cycles = graph.with_cycles_removed() - - assert graph.cyclic_edge_ids != set() - # sorting removed_edges converting set converting it to list - assert graph_without_cycles.cyclic_edge_ids == list() - - assert len(graph.story_steps) == len(graph_without_cycles.story_steps) == 5 - - assert len(graph_without_cycles.story_end_checkpoints) == 2 - - -async def test_generate_training_data_with_cycles(tmpdir, default_domain): - featurizer = MaxHistoryTrackerFeaturizer( - BinarySingleStateFeaturizer(), max_history=4 - ) - training_trackers = await training.load_data( - "data/test_stories/stories_with_cycle.md", default_domain, augmentation_factor=0 - ) - training_data = featurizer.featurize_trackers(training_trackers, default_domain) - y = training_data.y.argmax(axis=-1) - - # how many there are depends on the graph which is not created in a - # deterministic way but should always be 3 or 4 - assert len(training_trackers) == 3 or len(training_trackers) == 4 - - # if we have 4 trackers, there is going to be one example more for label 4 - num_threes = len(training_trackers) - 1 - # if new default actions are added the keys of the actions will be changed - - assert Counter(y) == {0: 6, 9: 3, 8: num_threes, 1: 2, 10: 1} - - -async def test_generate_training_data_with_unused_checkpoints(tmpdir, default_domain): - training_trackers = await training.load_data( - "data/test_stories/stories_unused_checkpoints.md", default_domain - ) - # there are 3 training stories: - # 2 with unused end checkpoints -> training_trackers - # 1 with unused start checkpoints -> ignored - assert len(training_trackers) == 2 - - -async def test_generate_training_data_original_and_augmented_trackers(default_domain): - training_trackers = await training.load_data( - "data/test_stories/stories_defaultdomain.md", - default_domain, - augmentation_factor=3, - ) - # there are three original stories - # augmentation factor of 3 indicates max of 3*10 augmented stories generated - # maximum number of stories should be augmented+original = 33 - original_trackers = [ - t - for t in training_trackers - if not hasattr(t, "is_augmented") or not t.is_augmented - ] - assert len(original_trackers) == 3 - assert len(training_trackers) <= 33 - - -async def test_visualize_training_data_graph(tmpdir, default_domain): - graph = await training.extract_story_graph( - "data/test_stories/stories_with_cycle.md", default_domain - ) - - graph = graph.with_cycles_removed() - - out_path = tmpdir.join("graph.html").strpath - - # this will be the plotted networkx graph - G = graph.visualize(out_path) - - assert os.path.exists(out_path) - - # we can't check the exact topology - but this should be enough to ensure - # the visualisation created a sane graph - assert set(G.nodes()) == set(range(-1, 13)) or set(G.nodes()) == 
set(range(-1, 14)) - if set(G.nodes()) == set(range(-1, 13)): - assert len(G.edges()) == 14 - elif set(G.nodes()) == set(range(-1, 14)): - assert len(G.edges()) == 16 - - -async def test_load_multi_file_training_data(default_domain): - # the stories file in `data/test_multifile_stories` is the same as in - # `data/test_stories/stories.md`, but split across multiple files - featurizer = MaxHistoryTrackerFeaturizer( - BinarySingleStateFeaturizer(), max_history=2 - ) - trackers = await training.load_data( - "data/test_stories/stories.md", default_domain, augmentation_factor=0 - ) - (tr_as_sts, tr_as_acts) = featurizer.training_states_and_actions( - trackers, default_domain - ) - hashed = [] - for sts, acts in zip(tr_as_sts, tr_as_acts): - hashed.append(json.dumps(sts + acts, sort_keys=True)) - hashed = sorted(hashed, reverse=True) - - data = featurizer.featurize_trackers(trackers, default_domain) - - featurizer_mul = MaxHistoryTrackerFeaturizer( - BinarySingleStateFeaturizer(), max_history=2 - ) - trackers_mul = await training.load_data( - "data/test_multifile_stories", default_domain, augmentation_factor=0 - ) - (tr_as_sts_mul, tr_as_acts_mul) = featurizer.training_states_and_actions( - trackers_mul, default_domain - ) - hashed_mul = [] - for sts_mul, acts_mul in zip(tr_as_sts_mul, tr_as_acts_mul): - hashed_mul.append(json.dumps(sts_mul + acts_mul, sort_keys=True)) - hashed_mul = sorted(hashed_mul, reverse=True) - - data_mul = featurizer_mul.featurize_trackers(trackers_mul, default_domain) - - assert hashed == hashed_mul - - assert np.all(data.X.sort(axis=0) == data_mul.X.sort(axis=0)) - assert np.all(data.y.sort(axis=0) == data_mul.y.sort(axis=0)) - - -async def test_load_training_data_handles_hidden_files(tmpdir, default_domain): - # create a hidden file - - with open(os.path.join(tmpdir.strpath, ".hidden"), "a") as f: - f.close() - # create a normal file - normal_file = os.path.join(tmpdir.strpath, "normal_file") - with open(normal_file, "a") as f: - f.close() - - featurizer = MaxHistoryTrackerFeaturizer( - BinarySingleStateFeaturizer(), max_history=2 - ) - trackers = await training.load_data(tmpdir.strpath, default_domain) - data = featurizer.featurize_trackers(trackers, default_domain) - - assert len(data.X) == 0 - assert len(data.y) == 0 + assert isinstance(event, UserUttered) + assert event.as_story_string(e2e=True) == expected_story_string -async def test_read_stories_with_multiline_comments(tmpdir, default_domain): - story_steps = await StoryFileReader.read_from_file( - "data/test_stories/stories_with_multiline_comments.md", - default_domain, - RegexInterpreter(), - ) +@pytest.mark.parametrize("line", [" greet{: hi"]) +def test_invalid_end_to_end_format(line: Text): + reader = EndToEndReader() - assert len(story_steps) == 4 - assert story_steps[0].block_name == "happy path" - assert len(story_steps[0].events) == 4 - assert story_steps[1].block_name == "sad path 1" - assert len(story_steps[1].events) == 7 - assert story_steps[2].block_name == "sad path 2" - assert len(story_steps[2].events) == 7 - assert story_steps[3].block_name == "say goodbye" - assert len(story_steps[3].events) == 2 + with pytest.raises(ValueError): + # noinspection PyProtectedMember + _ = reader._parse_item(line) diff --git a/tests/core/test_ensemble.py b/tests/core/test_ensemble.py index b6a8b0df6731..768541b70708 100644 --- a/tests/core/test_ensemble.py +++ b/tests/core/test_ensemble.py @@ -1,5 +1,12 @@ +from pathlib import Path +from typing import List, Any, Text + import pytest +import copy +from 
rasa.core.interpreter import RegexInterpreter, NaturalLanguageInterpreter +from rasa.core.policies.fallback import FallbackPolicy +from rasa.core.policies.form_policy import FormPolicy from rasa.core.policies.policy import Policy from rasa.core.policies.ensemble import ( PolicyEnsemble, @@ -7,53 +14,89 @@ SimplePolicyEnsemble, ) from rasa.core.domain import Domain +from rasa.core.policies.rule_policy import RulePolicy from rasa.core.trackers import DialogueStateTracker -from rasa.core.events import UserUttered +from rasa.core.events import UserUttered, ActiveLoop, Event + +from tests.core import utilities +from rasa.core.actions.action import ( + ACTION_DEFAULT_FALLBACK_NAME, + ACTION_RESTART_NAME, + ACTION_LISTEN_NAME, +) +from rasa.core.constants import USER_INTENT_RESTART, FORM_POLICY_PRIORITY +from rasa.core.events import ActionExecuted +from rasa.core.policies.two_stage_fallback import TwoStageFallbackPolicy +from rasa.core.policies.mapping_policy import MappingPolicy class WorkingPolicy(Policy): @classmethod - def load(cls, path): + def load(cls, _) -> Policy: return WorkingPolicy() - def persist(self, path): + def persist(self, _) -> None: pass - def train(self, training_trackers, domain, **kwargs): + def train( + self, + training_trackers: List[DialogueStateTracker], + domain: Domain, + interpreter: NaturalLanguageInterpreter, + **kwargs: Any, + ) -> None: pass - def predict_action_probabilities(self, tracker, domain): + def predict_action_probabilities( + self, + tracker: DialogueStateTracker, + domain: Domain, + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + **kwargs: Any, + ) -> List[float]: pass - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: return isinstance(other, WorkingPolicy) -def test_policy_loading_simple(tmpdir): +def test_policy_loading_simple(tmp_path: Path): original_policy_ensemble = PolicyEnsemble([WorkingPolicy()]) - original_policy_ensemble.train([], None) - original_policy_ensemble.persist(str(tmpdir)) + original_policy_ensemble.train([], None, RegexInterpreter()) + original_policy_ensemble.persist(str(tmp_path)) - loaded_policy_ensemble = PolicyEnsemble.load(str(tmpdir)) + loaded_policy_ensemble = PolicyEnsemble.load(str(tmp_path)) assert original_policy_ensemble.policies == loaded_policy_ensemble.policies class ConstantPolicy(Policy): def __init__(self, priority: int = None, predict_index: int = None) -> None: - super(ConstantPolicy, self).__init__(priority=priority) + super().__init__(priority=priority) self.predict_index = predict_index @classmethod - def load(cls, path): + def load(cls, _) -> Policy: pass - def persist(self, path): + def persist(self, _) -> None: pass - def train(self, training_trackers, domain, **kwargs): + def train( + self, + training_trackers: List[DialogueStateTracker], + domain: Domain, + interpreter: NaturalLanguageInterpreter, + **kwargs: Any, + ) -> None: pass - def predict_action_probabilities(self, tracker, domain): + def predict_action_probabilities( + self, + tracker: DialogueStateTracker, + domain: Domain, + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + **kwargs: Any, + ) -> List[float]: result = [0.0] * domain.num_actions result[self.predict_index] = 1.0 return result @@ -73,65 +116,220 @@ def test_policy_priority(): i = 1 # index of priority_2 in ensemble_0 result, best_policy = policy_ensemble_0.probabilities_using_best_policy( - tracker, domain + tracker, domain, RegexInterpreter() ) assert best_policy == "policy_{}_{}".format(i, type(priority_2).__name__) assert 
result == priority_2_result i = 0 # index of priority_2 in ensemble_1 result, best_policy = policy_ensemble_1.probabilities_using_best_policy( - tracker, domain + tracker, domain, RegexInterpreter() ) assert best_policy == "policy_{}_{}".format(i, type(priority_2).__name__) assert result == priority_2_result +def test_fallback_mapping_restart(): + domain = Domain.load("data/test_domains/default.yml") + events = [ + ActionExecuted(ACTION_DEFAULT_FALLBACK_NAME, timestamp=1), + utilities.user_uttered(USER_INTENT_RESTART, 1, timestamp=2), + ] + tracker = DialogueStateTracker.from_events("test", events, []) + + two_stage_fallback_policy = TwoStageFallbackPolicy( + priority=2, deny_suggestion_intent_name="deny" + ) + mapping_policy = MappingPolicy(priority=1) + + mapping_fallback_ensemble = SimplePolicyEnsemble( + [two_stage_fallback_policy, mapping_policy] + ) + + result, best_policy = mapping_fallback_ensemble.probabilities_using_best_policy( + tracker, domain, RegexInterpreter() + ) + max_confidence_index = result.index(max(result)) + index_of_mapping_policy = 1 + next_action = domain.action_for_index(max_confidence_index, None) + + assert best_policy == f"policy_{index_of_mapping_policy}_{MappingPolicy.__name__}" + assert next_action.name() == ACTION_RESTART_NAME + + +@pytest.mark.parametrize( + "events", + [ + [ + ActiveLoop("test-form"), + ActionExecuted(ACTION_LISTEN_NAME), + utilities.user_uttered(USER_INTENT_RESTART, 1), + ], + [ + ActionExecuted(ACTION_LISTEN_NAME), + utilities.user_uttered(USER_INTENT_RESTART, 1), + ], + ], +) +def test_mapping_wins_over_form(events: List[Event]): + domain = """ + forms: + - test-form + """ + domain = Domain.from_yaml(domain) + tracker = DialogueStateTracker.from_events("test", events, []) + + ensemble = SimplePolicyEnsemble( + [ + MappingPolicy(), + ConstantPolicy(priority=1, predict_index=0), + FormPolicy(), + FallbackPolicy(), + ] + ) + result, best_policy = ensemble.probabilities_using_best_policy( + tracker, domain, RegexInterpreter() + ) + + max_confidence_index = result.index(max(result)) + next_action = domain.action_for_index(max_confidence_index, None) + + index_of_mapping_policy = 0 + assert best_policy == f"policy_{index_of_mapping_policy}_{MappingPolicy.__name__}" + assert next_action.name() == ACTION_RESTART_NAME + + +@pytest.mark.parametrize( + "ensemble", + [ + SimplePolicyEnsemble( + [ + FormPolicy(), + ConstantPolicy(FORM_POLICY_PRIORITY - 1, 0), + FallbackPolicy(), + ] + ), + SimplePolicyEnsemble([FormPolicy(), MappingPolicy()]), + ], +) +def test_form_wins_over_everything_else(ensemble: SimplePolicyEnsemble): + form_name = "test-form" + domain = f""" + forms: + - {form_name} + """ + domain = Domain.from_yaml(domain) + + events = [ + ActiveLoop("test-form"), + ActionExecuted(ACTION_LISTEN_NAME), + utilities.user_uttered("test", 1), + ] + tracker = DialogueStateTracker.from_events("test", events, []) + result, best_policy = ensemble.probabilities_using_best_policy( + tracker, domain, RegexInterpreter() + ) + + max_confidence_index = result.index(max(result)) + next_action = domain.action_for_index(max_confidence_index, None) + + index_of_form_policy = 0 + assert best_policy == f"policy_{index_of_form_policy}_{FormPolicy.__name__}" + assert next_action.name() == form_name + + +def test_fallback_wins_over_mapping(): + domain = Domain.load("data/test_domains/default.yml") + events = [ + ActionExecuted(ACTION_LISTEN_NAME), + # Low confidence should trigger fallback + utilities.user_uttered(USER_INTENT_RESTART, 0.0001), + ] + tracker = 
DialogueStateTracker.from_events("test", events, []) + + ensemble = SimplePolicyEnsemble([FallbackPolicy(), MappingPolicy()]) + + result, best_policy = ensemble.probabilities_using_best_policy( + tracker, domain, RegexInterpreter() + ) + max_confidence_index = result.index(max(result)) + index_of_fallback_policy = 0 + next_action = domain.action_for_index(max_confidence_index, None) + + assert best_policy == f"policy_{index_of_fallback_policy}_{FallbackPolicy.__name__}" + assert next_action.name() == ACTION_DEFAULT_FALLBACK_NAME + + class LoadReturnsNonePolicy(Policy): @classmethod - def load(cls, path): + def load(cls, _) -> None: return None - def persist(self, path): + def persist(self, _) -> None: pass - def train(self, training_trackers, domain, **kwargs): + def train( + self, + training_trackers: List[DialogueStateTracker], + domain: Domain, + interpreter: NaturalLanguageInterpreter, + **kwargs: Any, + ) -> None: pass - def predict_action_probabilities(self, tracker, domain): + def predict_action_probabilities( + self, + tracker: DialogueStateTracker, + domain: Domain, + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + **kwargs: Any, + ) -> List[float]: pass -def test_policy_loading_load_returns_none(tmpdir): +def test_policy_loading_load_returns_none(tmp_path: Path): original_policy_ensemble = PolicyEnsemble([LoadReturnsNonePolicy()]) - original_policy_ensemble.train([], None) - original_policy_ensemble.persist(str(tmpdir)) + original_policy_ensemble.train([], None, RegexInterpreter()) + original_policy_ensemble.persist(str(tmp_path)) with pytest.raises(Exception): - PolicyEnsemble.load(str(tmpdir)) + PolicyEnsemble.load(str(tmp_path)) class LoadReturnsWrongTypePolicy(Policy): @classmethod - def load(cls, path): + def load(cls, _) -> Text: return "" - def persist(self, path): + def persist(self, _) -> None: pass - def train(self, training_trackers, domain, **kwargs): + def train( + self, + training_trackers: List[DialogueStateTracker], + domain: Domain, + interpreter: NaturalLanguageInterpreter, + **kwargs: Any, + ) -> None: pass - def predict_action_probabilities(self, tracker, domain): + def predict_action_probabilities( + self, + tracker: DialogueStateTracker, + domain: Domain, + interpreter: NaturalLanguageInterpreter = RegexInterpreter(), + **kwargs: Any, + ) -> List[float]: pass -def test_policy_loading_load_returns_wrong_type(tmpdir): +def test_policy_loading_load_returns_wrong_type(tmp_path: Path): original_policy_ensemble = PolicyEnsemble([LoadReturnsWrongTypePolicy()]) - original_policy_ensemble.train([], None) - original_policy_ensemble.persist(str(tmpdir)) + original_policy_ensemble.train([], None, RegexInterpreter()) + original_policy_ensemble.persist(str(tmp_path)) with pytest.raises(Exception): - PolicyEnsemble.load(str(tmpdir)) + PolicyEnsemble.load(str(tmp_path)) @pytest.mark.parametrize( @@ -158,3 +356,69 @@ def test_valid_policy_configurations(valid_config): def test_invalid_policy_configurations(invalid_config): with pytest.raises(InvalidPolicyConfig): PolicyEnsemble.from_dict(invalid_config) + + +def test_from_dict_does_not_change_passed_dict_parameter(): + config = { + "policies": [ + { + "name": "TEDPolicy", + "featurizer": [ + { + "name": "MaxHistoryTrackerFeaturizer", + "max_history": 5, + "state_featurizer": [{"name": "BinarySingleStateFeaturizer"}], + } + ], + } + ] + } + + config_copy = copy.deepcopy(config) + PolicyEnsemble.from_dict(config_copy) + + assert config == config_copy + + +def test_rule_based_data_warnings_no_rule_trackers(): + 
trackers = [DialogueStateTracker("some-id", slots=[], is_rule_tracker=False)] + policies = [RulePolicy()] + ensemble = SimplePolicyEnsemble(policies) + + with pytest.warns(UserWarning) as record: + ensemble.train(trackers, Domain.empty(), RegexInterpreter()) + + assert ( + "Found a rule-based policy in your pipeline but no rule-based training data." + ) in record[0].message.args[0] + + +def test_rule_based_data_warnings_no_rule_policy(): + trackers = [DialogueStateTracker("some-id", slots=[], is_rule_tracker=True)] + policies = [FallbackPolicy()] + ensemble = SimplePolicyEnsemble(policies) + + with pytest.warns(UserWarning) as record: + ensemble.train(trackers, Domain.empty(), RegexInterpreter()) + + assert ( + "Found rule-based training data but no policy supporting rule-based data." + ) in record[0].message.args[0] + + +@pytest.mark.parametrize( + "policies", + [ + ["RulePolicy", "MappingPolicy"], + ["RulePolicy", "FallbackPolicy"], + ["RulePolicy", "TwoStageFallbackPolicy"], + ["RulePolicy", "FormPolicy"], + ["RulePolicy", "FallbackPolicy", "FormPolicy"], + ], +) +def test_mutual_exclusion_of_rule_policy_and_old_rule_like_policies( + policies: List[Text], +): + policy_config = [{"name": policy_name} for policy_name in policies] + with pytest.raises(InvalidPolicyConfig): + PolicyEnsemble.from_dict({"policies": policy_config}) diff --git a/tests/core/test_evaluation.py b/tests/core/test_evaluation.py index bc377623ea87..89113d93e220 100644 --- a/tests/core/test_evaluation.py +++ b/tests/core/test_evaluation.py @@ -1,6 +1,21 @@ import os +from pathlib import Path +from typing import Any, Text, Dict -from rasa.core.test import _generate_trackers, collect_story_predictions, test +import pytest + +import rasa.utils.io +from rasa.core.test import ( + _generate_trackers, + _collect_story_predictions, + test as evaluate_stories, + FAILED_STORIES_FILE, + CONFUSION_MATRIX_STORIES_FILE, + REPORT_STORIES_FILE, + SUCCESSFUL_STORIES_FILE, + _clean_entity_results, +) +from rasa.core.policies.memoization import MemoizationPolicy # we need this import to ignore the warning... 
# noinspection PyUnresolvedReferences @@ -10,75 +25,62 @@ DEFAULT_STORIES_FILE, E2E_STORY_FILE_UNKNOWN_ENTITY, END_TO_END_STORY_FILE, + E2E_STORY_FILE_TRIPS_CIRCUIT_BREAKER, + STORY_FILE_TRIPS_CIRCUIT_BREAKER, ) -async def test_evaluation_image_creation(tmpdir, default_agent): - stories_path = os.path.join(tmpdir.strpath, "failed_stories.md") - img_path = os.path.join(tmpdir.strpath, "story_confmat.pdf") +async def test_evaluation_file_creation(tmpdir: Path, default_agent: Agent): + failed_stories_path = str(tmpdir / FAILED_STORIES_FILE) + success_stories_path = str(tmpdir / SUCCESSFUL_STORIES_FILE) + report_path = str(tmpdir / REPORT_STORIES_FILE) + confusion_matrix_path = str(tmpdir / CONFUSION_MATRIX_STORIES_FILE) - await test( + await evaluate_stories( stories=DEFAULT_STORIES_FILE, agent=default_agent, - out_directory=tmpdir.strpath, + out_directory=str(tmpdir), max_stories=None, e2e=False, + errors=True, + successes=True, ) - assert os.path.isfile(img_path) - assert os.path.isfile(stories_path) + assert os.path.isfile(failed_stories_path) + assert os.path.isfile(success_stories_path) + assert os.path.isfile(report_path) + assert os.path.isfile(confusion_matrix_path) -async def test_end_to_end_evaluation_script(tmpdir, restaurantbot): - restaurantbot = Agent.load(restaurantbot) +async def test_end_to_end_evaluation_script(default_agent: Agent): completed_trackers = await _generate_trackers( - END_TO_END_STORY_FILE, restaurantbot, use_e2e=True + END_TO_END_STORY_FILE, default_agent, use_e2e=True ) - story_evaluation, num_stories = collect_story_predictions( - completed_trackers, restaurantbot, use_e2e=True + story_evaluation, num_stories = _collect_story_predictions( + completed_trackers, default_agent, use_e2e=True ) serialised_store = [ - "utter_ask_howcanhelp", - "action_listen", - "utter_ask_howcanhelp", - "action_listen", - "utter_on_it", - "utter_ask_cuisine", + "utter_greet", "action_listen", - "utter_ask_numpeople", + "utter_greet", "action_listen", - "utter_ask_howcanhelp", + "utter_default", "action_listen", - "utter_on_it", - "utter_ask_numpeople", + "utter_goodbye", "action_listen", - "utter_ask_moreupdates", + "utter_greet", "action_listen", - "utter_ask_moreupdates", - "action_listen", - "utter_ack_dosearch", - "action_search_restaurants", - "action_suggest", + "utter_default", "action_listen", "greet", "greet", - "inform", - "inform", + "default", + "goodbye", "greet", - "inform", - "inform", - "inform", - "deny", - "[moderately](price:moderate)", - "[east](location)", - "[french](cuisine)", - "[cheap](price:lo)", - "[french](cuisine)", - "[bombay](location)", - "[six](people:6)", - "[moderately](price:moderate)", + "default", + '[{"name": "Max"}]{"entity": "name", "value": "Max"}', ] assert story_evaluation.evaluation_store.serialise()[0] == serialised_store @@ -87,15 +89,150 @@ async def test_end_to_end_evaluation_script(tmpdir, restaurantbot): assert num_stories == 3 -async def test_end_to_end_evaluation_script_unknown_entity(tmpdir, default_agent): +async def test_end_to_end_evaluation_script_unknown_entity(default_agent: Agent): completed_trackers = await _generate_trackers( E2E_STORY_FILE_UNKNOWN_ENTITY, default_agent, use_e2e=True ) - story_evaluation, num_stories = collect_story_predictions( + story_evaluation, num_stories = _collect_story_predictions( completed_trackers, default_agent, use_e2e=True ) assert story_evaluation.evaluation_store.has_prediction_target_mismatch() assert len(story_evaluation.failed_stories) == 1 assert num_stories == 1 + + +async def 
test_end_to_evaluation_with_forms(form_bot_agent: Agent): + test_stories = await _generate_trackers( + "data/test_evaluations/form-end-to-end-stories.md", form_bot_agent, use_e2e=True + ) + + story_evaluation, num_stories = _collect_story_predictions( + test_stories, form_bot_agent, use_e2e=True + ) + + assert not story_evaluation.evaluation_store.has_prediction_target_mismatch() + + +async def test_source_in_failed_stories(tmpdir: Path, default_agent: Agent): + stories_path = str(tmpdir / FAILED_STORIES_FILE) + + await evaluate_stories( + stories=E2E_STORY_FILE_UNKNOWN_ENTITY, + agent=default_agent, + out_directory=str(tmpdir), + max_stories=None, + e2e=False, + ) + + failed_stories = rasa.utils.io.read_file(stories_path) + + assert ( + f"## simple_story_with_unknown_entity ({E2E_STORY_FILE_UNKNOWN_ENTITY})" + in failed_stories + ) + + +async def test_end_to_evaluation_trips_circuit_breaker(): + agent = Agent( + domain="data/test_domains/default.yml", + policies=[MemoizationPolicy(max_history=11)], + ) + training_data = await agent.load_data(STORY_FILE_TRIPS_CIRCUIT_BREAKER) + agent.train(training_data) + + test_stories = await _generate_trackers( + E2E_STORY_FILE_TRIPS_CIRCUIT_BREAKER, agent, use_e2e=True + ) + + story_evaluation, num_stories = _collect_story_predictions( + test_stories, agent, use_e2e=True + ) + + circuit_trip_predicted = [ + "utter_greet", + "utter_greet", + "utter_greet", + "utter_greet", + "utter_greet", + "utter_greet", + "utter_greet", + "utter_greet", + "utter_greet", + "utter_greet", + "circuit breaker tripped", + "circuit breaker tripped", + ] + + assert ( + story_evaluation.evaluation_store.action_predictions == circuit_trip_predicted + ) + assert num_stories == 1 + + +@pytest.mark.parametrize( + "text, entity, expected_entity", + [ + ( + "The first one please.", + { + "extractor": "DucklingHTTPExtractor", + "entity": "ordinal", + "confidence": 0.87, + "start": 4, + "end": 9, + "value": 1, + }, + { + "text": "The first one please.", + "entity": "ordinal", + "start": 4, + "end": 9, + "value": "1", + }, + ), + ( + "The first one please.", + { + "extractor": "CRFEntityExtractor", + "entity": "ordinal", + "confidence": 0.87, + "start": 4, + "end": 9, + "value": "1", + }, + { + "text": "The first one please.", + "entity": "ordinal", + "start": 4, + "end": 9, + "value": "1", + }, + ), + ( + "Italian food", + { + "extractor": "DIETClassifier", + "entity": "cuisine", + "confidence": 0.99, + "start": 0, + "end": 7, + "value": "Italian", + }, + { + "text": "Italian food", + "entity": "cuisine", + "start": 0, + "end": 7, + "value": "Italian", + }, + ), + ], +) +def test_event_has_proper_implementation( + text: Text, entity: Dict[Text, Any], expected_entity: Dict[Text, Any] +): + actual_entities = _clean_entity_results(text, [entity]) + + assert actual_entities[0] == expected_entity diff --git a/tests/core/test_events.py b/tests/core/test_events.py index 98e8b3fc9fc0..dcc34455a95d 100644 --- a/tests/core/test_events.py +++ b/tests/core/test_events.py @@ -1,11 +1,13 @@ -import time - -import pytz -from datetime import datetime import copy import pytest +import pytz +import time +from datetime import datetime from dateutil import parser +from typing import Type + +from rasa.core import utils from rasa.core.events import ( Event, UserUttered, @@ -14,6 +16,7 @@ ActionExecuted, AllSlotsReset, ReminderScheduled, + ReminderCancelled, ConversationResumed, ConversationPaused, StoryExported, @@ -22,6 +25,7 @@ FollowupAction, UserUtteranceReverted, AgentUttered, + SessionStarted, ) 
@@ -40,6 +44,7 @@ (StoryExported(), None), (ActionReverted(), None), (UserUtteranceReverted(), None), + (SessionStarted(), None), (ActionExecuted("my_action"), ActionExecuted("my_other_action")), (FollowupAction("my_action"), FollowupAction("my_other_action")), ( @@ -51,8 +56,8 @@ AgentUttered("my_other_test", "my_other_data"), ), ( - ReminderScheduled("my_action", datetime.now()), - ReminderScheduled("my_other_action", datetime.now()), + ReminderScheduled("my_intent", datetime.now()), + ReminderScheduled("my_other_intent", datetime.now()), ), ], ) @@ -91,13 +96,14 @@ def test_event_has_proper_implementation(one_event, another_event): StoryExported(), ActionReverted(), UserUtteranceReverted(), + SessionStarted(), ActionExecuted("my_action"), - ActionExecuted("my_action", "policy_1_KerasPolicy", 0.8), + ActionExecuted("my_action", "policy_1_TEDPolicy", 0.8), FollowupAction("my_action"), BotUttered("my_text", {"my_data": 1}), AgentUttered("my_text", "my_data"), - ReminderScheduled("my_action", datetime.now()), - ReminderScheduled("my_action", datetime.now(pytz.timezone("US/Central"))), + ReminderScheduled("my_intent", datetime.now()), + ReminderScheduled("my_intent", datetime.now(pytz.timezone("US/Central"))), ], ) def test_dict_serialisation(one_event): @@ -107,43 +113,32 @@ def test_dict_serialisation(one_event): def test_json_parse_setslot(): - # DOCS MARKER SetSlot evt = {"event": "slot", "name": "departure_airport", "value": "BER"} - # DOCS END assert Event.from_parameters(evt) == SlotSet("departure_airport", "BER") def test_json_parse_restarted(): - # DOCS MARKER Restarted evt = {"event": "restart"} - # DOCS END assert Event.from_parameters(evt) == Restarted() +def test_json_parse_session_started(): + evt = {"event": "session_started"} + assert Event.from_parameters(evt) == SessionStarted() + + def test_json_parse_reset(): - # DOCS MARKER AllSlotsReset evt = {"event": "reset_slots"} - # DOCS END assert Event.from_parameters(evt) == AllSlotsReset() def test_json_parse_user(): - # fmt: off - # DOCS MARKER UserUttered - evt={ - "event": "user", - "text": "Hey", - "parse_data": { - "intent": { - "name": "greet", - "confidence": 0.9 - }, - "entities": [] - }, - "metadata": {}, - } - # DOCS END - # fmt: on + evt = { + "event": "user", + "text": "Hey", + "parse_data": {"intent": {"name": "greet", "confidence": 0.9}, "entities": []}, + "metadata": {}, + } assert Event.from_parameters(evt) == UserUttered( "Hey", intent={"name": "greet", "confidence": 0.9}, @@ -154,85 +149,86 @@ def test_json_parse_user(): def test_json_parse_bot(): - # DOCS MARKER BotUttered evt = {"event": "bot", "text": "Hey there!", "data": {}} - # DOCS END assert Event.from_parameters(evt) == BotUttered("Hey there!", {}) def test_json_parse_rewind(): - # DOCS MARKER UserUtteranceReverted evt = {"event": "rewind"} - # DOCS END assert Event.from_parameters(evt) == UserUtteranceReverted() def test_json_parse_reminder(): - # fmt: off - # DOCS MARKER ReminderScheduled - evt={ - "event": "reminder", - "action": "my_action", - "date_time": "2018-09-03T11:41:10.128172", - "name": "my_reminder", - "kill_on_user_msg": True, - } - # DOCS END - # fmt: on + evt = { + "event": "reminder", + "intent": "my_intent", + "entities": {"entity1": "value1", "entity2": "value2"}, + "date_time": "2018-09-03T11:41:10.128172", + "name": "my_reminder", + "kill_on_user_msg": True, + } assert Event.from_parameters(evt) == ReminderScheduled( - "my_action", + "my_intent", parser.parse("2018-09-03T11:41:10.128172"), name="my_reminder", 
kill_on_user_message=True, ) +def test_json_parse_reminder_cancelled(): + evt = { + "event": "cancel_reminder", + "name": "my_reminder", + "intent": "my_intent", + "entities": [ + {"entity": "entity1", "value": "value1"}, + {"entity": "entity2", "value": "value2"}, + ], + "date_time": "2018-09-03T11:41:10.128172", + } + assert Event.from_parameters(evt) == ReminderCancelled( + name="my_reminder", + intent="my_intent", + entities=[ + {"entity": "entity1", "value": "value1"}, + {"entity": "entity2", "value": "value2"}, + ], + timestamp=parser.parse("2018-09-03T11:41:10.128172"), + ) + + def test_json_parse_undo(): - # DOCS MARKER ActionReverted evt = {"event": "undo"} - # DOCS END assert Event.from_parameters(evt) == ActionReverted() def test_json_parse_export(): - # DOCS MARKER StoryExported evt = {"event": "export"} - # DOCS END assert Event.from_parameters(evt) == StoryExported() def test_json_parse_followup(): - # DOCS MARKER FollowupAction evt = {"event": "followup", "name": "my_action"} - # DOCS END assert Event.from_parameters(evt) == FollowupAction("my_action") def test_json_parse_pause(): - # DOCS MARKER ConversationPaused evt = {"event": "pause"} - # DOCS END assert Event.from_parameters(evt) == ConversationPaused() def test_json_parse_resume(): - # DOCS MARKER ConversationResumed evt = {"event": "resume"} - # DOCS END assert Event.from_parameters(evt) == ConversationResumed() def test_json_parse_action(): - # DOCS MARKER ActionExecuted evt = {"event": "action", "name": "my_action"} - # DOCS END assert Event.from_parameters(evt) == ActionExecuted("my_action") def test_json_parse_agent(): - # DOCS MARKER AgentUttered evt = {"event": "agent", "text": "Hey, how are you?"} - # DOCS END assert Event.from_parameters(evt) == AgentUttered("Hey, how are you?") @@ -242,7 +238,6 @@ def test_json_parse_agent(): UserUttered, BotUttered, ActionReverted, - Event, Restarted, AllSlotsReset, ConversationResumed, @@ -267,3 +262,41 @@ def test_correct_timestamp_setting(event_class): event2 = event_class("test") assert event.timestamp < event2.timestamp + + +@pytest.mark.parametrize("event_class", utils.all_subclasses(Event)) +def test_event_metadata_dict(event_class: Type[Event]): + metadata = {"foo": "bar", "quux": 42} + + # Create the event from a `dict` that will be accepted by the + # `_from_parameters` method of any `Event` subclass (the values themselves + # are not important). + event = Event.from_parameters( + { + "metadata": metadata, + "event": event_class.type_name, + "parse_data": {}, + "date_time": "2019-11-20T16:09:16Z", + } + ) + assert event.as_dict()["metadata"] == metadata + + +@pytest.mark.parametrize("event_class", utils.all_subclasses(Event)) +def test_event_default_metadata(event_class: Type[Event]): + # Create an event without metadata. When converting the `Event` to a + # `dict`, it should not include a `metadata` property - unless it's a + # `UserUttered` or a `BotUttered` event (or subclasses of them), in which + # case the metadata should be included with a default value of {}. 
+ event = Event.from_parameters( + { + "event": event_class.type_name, + "parse_data": {}, + "date_time": "2019-11-20T16:09:16Z", + } + ) + + if isinstance(event, BotUttered) or isinstance(event, UserUttered): + assert event.as_dict()["metadata"] == {} + else: + assert "metadata" not in event.as_dict() diff --git a/tests/core/test_examples.py b/tests/core/test_examples.py index f450014a6f7f..8bb7d1ace590 100644 --- a/tests/core/test_examples.py +++ b/tests/core/test_examples.py @@ -1,28 +1,19 @@ -import asyncio import sys import json -import os -import pytest +from pathlib import Path +from typing import Text + from aioresponses import aioresponses -import rasa.utils.io from rasa.core.agent import Agent from rasa.core.train import train from rasa.core.utils import AvailableEndpoints +from rasa.importers.importer import TrainingDataImporter from rasa.utils.endpoints import EndpointConfig, ClientResponseError -@pytest.fixture(scope="session") -def loop(): - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - loop = rasa.utils.io.enable_async_loop_debugging(loop) - yield loop - loop.close() - - -async def test_moodbot_example(unpacked_trained_moodbot_path): +async def test_moodbot_example(unpacked_trained_moodbot_path: Text): agent = Agent.load(unpacked_trained_moodbot_path) responses = await agent.handle_text("/greet") @@ -37,72 +28,108 @@ async def test_moodbot_example(unpacked_trained_moodbot_path): async def test_formbot_example(): sys.path.append("examples/formbot/") + project = Path("examples/formbot/") + config = str(project / "config.yml") + domain = str(project / "domain.yml") + training_dir = project / "data" + training_files = [ + str(training_dir / "rules.yml"), + str(training_dir / "stories.yml"), + ] + importer = TrainingDataImporter.load_from_config(config, domain, training_files) - p = "examples/formbot/" - stories = os.path.join(p, "data", "stories.md") endpoint = EndpointConfig("https://example.com/webhooks/actions") endpoints = AvailableEndpoints(action=endpoint) agent = await train( - os.path.join(p, "domain.yml"), - stories, - os.path.join(p, "models", "dialogue"), + domain, + importer, + str(project / "models" / "dialogue"), endpoints=endpoints, - policy_config="rasa/cli/default_config.yml", + policy_config="examples/formbot/config.yml", + ) + + async def mock_form_happy_path(input_text, output_text, slot=None): + if slot: + form = "restaurant_form" + template = f"utter_ask_{slot}" + else: + form = None + template = "utter_submit" + response = { + "events": [ + {"event": "form", "name": form, "timestamp": None}, + { + "event": "slot", + "timestamp": None, + "name": "requested_slot", + "value": slot, + }, + ], + "responses": [{"template": template}], + } + with aioresponses() as mocked: + mocked.post( + "https://example.com/webhooks/actions", payload=response, repeat=True + ) + responses = await agent.handle_text(input_text) + assert responses[0]["text"] == output_text + + async def mock_form_unhappy_path(input_text, output_text, slot): + response_error = { + "error": f"Failed to extract slot {slot} with action restaurant_form", + "action_name": "restaurant_form", + } + with aioresponses() as mocked: + # noinspection PyTypeChecker + mocked.post( + "https://example.com/webhooks/actions", + repeat=True, + exception=ClientResponseError(400, "", json.dumps(response_error)), + ) + responses = await agent.handle_text(input_text) + assert responses[0]["text"] == output_text + + await mock_form_happy_path("/request_restaurant", "what cuisine?", slot="cuisine") + 
await mock_form_unhappy_path("/chitchat", "chitchat", slot="cuisine") + await mock_form_happy_path( + '/inform{"cuisine": "mexican"}', "how many people?", slot="num_people" ) - response = { - "events": [ - {"event": "form", "name": "restaurant_form", "timestamp": None}, - { - "event": "slot", - "timestamp": None, - "name": "requested_slot", - "value": "cuisine", - }, - ], - "responses": [{"template": "utter_ask_cuisine"}], - } - - with aioresponses() as mocked: - mocked.post( - "https://example.com/webhooks/actions", payload=response, repeat=True - ) - - responses = await agent.handle_text("/request_restaurant") - - assert responses[0]["text"] == "what cuisine?" - - response = { - "error": "Failed to validate slot cuisine with action restaurant_form", - "action_name": "restaurant_form", - } - - with aioresponses() as mocked: - # noinspection PyTypeChecker - mocked.post( - "https://example.com/webhooks/actions", - repeat=True, - exception=ClientResponseError(400, "", json.dumps(response)), - ) - - responses = await agent.handle_text("/chitchat") - - assert responses[0]["text"] == "chitchat" - - -async def test_restaurantbot_example(): - sys.path.append("examples/restaurantbot/") - from run import train_core, train_nlu, parse - - p = "examples/restaurantbot/" - stories = os.path.join("data", "test_stories", "stories_babi_small.md") - nlu_data = os.path.join(p, "data", "nlu.md") - await train_core( - os.path.join(p, "domain.yml"), os.path.join(p, "models"), "current", stories + await mock_form_happy_path( + '/inform{"number": "2"}', "do you want to seat outside?", slot="outdoor_seating" ) - train_nlu( - os.path.join(p, "config.yml"), os.path.join(p, "models"), "current", nlu_data + await mock_form_happy_path( + "/affirm", "please provide additional preferences", slot="preferences" ) - responses = await parse("hello", os.path.join(p, "models", "current")) + responses = await agent.handle_text("/restart") + assert responses[0]["text"] == "restarted" + + responses = await agent.handle_text("/greet") + assert ( + responses[0]["text"] + == "Hello! I am restaurant search assistant! How can I help?" + ) + + await mock_form_happy_path("/request_restaurant", "what cuisine?", slot="cuisine") + await mock_form_happy_path( + '/inform{"cuisine": "mexican"}', "how many people?", slot="num_people" + ) + await mock_form_happy_path( + '/inform{"number": "2"}', "do you want to seat outside?", slot="outdoor_seating" + ) + await mock_form_unhappy_path( + "/stop", "do you want to continue?", slot="outdoor_seating" + ) + await mock_form_happy_path( + "/affirm", "do you want to seat outside?", slot="outdoor_seating" + ) + await mock_form_happy_path( + "/affirm", "please provide additional preferences", slot="preferences" + ) + await mock_form_happy_path( + "/deny", "please give your feedback on your experience so far", slot="feedback" + ) + await mock_form_happy_path('/inform{"feedback": "great"}', "All done!") - assert responses[0]["text"] == "how can I help you?" 
+ responses = await agent.handle_text("/thankyou") + assert responses[0]["text"] == "you are welcome :)" diff --git a/tests/core/test_exporter.py b/tests/core/test_exporter.py new file mode 100644 index 000000000000..9c449512750a --- /dev/null +++ b/tests/core/test_exporter.py @@ -0,0 +1,292 @@ +import uuid +from pathlib import Path +from typing import Optional, Dict, Any, Text, List +from unittest.mock import Mock + +import pytest + +from rasa.core.actions.action import ACTION_SESSION_START_NAME +from rasa.core.domain import Domain + +import rasa.utils.io as io_utils +from rasa.core.brokers.pika import PikaEventBroker +from rasa.core.brokers.sql import SQLEventBroker +from rasa.core.constants import RASA_EXPORT_PROCESS_ID_HEADER_NAME +from rasa.core.events import SessionStarted, ActionExecuted +from rasa.core.tracker_store import SQLTrackerStore +from rasa.core.trackers import DialogueStateTracker +from rasa.exceptions import ( + NoConversationsInTrackerStoreError, + NoEventsToMigrateError, + NoEventsInTimeRangeError, + PublishingError, +) +from tests.conftest import MockExporter, random_user_uttered_event + + +def _write_endpoint_config_to_yaml(path: Path, data: Dict[Text, Any]) -> Path: + endpoints_path = path / "endpoints.yml" + + # write endpoints config to file + io_utils.write_yaml(data, endpoints_path) + return endpoints_path + + +@pytest.mark.parametrize( + "requested_ids,available_ids,expected", + [(["1"], ["1"], ["1"]), (["1", "2"], ["2"], ["2"]), (None, ["2"], ["2"])], +) +def test_get_conversation_ids_to_process( + requested_ids: Optional[List[Text]], + available_ids: Optional[List[Text]], + expected: Optional[List[Text]], +): + # create and mock tracker store containing `available_ids` as keys + tracker_store = Mock() + tracker_store.keys.return_value = available_ids + + exporter = MockExporter(tracker_store) + exporter.requested_conversation_ids = requested_ids + + # noinspection PyProtectedMember + assert exporter._get_conversation_ids_to_process() == set(expected) + + +@pytest.mark.parametrize( + "requested_ids,available_ids,exception", + [ + (["1"], [], NoConversationsInTrackerStoreError), # no IDs in tracker store + (None, [], NoConversationsInTrackerStoreError), # without requested IDs + ( + ["1", "2", "3"], + ["4", "5", "6"], + NoEventsToMigrateError, + ), # no overlap between requested IDs and those available + ], +) +def test_get_conversation_ids_to_process_error( + requested_ids: Optional[List[Text]], available_ids: List[Text], exception: Exception +): + # create and mock tracker store containing `available_ids` as keys + tracker_store = Mock() + tracker_store.keys.return_value = available_ids + + exporter = MockExporter(tracker_store) + exporter.requested_conversation_ids = requested_ids + + with pytest.raises(exception): + # noinspection PyProtectedMember + exporter._get_conversation_ids_to_process() + + +def test_fetch_events_within_time_range(): + conversation_ids = ["some-id", "another-id"] + + # prepare events from different senders and different timestamps + event_1 = random_user_uttered_event(3) + event_2 = random_user_uttered_event(2) + event_3 = random_user_uttered_event(1) + events = {conversation_ids[0]: [event_1, event_2], conversation_ids[1]: [event_3]} + + def _get_tracker(conversation_id: Text) -> DialogueStateTracker: + return DialogueStateTracker.from_events( + conversation_id, events[conversation_id] + ) + + # create mock tracker store + tracker_store = Mock() + tracker_store.retrieve.side_effect = _get_tracker + tracker_store.keys.return_value = 
conversation_ids + + exporter = MockExporter(tracker_store) + exporter.requested_conversation_ids = conversation_ids + + # noinspection PyProtectedMember + fetched_events = exporter._fetch_events_within_time_range() + + # events should come back for all requested conversation IDs + assert all( + any(_id in event["sender_id"] for event in fetched_events) + for _id in conversation_ids + ) + + # events are sorted by timestamp despite the initially different order + assert fetched_events == list(sorted(fetched_events, key=lambda e: e["timestamp"])) + + +def test_fetch_events_within_time_range_tracker_does_not_err(): + # create mock tracker store that returns `None` on `retrieve()` + tracker_store = Mock() + tracker_store.retrieve.return_value = None + tracker_store.keys.return_value = [uuid.uuid4()] + + exporter = MockExporter(tracker_store) + + # no events means `NoEventsInTimeRangeError` + with pytest.raises(NoEventsInTimeRangeError): + # noinspection PyProtectedMember + exporter._fetch_events_within_time_range() + + +def test_fetch_events_within_time_range_tracker_contains_no_events(): + # create mock tracker store that returns `None` on `retrieve()` + tracker_store = Mock() + tracker_store.retrieve.return_value = DialogueStateTracker.from_events( + "a great ID", [] + ) + tracker_store.keys.return_value = ["a great ID"] + + exporter = MockExporter(tracker_store) + + # no events means `NoEventsInTimeRangeError` + with pytest.raises(NoEventsInTimeRangeError): + # noinspection PyProtectedMember + exporter._fetch_events_within_time_range() + + +def test_fetch_events_within_time_range_with_session_events(tmp_path: Path): + conversation_id = "test_fetch_events_within_time_range_with_sessions" + + tracker_store = SQLTrackerStore( + dialect="sqlite", + db=str(tmp_path / f"{uuid.uuid4().hex}.db"), + domain=Domain.empty(), + ) + + events = [ + random_user_uttered_event(1), + SessionStarted(2), + ActionExecuted(timestamp=3, action_name=ACTION_SESSION_START_NAME), + random_user_uttered_event(4), + ] + tracker = DialogueStateTracker.from_events(conversation_id, evts=events) + tracker_store.save(tracker) + + exporter = MockExporter(tracker_store=tracker_store) + + # noinspection PyProtectedMember + fetched_events = exporter._fetch_events_within_time_range() + + assert len(fetched_events) == len(events) + + +# noinspection PyProtectedMember +def test_sort_and_select_events_by_timestamp(): + events = [ + event.as_dict() + for event in [ + random_user_uttered_event(3), + random_user_uttered_event(2), + random_user_uttered_event(1), + ] + ] + + tracker_store = Mock() + exporter = MockExporter(tracker_store) + + selected_events = exporter._sort_and_select_events_by_timestamp(events) + + # events are sorted + assert selected_events == list( + sorted(selected_events, key=lambda e: e["timestamp"]) + ) + + # apply minimum timestamp requirement, expect to get only two events back + exporter.minimum_timestamp = 2.0 + assert exporter._sort_and_select_events_by_timestamp(events) == [ + events[1], + events[0], + ] + exporter.minimum_timestamp = None + + # apply maximum timestamp requirement, expect to get only one + exporter.maximum_timestamp = 1.1 + assert exporter._sort_and_select_events_by_timestamp(events) == [events[2]] + + # apply both requirements, get one event back + exporter.minimum_timestamp = 2.0 + exporter.maximum_timestamp = 2.1 + assert exporter._sort_and_select_events_by_timestamp(events) == [events[1]] + + +# noinspection PyProtectedMember +def test_sort_and_select_events_by_timestamp_error(): + 
tracker_store = Mock() + exporter = MockExporter(tracker_store) + + # no events given + with pytest.raises(NoEventsInTimeRangeError): + exporter._sort_and_select_events_by_timestamp([]) + + # supply list of events, apply timestamp constraint and no events survive + exporter.minimum_timestamp = 3.1 + events = [random_user_uttered_event(3).as_dict()] + with pytest.raises(NoEventsInTimeRangeError): + exporter._sort_and_select_events_by_timestamp(events) + + +def test_get_message_headers_pika_event_broker(): + event_broker = Mock(spec=PikaEventBroker) + exporter = MockExporter(event_broker=event_broker) + + # noinspection PyProtectedMember + headers = exporter._get_message_headers() + + assert headers.get(RASA_EXPORT_PROCESS_ID_HEADER_NAME) + + +def test_get_message_headers_non_pika_broker(): + event_broker = Mock() + exporter = MockExporter(event_broker=event_broker) + + # noinspection PyProtectedMember + assert exporter._get_message_headers() is None + + +def test_publish_with_headers_pika_event_broker(): + event_broker = Mock(spec=PikaEventBroker) + exporter = MockExporter(event_broker=event_broker) + + headers = {"some": "header"} + event = {"some": "event"} + + # noinspection PyProtectedMember + exporter._publish_with_message_headers(event, headers) + + # the `PikaEventBroker`'s `publish()` method was called with both + # the `event` and `headers` arguments + event_broker.publish.assert_called_with(event=event, headers=headers) + + +def test_publish_with_headers_non_pika_event_broker(): + event_broker = Mock(SQLEventBroker) + exporter = MockExporter(event_broker=event_broker) + + headers = {"some": "header"} + event = {"some": "event"} + + # noinspection PyProtectedMember + exporter._publish_with_message_headers(event, headers) + + # the `SQLEventBroker`'s `publish()` method was called with only the `event` + # argument + event_broker.publish.assert_called_with(event) + + +def test_publishing_error(): + # mock event broker so it raises on `publish()` + event_broker = Mock() + event_broker.publish.side_effect = ValueError() + + exporter = MockExporter(event_broker=event_broker) + + user_event = random_user_uttered_event(1).as_dict() + user_event["sender_id"] = uuid.uuid4().hex + + # noinspection PyProtectedMember + exporter._fetch_events_within_time_range = Mock(return_value=[user_event]) + + # run the export function + with pytest.raises(PublishingError): + # noinspection PyProtectedMember + exporter.publish_events() diff --git a/tests/core/test_interactive.py b/tests/core/test_interactive.py index 18599c70c1cb..e9e4f0791ee8 100644 --- a/tests/core/test_interactive.py +++ b/tests/core/test_interactive.py @@ -1,51 +1,76 @@ +import asyncio import json +from collections import deque +from pathlib import Path +from typing import Any, Dict, List, Text + import pytest import uuid + +from _pytest.monkeypatch import MonkeyPatch from aioresponses import aioresponses +from mock import Mock import rasa.utils.io -from rasa.core.events import BotUttered +from rasa.core.actions import action +from rasa.core.actions.action import ACTION_LISTEN_NAME +from rasa.core.channels import UserMessage +from rasa.core.domain import Domain +from rasa.core.events import BotUttered, ActionExecuted +from rasa.core.trackers import DialogueStateTracker from rasa.core.training import interactive +from rasa.importers.rasa import TrainingDataImporter +from rasa.nlu.training_data import Message +from rasa.nlu.training_data.loading import RASA, MARKDOWN, UNK from rasa.utils.endpoints import EndpointConfig -from 
rasa.core.actions.action import default_actions -from rasa.core.domain import Domain -from tests.utilities import latest_request, json_of_latest_request +from tests import utilities +from tests.core.conftest import DEFAULT_DOMAIN_PATH_WITH_SLOTS @pytest.fixture -def mock_endpoint(): +def mock_endpoint() -> EndpointConfig: return EndpointConfig("https://example.com") +@pytest.fixture +def mock_file_importer( + default_stack_config: Text, default_nlu_data: Text, default_stories_file: Text +): + domain_path = DEFAULT_DOMAIN_PATH_WITH_SLOTS + return TrainingDataImporter.load_from_config( + default_stack_config, domain_path, [default_nlu_data, default_stories_file] + ) + + async def test_send_message(mock_endpoint): sender_id = uuid.uuid4().hex - url = "{}/conversations/{}/messages".format(mock_endpoint.url, sender_id) + url = f"{mock_endpoint.url}/conversations/{sender_id}/messages" with aioresponses() as mocked: mocked.post(url, payload={}) await interactive.send_message(mock_endpoint, sender_id, "Hello") - r = latest_request(mocked, "post", url) + r = utilities.latest_request(mocked, "post", url) assert r expected = {"sender": "user", "text": "Hello", "parse_data": None} - assert json_of_latest_request(r) == expected + assert utilities.json_of_latest_request(r) == expected async def test_request_prediction(mock_endpoint): sender_id = uuid.uuid4().hex - url = "{}/conversations/{}/predict".format(mock_endpoint.url, sender_id) + url = f"{mock_endpoint.url}/conversations/{sender_id}/predict" with aioresponses() as mocked: mocked.post(url, payload={}) await interactive.request_prediction(mock_endpoint, sender_id) - assert latest_request(mocked, "post", url) is not None + assert utilities.latest_request(mocked, "post", url) is not None def test_bot_output_format(): @@ -155,7 +180,7 @@ async def test_print_history(mock_endpoint): await interactive._print_history(sender_id, mock_endpoint) - assert latest_request(mocked, "get", url) is not None + assert utilities.latest_request(mocked, "get", url) is not None async def test_is_listening_for_messages(mock_endpoint): @@ -200,42 +225,192 @@ def test_as_md_message(): assert md == "Hello there [rasa](name)." 
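# test_as_md_message above expects entities rendered in the NLU Markdown form
# "[value](entity)". The helper below is a simplified illustration of producing
# that format from parse data; the interactive module's actual conversion may
# differ in details.
from typing import Any, Dict, List, Text


def annotate_entities_as_markdown(text: Text, entities: List[Dict[Text, Any]]) -> Text:
    """Wrap each entity span as [value](entity), right to left so offsets stay valid."""
    for entity in sorted(entities, key=lambda e: e["start"], reverse=True):
        start, end = entity["start"], entity["end"]
        text = f"{text[:start]}[{text[start:end]}]({entity['entity']}){text[end:]}"
    return text


print(
    annotate_entities_as_markdown(
        "Hello there rasa.",
        [{"start": 12, "end": 16, "entity": "name", "value": "rasa"}],
    )
)  # -> "Hello there [rasa](name)."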
-def test_entity_annotation_merge_with_original(): - parse_original = { - "text": "Hello there rasa, it's me, paula.", - "entities": [ +@pytest.mark.parametrize( + "parse_original, parse_annotated, expected_entities", + [ + ( { - "start": 12, - "end": 16, - "entity": "name1", - "value": "rasa", - "extractor": "batman", - } - ], - "intent": {"name": "greeting", "confidence": 0.9}, - } - parse_annotated = { - "text": "Hello there rasa, it's me, paula.", - "entities": [ - {"start": 12, "end": 16, "entity": "name1", "value": "rasa"}, - {"start": 26, "end": 31, "entity": "name2", "value": "paula"}, - ], - "intent": {"name": "greeting", "confidence": 0.9}, - } - + "text": "Hello there rasa, it's me, paula.", + "entities": [ + { + "start": 12, + "end": 16, + "entity": "name1", + "value": "rasa", + "extractor": "batman", + } + ], + "intent": {"name": "greeting", "confidence": 0.9}, + }, + { + "text": "Hello there rasa, it's me, paula.", + "entities": [ + {"start": 12, "end": 16, "entity": "name1", "value": "rasa"}, + {"start": 26, "end": 31, "entity": "name2", "value": "paula"}, + ], + "intent": {"name": "greeting", "confidence": 0.9}, + }, + [ + { + "start": 12, + "end": 16, + "entity": "name1", + "value": "rasa", + "extractor": "batman", + }, + {"start": 26, "end": 31, "entity": "name2", "value": "paula"}, + ], + ), + ( + { + "text": "I am flying from Berlin to London.", + "entities": [ + { + "start": 17, + "end": 23, + "entity": "location", + "role": "from", + "value": "Berlin", + "extractor": "DIETClassifier", + } + ], + "intent": {"name": "inform", "confidence": 0.9}, + }, + { + "text": "I am flying from Berlin to London.", + "entities": [ + { + "start": 17, + "end": 23, + "entity": "location", + "value": "Berlin", + "role": "from", + }, + { + "start": 27, + "end": 33, + "entity": "location", + "value": "London", + "role": "to", + }, + ], + "intent": {"name": "inform", "confidence": 0.9}, + }, + [ + { + "start": 17, + "end": 23, + "entity": "location", + "value": "Berlin", + "role": "from", + }, + { + "start": 27, + "end": 33, + "entity": "location", + "value": "London", + "role": "to", + }, + ], + ), + ( + { + "text": "A large pepperoni and a small mushroom.", + "entities": [ + { + "start": 2, + "end": 7, + "entity": "size", + "group": "1", + "value": "large", + "extractor": "DIETClassifier", + }, + { + "start": 24, + "end": 29, + "entity": "size", + "value": "small", + "extractor": "DIETClassifier", + }, + ], + "intent": {"name": "inform", "confidence": 0.9}, + }, + { + "text": "A large pepperoni and a small mushroom.", + "entities": [ + { + "start": 2, + "end": 7, + "entity": "size", + "group": "1", + "value": "large", + }, + { + "start": 8, + "end": 17, + "entity": "toppings", + "group": "1", + "value": "pepperoni", + }, + { + "start": 30, + "end": 38, + "entity": "toppings", + "group": "1", + "value": "mushroom", + }, + { + "start": 24, + "end": 29, + "entity": "size", + "group": "2", + "value": "small", + }, + ], + "intent": {"name": "inform", "confidence": 0.9}, + }, + [ + { + "start": 2, + "end": 7, + "entity": "size", + "group": "1", + "value": "large", + }, + { + "start": 8, + "end": 17, + "entity": "toppings", + "group": "1", + "value": "pepperoni", + }, + { + "start": 30, + "end": 38, + "entity": "toppings", + "group": "1", + "value": "mushroom", + }, + { + "start": 24, + "end": 29, + "entity": "size", + "group": "2", + "value": "small", + }, + ], + ), + ], +) +def test__merge_annotated_and_original_entities( + parse_original: Dict[Text, Any], + parse_annotated: Dict[Text, Any], + 
expected_entities: List[Dict[Text, Any]], +): entities = interactive._merge_annotated_and_original_entities( parse_annotated, parse_original ) - assert entities == [ - { - "start": 12, - "end": 16, - "entity": "name1", - "value": "rasa", - "extractor": "batman", - }, - {"start": 26, "end": 31, "entity": "name2", "value": "paula"}, - ] + assert entities == expected_entities def test_validate_user_message(): @@ -267,13 +442,13 @@ async def test_undo_latest_msg(mock_endpoint): await interactive._undo_latest(sender_id, mock_endpoint) - r = latest_request(mocked, "post", append_url) + r = utilities.latest_request(mocked, "post", append_url) assert r # this should be the events the interactive call send to the endpoint # these events should have the last utterance omitted - corrected_event = json_of_latest_request(r) + corrected_event = utilities.json_of_latest_request(r) assert corrected_event["event"] == "undo" @@ -299,7 +474,9 @@ def test_utter_custom_message(): assert json.dumps({"a": "b"}) in actual -async def test_interactive_domain_persistence(mock_endpoint, tmpdir): +async def test_interactive_domain_persistence( + mock_endpoint: EndpointConfig, tmp_path: Path +): # Test method interactive._write_domain_to_file tracker_dump = "data/test_trackers/tracker_moodbot.json" @@ -307,18 +484,136 @@ async def test_interactive_domain_persistence(mock_endpoint, tmpdir): events = tracker_json.get("events", []) - domain_path = tmpdir.join("interactive_domain_save.yml").strpath + domain_path = str(tmp_path / "interactive_domain_save.yml") - url = "{}/domain".format(mock_endpoint.url) + url = f"{mock_endpoint.url}/domain" with aioresponses() as mocked: mocked.get(url, payload={}) serialised_domain = await interactive.retrieve_domain(mock_endpoint) old_domain = Domain.from_dict(serialised_domain) - await interactive._write_domain_to_file(domain_path, events, old_domain) + interactive._write_domain_to_file(domain_path, events, old_domain) saved_domain = rasa.utils.io.read_config_file(domain_path) - for default_action in default_actions(): + for default_action in action.default_actions(): assert default_action.name() not in saved_domain["actions"] + + +async def test_write_domain_to_file_with_form(tmp_path: Path): + domain_path = str(tmp_path / "domain.yml") + form_name = "my_form" + old_domain = Domain.from_yaml( + f""" + actions: + - utter_greet + - utter_goodbye + forms: + - {form_name} + intents: + - greet + """ + ) + + events = [ActionExecuted(form_name), ActionExecuted(ACTION_LISTEN_NAME)] + events = [e.as_dict() for e in events] + + interactive._write_domain_to_file(domain_path, events, old_domain) + + assert set(Domain.from_path(domain_path).action_names) == set( + old_domain.action_names + ) + + +async def test_filter_intents_before_save_nlu_file(): + # Test method interactive._filter_messages + from random import choice + + greet = {"intent": "greet", "text_features": [0.5]} + goodbye = {"intent": "goodbye", "text_features": [0.5]} + test_msgs = [Message("How are you?", greet), Message("I am inevitable", goodbye)] + + domain_file = DEFAULT_DOMAIN_PATH_WITH_SLOTS + domain = Domain.load(domain_file) + intents = domain.intents + + msgs = test_msgs.copy() + if intents: + msgs.append(Message("/" + choice(intents), greet)) + + assert test_msgs == interactive._filter_messages(msgs) + + +@pytest.mark.parametrize( + "path, expected_format", + [("bla.json", RASA), ("other.md", MARKDOWN), ("unknown", UNK)], +) +def test_get_nlu_target_format(path: Text, expected_format: Text): + assert 
interactive._get_nlu_target_format(path) == expected_format + + +@pytest.mark.parametrize( + "trackers, expected_trackers", + [ + ( + [DialogueStateTracker.from_events("one", [])], + [deque([]), UserMessage.DEFAULT_SENDER_ID], + ), + ( + [ + str(i) + for i in range( + interactive.MAX_NUMBER_OF_TRAINING_STORIES_FOR_VISUALIZATION + 1 + ) + ], + [UserMessage.DEFAULT_SENDER_ID], + ), + ], +) +async def test_initial_plotting_call( + mock_endpoint: EndpointConfig, + monkeypatch: MonkeyPatch, + trackers: List[Text], + expected_trackers: List[Text], + mock_file_importer: TrainingDataImporter, +): + get_training_trackers = Mock(return_value=trackers) + monkeypatch.setattr( + interactive, "_get_training_trackers", asyncio.coroutine(get_training_trackers) + ) + + monkeypatch.setattr(interactive.utils, "is_limit_reached", lambda _, __: True) + + plot_trackers = Mock() + monkeypatch.setattr(interactive, "_plot_trackers", asyncio.coroutine(plot_trackers)) + + url = f"{mock_endpoint.url}/domain" + with aioresponses() as mocked: + mocked.get(url, payload={}) + + await interactive.record_messages(mock_endpoint, mock_file_importer) + + get_training_trackers.assert_called_once() + plot_trackers.assert_called_once_with( + expected_trackers, interactive.DEFAULT_STORY_GRAPH_FILE, mock_endpoint + ) + + +async def test_not_getting_trackers_when_skipping_visualization( + mock_endpoint: EndpointConfig, monkeypatch: MonkeyPatch +): + get_trackers = Mock() + monkeypatch.setattr(interactive, "_get_tracker_events_to_plot", get_trackers) + + monkeypatch.setattr(interactive.utils, "is_limit_reached", lambda _, __: True) + + url = f"{mock_endpoint.url}/domain" + with aioresponses() as mocked: + mocked.get(url, payload={}) + + await interactive.record_messages( + mock_endpoint, mock_file_importer, skip_visualization=True + ) + + get_trackers.assert_not_called() diff --git a/tests/core/test_interpreter.py b/tests/core/test_interpreter.py index f86994be54c8..6892f61c9686 100644 --- a/tests/core/test_interpreter.py +++ b/tests/core/test_interpreter.py @@ -6,6 +6,7 @@ RasaNLUHttpInterpreter, RegexInterpreter, ) +from rasa.nlu.constants import INTENT_NAME_KEY from rasa.utils.endpoints import EndpointConfig from tests.utilities import latest_request, json_of_latest_request @@ -21,7 +22,9 @@ async def test_regex_interpreter_intent(regex_interpreter): assert result["text"] == text assert len(result["intent_ranking"]) == 1 assert ( - result["intent"]["name"] == result["intent_ranking"][0]["name"] == "my_intent" + result["intent"][INTENT_NAME_KEY] + == result["intent_ranking"][0][INTENT_NAME_KEY] + == "my_intent" ) assert ( result["intent"]["confidence"] @@ -37,7 +40,9 @@ async def test_regex_interpreter_entities(regex_interpreter): assert result["text"] == text assert len(result["intent_ranking"]) == 1 assert ( - result["intent"]["name"] == result["intent_ranking"][0]["name"] == "my_intent" + result["intent"][INTENT_NAME_KEY] + == result["intent_ranking"][0][INTENT_NAME_KEY] + == "my_intent" ) assert ( result["intent"]["confidence"] @@ -55,7 +60,9 @@ async def test_regex_interpreter_confidence(regex_interpreter): assert result["text"] == text assert len(result["intent_ranking"]) == 1 assert ( - result["intent"]["name"] == result["intent_ranking"][0]["name"] == "my_intent" + result["intent"][INTENT_NAME_KEY] + == result["intent_ranking"][0][INTENT_NAME_KEY] + == "my_intent" ) assert ( result["intent"]["confidence"] @@ -71,7 +78,9 @@ async def test_regex_interpreter_confidence_and_entities(regex_interpreter): assert result["text"] == 
text assert len(result["intent_ranking"]) == 1 assert ( - result["intent"]["name"] == result["intent_ranking"][0]["name"] == "my_intent" + result["intent"][INTENT_NAME_KEY] + == result["intent_ranking"][0][INTENT_NAME_KEY] + == "my_intent" ) assert ( result["intent"]["confidence"] @@ -102,7 +111,7 @@ async def test_http_interpreter(endpoint_url, joined_url): mocked.post(joined_url) endpoint = EndpointConfig(endpoint_url) - interpreter = RasaNLUHttpInterpreter(endpoint=endpoint) + interpreter = RasaNLUHttpInterpreter(endpoint_config=endpoint) await interpreter.parse(text="message_text", message_id="message_id") r = latest_request(mocked, "POST", joined_url) diff --git a/tests/core/test_lock_store.py b/tests/core/test_lock_store.py index abdd77777af5..38b767170aa7 100644 --- a/tests/core/test_lock_store.py +++ b/tests/core/test_lock_store.py @@ -1,29 +1,33 @@ import asyncio -import copy import os -from typing import Union, Text -from unittest.mock import patch import numpy as np import pytest import time + +from _pytest.monkeypatch import MonkeyPatch from _pytest.tmpdir import TempdirFactory +from unittest.mock import patch, Mock -import rasa.utils.io from rasa.core.agent import Agent from rasa.core.channels import UserMessage from rasa.core.constants import INTENT_MESSAGE_PREFIX, DEFAULT_LOCK_LIFETIME -from rasa.core.lock import TicketLock, Ticket -from rasa.core.lock_store import InMemoryLockStore, LockError, TicketExistsError +from rasa.core.lock import TicketLock +from rasa.core.lock_store import InMemoryLockStore, LockError, LockStore, RedisLockStore + + +class FakeRedisLockStore(RedisLockStore): + """Fake `RedisLockStore` using `fakeredis` library.""" + + def __init__(self): + import fakeredis + self.red = fakeredis.FakeStrictRedis() -@pytest.fixture(scope="session") -def loop(): - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - loop = rasa.utils.io.enable_async_loop_debugging(loop) - yield loop - loop.close() + # added in redis==3.3.0, but not yet in fakeredis + self.red.connection_pool.connection_class.health_check_interval = 0 + + super(RedisLockStore, self).__init__() def test_issue_ticket(): @@ -61,8 +65,8 @@ def test_remove_expired_tickets(): assert len(lock.tickets) == 1 -def test_create_lock_store(): - lock_store = InMemoryLockStore() +@pytest.mark.parametrize("lock_store", [InMemoryLockStore(), FakeRedisLockStore()]) +def test_create_lock_store(lock_store: LockStore): conversation_id = "my id 0" # create and lock @@ -73,8 +77,8 @@ def test_create_lock_store(): assert lock.conversation_id == conversation_id -def test_serve_ticket(): - lock_store = InMemoryLockStore() +@pytest.mark.parametrize("lock_store", [InMemoryLockStore(), FakeRedisLockStore()]) +def test_serve_ticket(lock_store: LockStore): conversation_id = "my id 1" lock = lock_store.create_lock(conversation_id) @@ -108,8 +112,9 @@ def test_serve_ticket(): assert not lock.is_someone_waiting() -def test_lock_expiration(): - lock_store = InMemoryLockStore() +# noinspection PyProtectedMember +@pytest.mark.parametrize("lock_store", [InMemoryLockStore(), FakeRedisLockStore()]) +def test_lock_expiration(lock_store: LockStore): conversation_id = "my id 2" lock = lock_store.create_lock(conversation_id) lock_store.save_lock(lock) @@ -129,37 +134,10 @@ def test_lock_expiration(): assert lock.issue_ticket(10) == 1 -def test_ticket_exists_error(): - def mocked_issue_ticket( - self, - conversation_id: Text, - lock_lifetime: Union[float, int] = DEFAULT_LOCK_LIFETIME, - ) -> None: - # mock LockStore.issue_ticket() so 
it issues two tickets for the same - # conversation ID simultaneously - - lock = self.get_or_create_lock(conversation_id) - lock.issue_ticket(lock_lifetime) - self.save_lock(lock) - - # issue another ticket for this lock - lock_2 = copy.deepcopy(lock) - lock_2.tickets.append(Ticket(1, time.time() + DEFAULT_LOCK_LIFETIME)) - - self.ensure_ticket_available(lock_2) - - lock_store = InMemoryLockStore() - conversation_id = "my id 3" - - with patch.object(InMemoryLockStore, "issue_ticket", mocked_issue_ticket): - with pytest.raises(TicketExistsError): - lock_store.issue_ticket(conversation_id) - - async def test_multiple_conversation_ids(default_agent: Agent): text = INTENT_MESSAGE_PREFIX + 'greet{"name":"Rasa"}' - conversation_ids = ["conversation {}".format(i) for i in range(2)] + conversation_ids = [f"conversation {i}" for i in range(2)] # ensure conversations are processed in order tasks = [default_agent.handle_text(text, sender_id=_id) for _id in conversation_ids] @@ -185,9 +163,7 @@ async def test_message_order(tmpdir_factory: TempdirFactory, default_agent: Agen # record messages as they come and and as they're processed in files so we # can check the order later on. We don't need the return value of this method so # we'll just return None. - async def mocked_handle_message( - self, message: UserMessage, wait: Union[int, float] - ) -> None: + async def mocked_handle_message(self, message: UserMessage, wait: float) -> None: # write incoming message to file with open(str(incoming_order_file), "a+") as f_0: f_0.write(message.text + "\n") @@ -213,7 +189,7 @@ async def mocked_handle_message( wait_times = np.linspace(0.1, 0.05, n_messages) tasks = [ default_agent.handle_message( - UserMessage("sender {0}".format(i), sender_id="some id"), wait=k + UserMessage(f"sender {i}", sender_id="some id"), wait=k ) for i, k in enumerate(wait_times) ] @@ -221,16 +197,16 @@ async def mocked_handle_message( # execute futures await asyncio.gather(*(asyncio.ensure_future(t) for t in tasks)) - expected_order = ["sender {0}".format(i) for i in range(len(wait_times))] + expected_order = [f"sender {i}" for i in range(len(wait_times))] # ensure order of incoming messages is as expected with open(str(incoming_order_file)) as f: - incoming_order = [l for l in f.read().split("\n") if l] + incoming_order = [line for line in f.read().split("\n") if line] assert incoming_order == expected_order # ensure results are processed in expected order with open(str(results_file)) as f: - results_order = [l for l in f.read().split("\n") if l] + results_order = [line for line in f.read().split("\n") if line] assert results_order == expected_order # Every message after the first one will wait `lock_wait` seconds to acquire its @@ -266,7 +242,7 @@ async def mocked_handle_message(self, message: UserMessage) -> None: # meaning the second message will not be able to acquire a lock tasks = [ default_agent.handle_message( - UserMessage("sender {0}".format(i), sender_id="some id") + UserMessage(f"sender {i}", sender_id="some id") ) for i in range(2) ] @@ -275,17 +251,29 @@ async def mocked_handle_message(self, message: UserMessage) -> None: await asyncio.gather(*(asyncio.ensure_future(t) for t in tasks)) -async def test_lock_lifetime_environment_variable(): +async def test_lock_lifetime_environment_variable(monkeypatch: MonkeyPatch): import rasa.core.lock_store - import importlib # by default lock lifetime is `DEFAULT_LOCK_LIFETIME` - assert rasa.core.lock_store.LOCK_LIFETIME == DEFAULT_LOCK_LIFETIME + assert 
rasa.core.lock_store._get_lock_lifetime() == DEFAULT_LOCK_LIFETIME # set new lock lifetime as environment variable new_lock_lifetime = 123 - os.environ["TICKET_LOCK_LIFETIME"] = str(new_lock_lifetime) + monkeypatch.setenv("TICKET_LOCK_LIFETIME", str(new_lock_lifetime)) + + assert rasa.core.lock_store._get_lock_lifetime() == new_lock_lifetime + + +async def test_redis_lock_store_timeout(monkeypatch: MonkeyPatch): + import redis.exceptions + + lock_store = FakeRedisLockStore() + monkeypatch.setattr( + lock_store, + lock_store.get_or_create_lock.__name__, + Mock(side_effect=redis.exceptions.TimeoutError), + ) - # reload module and check value is updated - importlib.reload(rasa.core.lock_store) - assert rasa.core.lock_store.LOCK_LIFETIME == new_lock_lifetime + with pytest.raises(LockError): + async with lock_store.lock("some sender"): + pass diff --git a/tests/core/test_model.py b/tests/core/test_model.py index 40824f596984..13485fc80974 100644 --- a/tests/core/test_model.py +++ b/tests/core/test_model.py @@ -2,7 +2,9 @@ import tempfile import time import shutil -from typing import Text, Optional +from pathlib import Path +from typing import Text, Optional, Any +from unittest.mock import Mock import pytest from _pytest.tmpdir import TempdirFactory @@ -11,51 +13,61 @@ import rasa.core import rasa.nlu from rasa.importers.rasa import RasaFileImporter -from rasa.constants import DEFAULT_CONFIG_PATH, DEFAULT_DATA_PATH +from rasa.constants import ( + DEFAULT_CONFIG_PATH, + DEFAULT_DATA_PATH, + DEFAULT_DOMAIN_PATH, + DEFAULT_CORE_SUBDIRECTORY_NAME, +) from rasa.core.domain import Domain +from rasa.core.utils import get_dict_hash +from rasa import model from rasa.model import ( FINGERPRINT_CONFIG_KEY, - FINGERPRINT_DOMAIN_KEY, + FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY, + FINGERPRINT_NLG_KEY, FINGERPRINT_FILE_PATH, FINGERPRINT_NLU_DATA_KEY, FINGERPRINT_RASA_VERSION_KEY, FINGERPRINT_STORIES_KEY, FINGERPRINT_TRAINED_AT_KEY, - core_fingerprint_changed, + FINGERPRINT_CONFIG_CORE_KEY, + FINGERPRINT_CONFIG_NLU_KEY, + SECTION_CORE, + SECTION_NLU, create_package_rasa, get_latest_model, get_model, get_model_subdirectories, model_fingerprint, - nlu_fingerprint_changed, Fingerprint, + did_section_fingerprint_change, should_retrain, - FINGERPRINT_CONFIG_CORE_KEY, - FINGERPRINT_CONFIG_NLU_KEY, + FingerprintComparisonResult, ) from rasa.exceptions import ModelNotFound -def test_get_latest_model(trained_model): +def test_get_latest_model(trained_rasa_model: str): import shutil - path_of_latest = os.path.join(os.path.dirname(trained_model), "latest.tar.gz") - shutil.copy(trained_model, path_of_latest) + path_of_latest = os.path.join(os.path.dirname(trained_rasa_model), "latest.tar.gz") + shutil.copy(trained_rasa_model, path_of_latest) model_directory = os.path.dirname(path_of_latest) assert get_latest_model(model_directory) == path_of_latest -def test_get_model_from_directory(trained_model): - unpacked = get_model(trained_model) +def test_get_model_from_directory(trained_rasa_model: str): + unpacked = get_model(trained_rasa_model) - assert os.path.exists(os.path.join(unpacked, "core")) + assert os.path.exists(os.path.join(unpacked, DEFAULT_CORE_SUBDIRECTORY_NAME)) assert os.path.exists(os.path.join(unpacked, "nlu")) -def test_get_model_context_manager(trained_model): - with get_model(trained_model) as unpacked: +def test_get_model_context_manager(trained_rasa_model: str): + with get_model(trained_rasa_model) as unpacked: assert os.path.exists(unpacked) assert not os.path.exists(unpacked) @@ -68,9 +80,9 @@ def 
test_get_model_exception(model_path): def test_get_model_from_directory_with_subdirectories( - trained_model, tmpdir_factory: TempdirFactory + trained_rasa_model, tmpdir_factory: TempdirFactory ): - unpacked = get_model(trained_model) + unpacked = get_model(trained_rasa_model) unpacked_core, unpacked_nlu = get_model_subdirectories(unpacked) assert unpacked_core @@ -81,9 +93,9 @@ def test_get_model_from_directory_with_subdirectories( get_model_subdirectories(directory) -def test_get_model_from_directory_nlu_only(trained_model): - unpacked = get_model(trained_model) - shutil.rmtree(os.path.join(unpacked, "core")) +def test_get_model_from_directory_nlu_only(trained_rasa_model): + unpacked = get_model(trained_rasa_model) + shutil.rmtree(os.path.join(unpacked, DEFAULT_CORE_SUBDIRECTORY_NAME)) unpacked_core, unpacked_nlu = get_model_subdirectories(unpacked) assert not unpacked_core @@ -91,13 +103,14 @@ def test_get_model_from_directory_nlu_only(trained_model): def _fingerprint( - config: Optional[Text] = None, - config_nlu: Optional[Text] = None, - config_core: Optional[Text] = None, - domain: Optional[int] = None, + config: Optional[Any] = None, + config_nlu: Optional[Any] = None, + config_core: Optional[Any] = None, + domain: Optional[Any] = None, + nlg: Optional[Any] = None, + stories: Optional[Any] = None, + nlu: Optional[Any] = None, rasa_version: Text = "1.0", - stories: Optional[int] = None, - nlu: Optional[int] = None, ): return { FINGERPRINT_CONFIG_KEY: config if config is not None else ["test"], @@ -105,7 +118,8 @@ def _fingerprint( if config_core is not None else ["test"], FINGERPRINT_CONFIG_NLU_KEY: config_nlu if config_nlu is not None else ["test"], - FINGERPRINT_DOMAIN_KEY: domain if domain is not None else ["test"], + FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY: domain if domain is not None else ["test"], + FINGERPRINT_NLG_KEY: nlg if nlg is not None else ["test"], FINGERPRINT_TRAINED_AT_KEY: time.time(), FINGERPRINT_RASA_VERSION_KEY: rasa_version, FINGERPRINT_STORIES_KEY: stories if stories is not None else ["test"], @@ -126,39 +140,52 @@ def test_persist_and_load_fingerprint(): @pytest.mark.parametrize( - "fingerprint2", + "fingerprint2, changed", [ - _fingerprint(config=["other"]), - _fingerprint(domain=["other"]), - _fingerprint(domain=Domain.empty()), - _fingerprint(stories=["test", "other"]), - _fingerprint(rasa_version="100"), - _fingerprint(config=["other"], domain=["other"]), + (_fingerprint(config=["other"]), True), + (_fingerprint(config_core=["other"]), True), + (_fingerprint(domain=["other"]), True), + (_fingerprint(domain=Domain.empty()), True), + (_fingerprint(stories=["test", "other"]), True), + (_fingerprint(rasa_version="100"), True), + (_fingerprint(config=["other"], domain=["other"]), True), + (_fingerprint(nlg=["other"]), False), + (_fingerprint(nlu=["test", "other"]), False), + (_fingerprint(config_nlu=["other"]), False), ], ) -def test_core_fingerprint_changed(fingerprint2): +def test_core_fingerprint_changed(fingerprint2, changed): fingerprint1 = _fingerprint() - assert core_fingerprint_changed(fingerprint1, fingerprint2) + assert ( + did_section_fingerprint_change(fingerprint1, fingerprint2, SECTION_CORE) + is changed + ) @pytest.mark.parametrize( - "fingerprint2", + "fingerprint2, changed", [ - _fingerprint(config=["other"]), - _fingerprint(nlu=["test", "other"]), - _fingerprint(rasa_version="100"), - _fingerprint(rasa_version="100", config=["other"]), + (_fingerprint(config=["other"]), True), + (_fingerprint(nlu=["test", "other"]), True), + 
(_fingerprint(rasa_version="100"), True), + (_fingerprint(rasa_version="100", config=["other"]), True), + (_fingerprint(nlg=["other"]), False), + (_fingerprint(config_core=["other"]), False), + (_fingerprint(stories=["other"]), False), ], ) -def test_nlu_fingerprint_changed(fingerprint2): +def test_nlu_fingerprint_changed(fingerprint2, changed): fingerprint1 = _fingerprint() - assert nlu_fingerprint_changed(fingerprint1, fingerprint2) + assert ( + did_section_fingerprint_change(fingerprint1, fingerprint2, SECTION_NLU) + is changed + ) def _project_files( project, config_file=DEFAULT_CONFIG_PATH, - domain="domain.yml", + domain=DEFAULT_DOMAIN_PATH, training_files=DEFAULT_DATA_PATH, ): paths = { @@ -192,9 +219,10 @@ async def test_create_fingerprint_from_invalid_paths(project, project_files): config_nlu="", config_core="", domain=hash(Domain.empty()), - rasa_version=rasa.__version__, + nlg=get_dict_hash(Domain.empty().templates), stories=hash(StoryGraph([])), nlu=hash(TrainingData()), + rasa_version=rasa.__version__, ) actual = await model_fingerprint(project_files) @@ -207,8 +235,8 @@ async def test_create_fingerprint_from_invalid_paths(project, project_files): @pytest.mark.parametrize("use_fingerprint", [True, False]) -async def test_rasa_packaging(trained_model, project, use_fingerprint): - unpacked_model_path = get_model(trained_model) +async def test_rasa_packaging(trained_rasa_model, project, use_fingerprint): + unpacked_model_path = get_model(trained_rasa_model) os.remove(os.path.join(unpacked_model_path, FINGERPRINT_FILE_PATH)) if use_fingerprint: @@ -226,7 +254,7 @@ async def test_rasa_packaging(trained_model, project, use_fingerprint): assert ( os.path.exists(os.path.join(unpacked, FINGERPRINT_FILE_PATH)) == use_fingerprint ) - assert os.path.exists(os.path.join(unpacked, "core")) + assert os.path.exists(os.path.join(unpacked, DEFAULT_CORE_SUBDIRECTORY_NAME)) assert os.path.exists(os.path.join(unpacked, "nlu")) assert not os.path.exists(unpacked_model_path) @@ -240,60 +268,66 @@ async def test_rasa_packaging(trained_model, project, use_fingerprint): "old": _fingerprint(stories=["others"]), "retrain_core": True, "retrain_nlu": False, + "retrain_nlg": True, }, { "new": _fingerprint(nlu=["others"]), "old": _fingerprint(), "retrain_core": False, "retrain_nlu": True, + "retrain_nlg": False, }, { "new": _fingerprint(config="others"), "old": _fingerprint(), "retrain_core": True, "retrain_nlu": True, + "retrain_nlg": True, }, { "new": _fingerprint(config_core="others"), "old": _fingerprint(), "retrain_core": True, "retrain_nlu": False, + "retrain_nlg": True, }, { "new": _fingerprint(), "old": _fingerprint(config_nlu="others"), "retrain_core": False, "retrain_nlu": True, + "retrain_nlg": False, }, { "new": _fingerprint(), "old": _fingerprint(), "retrain_core": False, "retrain_nlu": False, + "retrain_nlg": False, + }, + { + "new": _fingerprint(), + "old": _fingerprint(nlg=["others"]), + "retrain_core": False, + "retrain_nlu": False, + "retrain_nlg": True, }, ], ) -def test_should_retrain(trained_model, fingerprint): - old_model = set_fingerprint(trained_model, fingerprint["old"]) +def test_should_retrain(trained_rasa_model: Text, fingerprint: Fingerprint): + old_model = set_fingerprint(trained_rasa_model, fingerprint["old"]) - retrain_core, retrain_nlu = should_retrain( - fingerprint["new"], old_model, tempfile.mkdtemp() - ) + retrain = should_retrain(fingerprint["new"], old_model, tempfile.mkdtemp()) - assert retrain_core == fingerprint["retrain_core"] - assert retrain_nlu == 
fingerprint["retrain_nlu"] + assert retrain.should_retrain_core() == fingerprint["retrain_core"] + assert retrain.should_retrain_nlg() == fingerprint["retrain_nlg"] + assert retrain.should_retrain_nlu() == fingerprint["retrain_nlu"] -def set_fingerprint( - trained_model: Text, fingerprint: Fingerprint, use_fingerprint: bool = True -) -> Text: - unpacked_model_path = get_model(trained_model) +def set_fingerprint(trained_rasa_model: Text, fingerprint: Fingerprint) -> Text: + unpacked_model_path = get_model(trained_rasa_model) os.remove(os.path.join(unpacked_model_path, FINGERPRINT_FILE_PATH)) - if use_fingerprint: - fingerprint = fingerprint - else: - fingerprint = None tempdir = tempfile.mkdtemp() output_path = os.path.join(tempdir, "test.tar.gz") @@ -301,3 +335,62 @@ def set_fingerprint( create_package_rasa(unpacked_model_path, output_path, fingerprint) return output_path + + +@pytest.mark.parametrize( + "comparison_result,retrain_all,retrain_core,retrain_nlg,retrain_nlu", + [ + (FingerprintComparisonResult(force_training=True), True, True, True, True), + ( + FingerprintComparisonResult(core=True, nlu=False, nlg=False), + True, + True, + True, + False, + ), + ( + FingerprintComparisonResult(core=False, nlu=True, nlg=False), + True, + False, + False, + True, + ), + ( + FingerprintComparisonResult(core=True, nlu=True, nlg=False), + True, + True, + True, + True, + ), + ], +) +def test_fingerprint_comparison_result( + comparison_result: FingerprintComparisonResult, + retrain_all: bool, + retrain_core: bool, + retrain_nlg: bool, + retrain_nlu: bool, +): + assert comparison_result.is_training_required() == retrain_all + assert comparison_result.should_retrain_core() == retrain_core + assert comparison_result.should_retrain_nlg() == retrain_nlg + assert comparison_result.should_retrain_nlu() == retrain_nlu + + +async def test_update_with_new_domain(trained_rasa_model: Text, tmpdir: Path): + _ = model.unpack_model(trained_rasa_model, tmpdir) + + new_domain = Domain.empty() + + mocked_importer = Mock() + + async def get_domain() -> Domain: + return new_domain + + mocked_importer.get_domain = get_domain + + await model.update_model_with_new_domain(mocked_importer, tmpdir) + + actual = Domain.load(tmpdir / DEFAULT_CORE_SUBDIRECTORY_NAME / DEFAULT_DOMAIN_PATH) + + assert actual.is_empty() diff --git a/tests/core/test_nlg.py b/tests/core/test_nlg.py index 3cf6c3241da9..ae0a9efc4c87 100644 --- a/tests/core/test_nlg.py +++ b/tests/core/test_nlg.py @@ -1,12 +1,10 @@ -import asyncio import uuid +from typing import Text, Any import jsonschema import pytest -from flask import Flask, request, jsonify -from pytest_localserver.http import WSGIServer +from sanic import Sanic, response -import rasa.utils.io from rasa.core.nlg.callback import ( nlg_request_format_spec, CallbackNaturalLanguageGenerator, @@ -17,20 +15,12 @@ from tests.core.conftest import DEFAULT_ENDPOINTS_FILE -@pytest.fixture(scope="session") -def loop(): - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - loop = rasa.utils.io.enable_async_loop_debugging(loop) - yield loop - loop.close() - - def nlg_app(base_url="/"): - app = Flask(__name__) + + app = Sanic(__name__) @app.route(base_url, methods=["POST"]) - def generate(): + async def generate(request): """Simple HTTP NLG generator, checks that the incoming request is format according to the spec.""" @@ -39,29 +29,25 @@ def generate(): jsonschema.validate(nlg_call, nlg_request_format_spec()) if nlg_call.get("template") == "utter_greet": - response = {"text": "Hey there!"} + 
response_dict = {"text": "Hey there!"} else: - response = {"text": "Sorry, didn't get that."} - return jsonify(response) + response_dict = {"text": "Sorry, didn't get that."} + return response.json(response_dict) return app # noinspection PyShadowingNames -@pytest.fixture(scope="module") -def http_nlg(request): - http_server = WSGIServer(application=nlg_app()) - http_server.start() +@pytest.fixture() +def http_nlg(loop, sanic_client): + return loop.run_until_complete(sanic_client(nlg_app())) - request.addfinalizer(http_server.stop) - return http_server.url - -async def test_nlg(http_nlg, default_agent_path): +async def test_nlg(http_nlg, trained_rasa_model): sender = str(uuid.uuid1()) - nlg_endpoint = EndpointConfig.from_dict({"url": http_nlg}) - agent = Agent.load(default_agent_path, None, generator=nlg_endpoint) + nlg_endpoint = EndpointConfig.from_dict({"url": http_nlg.make_url("/")}) + agent = Agent.load(trained_rasa_model, None, generator=nlg_endpoint) response = await agent.handle_text("/greet", sender_id=sender) assert len(response) == 1 @@ -108,8 +94,8 @@ def test_nlg_schema_validation_empty_image(): ("null", None), ], ) -def test_nlg_fill_template_text(slot_name, slot_value): - template = {"text": "{" + slot_name + "}"} +def test_nlg_fill_template_text(slot_name: Text, slot_value: Any): + template = {"text": f"{{{slot_name}}}"} t = TemplatedNaturalLanguageGenerator(templates=dict()) result = t._fill_template(template=template, filled_slots={slot_name: slot_value}) assert result == {"text": str(slot_value)} @@ -119,8 +105,8 @@ def test_nlg_fill_template_text(slot_name, slot_value): "img_slot_name, img_slot_value", [("url", "https://www.exampleimg.com"), ("img1", "https://www.appleimg.com")], ) -def test_nlg_fill_template_image(img_slot_name, img_slot_value): - template = {"image": "{" + img_slot_name + "}"} +def test_nlg_fill_template_image(img_slot_name: Text, img_slot_value: Text): + template = {"image": f"{{{img_slot_name}}}"} t = TemplatedNaturalLanguageGenerator(templates=dict()) result = t._fill_template( template=template, filled_slots={img_slot_name: img_slot_value} @@ -147,20 +133,43 @@ def test_nlg_fill_template_image(img_slot_name, img_slot_value): ("null", None), ], ) -def test_nlg_fill_template_custom(slot_name, slot_value): - template = {"text": "{" + slot_name + "}"} +def test_nlg_fill_template_custom(slot_name: Text, slot_value: Any): template = { "custom": { - "field": "{" + slot_name + "}", - "properties": {"field_prefixed": "prefix_{" + slot_name + "}"}, + "field": f"{{{slot_name}}}", + "properties": {"field_prefixed": f"prefix_{{{slot_name}}}"}, + "bool_field": True, + "int_field:": 42, + "empty_field": None, } } t = TemplatedNaturalLanguageGenerator(templates=dict()) result = t._fill_template(template=template, filled_slots={slot_name: slot_value}) + assert result == { "custom": { "field": str(slot_value), - "properties": {"field_prefixed": "prefix_" + str(slot_value)}, + "properties": {"field_prefixed": f"prefix_{slot_value}"}, + "bool_field": True, + "int_field:": 42, + "empty_field": None, + } + } + + +def test_nlg_fill_template_custom_with_list(): + template = { + "custom": { + "blocks": [{"fields": [{"text": "*Departure date:*\n{test}"}]}], + "other": ["{test}"], + } + } + t = TemplatedNaturalLanguageGenerator(templates=dict()) + result = t._fill_template(template=template, filled_slots={"test": 5}) + assert result == { + "custom": { + "blocks": [{"fields": [{"text": "*Departure date:*\n5"}]}], + "other": ["5"], } } @@ -186,7 +195,7 @@ def 
test_nlg_fill_template_text_with_json(template_text, expected): @pytest.mark.parametrize("slot_name, slot_value", [("tag_w_\n", "a")]) def test_nlg_fill_template_with_bad_slot_name(slot_name, slot_value): - template_text = "{" + slot_name + "}" + template_text = f"{{{slot_name}}}" t = TemplatedNaturalLanguageGenerator(templates=dict()) result = t._fill_template( template={"text": template_text}, filled_slots={slot_name: slot_value} @@ -204,7 +213,7 @@ def test_nlg_fill_template_with_bad_slot_name(slot_name, slot_value): def test_nlg_fill_template_image_and_text( text_slot_name, text_slot_value, img_slot_name, img_slot_value ): - template = {"text": "{" + text_slot_name + "}", "image": "{" + img_slot_name + "}"} + template = {"text": f"{{{text_slot_name}}}", "image": f"{{{img_slot_name}}}"} t = TemplatedNaturalLanguageGenerator(templates=dict()) result = t._fill_template( template=template, @@ -224,10 +233,10 @@ def test_nlg_fill_template_text_and_custom( text_slot_name, text_slot_value, cust_slot_name, cust_slot_value ): template = { - "text": "{" + text_slot_name + "}", + "text": f"{{{text_slot_name}}}", "custom": { - "field": "{" + cust_slot_name + "}", - "properties": {"field_prefixed": "prefix_{" + cust_slot_name + "}"}, + "field": f"{{{cust_slot_name}}}", + "properties": {"field_prefixed": f"prefix_{{{cust_slot_name}}}"}, }, } t = TemplatedNaturalLanguageGenerator(templates=dict()) @@ -239,7 +248,7 @@ def test_nlg_fill_template_text_and_custom( "text": str(text_slot_value), "custom": { "field": str(cust_slot_value), - "properties": {"field_prefixed": "prefix_" + str(cust_slot_value)}, + "properties": {"field_prefixed": f"prefix_{str(cust_slot_value)}"}, }, } @@ -257,15 +266,29 @@ def test_nlg_fill_template_attachment(attach_slot_name, attach_slot_value): @pytest.mark.parametrize( - "button_slot_name, button_slot_value", [("button_1", "button1")] + "button_slot_name, button_slot_value", [("button_1", "button_1")] ) def test_nlg_fill_template_button(button_slot_name, button_slot_value): - template = {"button": "{" + button_slot_name + "}"} + template = { + "buttons": [ + { + "payload": f'/choose{{{{"some_slot": "{{{button_slot_name}}}"}}}}', + "title": f"{{{button_slot_name}}}", + } + ] + } t = TemplatedNaturalLanguageGenerator(templates=dict()) result = t._fill_template( template=template, filled_slots={button_slot_name: button_slot_value} ) - assert result == {"button": str(button_slot_value)} + assert result == { + "buttons": [ + { + "payload": f'/choose{{"some_slot": "{button_slot_value}"}}', + "title": f"{button_slot_value}", + } + ] + } @pytest.mark.parametrize( @@ -274,7 +297,7 @@ def test_nlg_fill_template_button(button_slot_name, button_slot_value): def test_nlg_fill_template_quick_replies( quick_replies_slot_name, quick_replies_slot_value ): - template = {"quick_replies": "{" + quick_replies_slot_name + "}"} + template = {"quick_replies": f"{{{quick_replies_slot_name}}}"} t = TemplatedNaturalLanguageGenerator(templates=dict()) result = t._fill_template( template=template, diff --git a/tests/core/test_policies.py b/tests/core/test_policies.py index 0346405b71b9..9232677cbb40 100644 --- a/tests/core/test_policies.py +++ b/tests/core/test_policies.py @@ -1,11 +1,9 @@ -import asyncio -from unittest.mock import patch +from typing import Type +from unittest.mock import Mock, patch import numpy as np import pytest -import rasa.utils.io -from rasa.utils import train_utils from rasa.core import training from rasa.core.actions.action import ( ACTION_DEFAULT_ASK_AFFIRMATION_NAME, @@ 
-26,15 +24,29 @@ MaxHistoryTrackerFeaturizer, FullDialogueTrackerFeaturizer, ) +from rasa.core.interpreter import RegexInterpreter +from rasa.core.policies.form_policy import FormPolicy +from rasa.core.policies.policy import SupportedData, Policy +from rasa.core.policies.rule_policy import RulePolicy from rasa.core.policies.two_stage_fallback import TwoStageFallbackPolicy -from rasa.core.policies.embedding_policy import EmbeddingPolicy +from rasa.core.policies.ted_policy import TEDPolicy from rasa.core.policies.fallback import FallbackPolicy -from rasa.core.policies.form_policy import FormPolicy -from rasa.core.policies.keras_policy import KerasPolicy from rasa.core.policies.mapping_policy import MappingPolicy from rasa.core.policies.memoization import AugmentedMemoizationPolicy, MemoizationPolicy from rasa.core.policies.sklearn_policy import SklearnPolicy from rasa.core.trackers import DialogueStateTracker +from rasa.nlu.constants import INTENT_NAME_KEY +from rasa.utils.tensorflow.constants import ( + SIMILARITY_TYPE, + RANKING_LENGTH, + LOSS_TYPE, + SCALE_LOSS, + EVAL_NUM_EXAMPLES, + KEY_RELATIVE_ATTENTION, + VALUE_RELATIVE_ATTENTION, + MAX_RELATIVE_POSITION, +) +from rasa.utils import train_utils from tests.core.conftest import ( DEFAULT_DOMAIN_PATH_WITH_MAPPING, DEFAULT_DOMAIN_PATH_WITH_SLOTS, @@ -43,50 +55,12 @@ from tests.core.utilities import get_tracker, read_dialogue_file, user_uttered -def tf_defaults(): - return { - "tf_config": { - "device_count": {"CPU": 4}, - # tell tf.Session to use CPU limit, if you have - # more CPU, you can increase this value appropriately - "inter_op_parallelism_threads": 0, - # the number of threads in the thread pool available - # for each process for blocking operation nodes set to 0 - # to allow the system to select the appropriate value. - "intra_op_parallelism_threads": 0, # tells the degree of thread - # parallelism of the tf.Session operation. - # the smaller the value, the less reuse the thread will have - # and the more likely it will use more CPU cores. - # if the value is 0, - # tensorflow will automatically select an appropriate value. - "gpu_options": {"allow_growth": True} - # if set True, will try to allocate - # as much GPU memory as possible to support running - } - } - - -def session_config(): - import tensorflow as tf - - return tf.ConfigProto(**tf_defaults()["tf_config"]) - - async def train_trackers(domain, augmentation_factor=20): return await training.load_data( DEFAULT_STORIES_FILE, domain, augmentation_factor=augmentation_factor ) -@pytest.fixture(scope="module") -def loop(): - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - loop = rasa.utils.io.enable_async_loop_debugging(loop) - yield loop - loop.close() - - # We are going to use class style testing here since unfortunately pytest # doesn't support using fixtures as arguments to its own parameterize yet # (hence, we can't train a policy, declare it as a fixture and use the @@ -94,7 +68,7 @@ def loop(): # Therefore, we are going to reverse this and train the policy within a class # and collect the tests in a base class. # noinspection PyMethodMayBeStatic -class PolicyTestCollection(object): +class PolicyTestCollection: """Tests every policy needs to fulfill. 
Each policy can declare further tests on its own.""" @@ -115,12 +89,20 @@ def featurizer(self): def priority(self): return 1 + @pytest.fixture(scope="module") + def default_domain(self): + return Domain.load(DEFAULT_DOMAIN_PATH_WITH_SLOTS) + + @pytest.fixture(scope="module") + def tracker(self, default_domain): + return DialogueStateTracker(UserMessage.DEFAULT_SENDER_ID, default_domain.slots) + @pytest.fixture(scope="module") async def trained_policy(self, featurizer, priority): default_domain = Domain.load(DEFAULT_DOMAIN_PATH_WITH_SLOTS) policy = self.create_policy(featurizer, priority) training_trackers = await train_trackers(default_domain, augmentation_factor=20) - policy.train(training_trackers, default_domain) + policy.train(training_trackers, default_domain, RegexInterpreter()) return policy def test_featurizer(self, trained_policy, tmpdir): @@ -137,12 +119,6 @@ def test_featurizer(self, trained_policy, tmpdir): loaded.featurizer.state_featurizer, BinarySingleStateFeaturizer ) - async def test_continue_training(self, trained_policy, default_domain): - training_trackers = await train_trackers(default_domain, augmentation_factor=0) - trained_policy.continue_training( - training_trackers, default_domain, **{"epochs": 1} - ) - async def test_persist_and_load(self, trained_policy, default_domain, tmpdir): trained_policy.persist(tmpdir.strpath) loaded = trained_policy.__class__.load(tmpdir.strpath) @@ -177,17 +153,6 @@ def test_persist_and_load_empty_policy(self, tmpdir): loaded = empty_policy.__class__.load(tmpdir.strpath) assert loaded is not None - def test_tf_config(self, trained_policy, tmpdir): - if hasattr(trained_policy, "session"): - import tensorflow as tf - - # noinspection PyProtectedMember - assert trained_policy.session._config == tf.Session()._config - trained_policy.persist(tmpdir.strpath) - loaded = trained_policy.__class__.load(tmpdir.strpath) - # noinspection PyProtectedMember - assert loaded.session._config == tf.Session()._config - @staticmethod def _get_next_action(policy, events, domain): tracker = get_tracker(events) @@ -197,26 +162,6 @@ def _get_next_action(policy, events, domain): return domain.action_names[index] -class TestKerasPolicy(PolicyTestCollection): - def create_policy(self, featurizer, priority): - p = KerasPolicy(featurizer, priority) - return p - - -class TestKerasPolicyWithTfConfig(PolicyTestCollection): - def create_policy(self, featurizer, priority): - p = KerasPolicy(featurizer, priority, **tf_defaults()) - return p - - def test_tf_config(self, trained_policy, tmpdir): - # noinspection PyProtectedMember - assert trained_policy.session._config == session_config() - trained_policy.persist(tmpdir.strpath) - loaded = trained_policy.__class__.load(tmpdir.strpath) - # noinspection PyProtectedMember - assert loaded.session._config == session_config() - - class TestSklearnPolicy(PolicyTestCollection): def create_policy(self, featurizer, priority, **kwargs): p = SklearnPolicy(featurizer, priority, **kwargs) @@ -230,14 +175,6 @@ def mock_search(self): gs.return_value = gs # for __init__ yield gs - @pytest.fixture(scope="module") - def default_domain(self): - return Domain.load(DEFAULT_DOMAIN_PATH_WITH_SLOTS) - - @pytest.fixture - def tracker(self, default_domain): - return DialogueStateTracker(UserMessage.DEFAULT_SENDER_ID, default_domain.slots) - @pytest.fixture(scope="module") async def trackers(self, default_domain): return await train_trackers(default_domain, augmentation_factor=20) @@ -246,13 +183,18 @@ def test_additional_train_args_do_not_raise( 
self, default_domain, trackers, featurizer, priority ): policy = self.create_policy(featurizer=featurizer, priority=priority, cv=None) - policy.train(trackers, domain=default_domain, this_is_not_a_feature=True) + policy.train( + trackers, + domain=default_domain, + interpreter=RegexInterpreter(), + this_is_not_a_feature=True, + ) def test_cv_none_does_not_trigger_search( self, mock_search, default_domain, trackers, featurizer, priority ): policy = self.create_policy(featurizer=featurizer, priority=priority, cv=None) - policy.train(trackers, domain=default_domain) + policy.train(trackers, domain=default_domain, interpreter=RegexInterpreter()) assert mock_search.call_count == 0 assert policy.model != "mockmodel" @@ -262,7 +204,7 @@ def test_cv_not_none_param_grid_none_triggers_search_without_params( ): policy = self.create_policy(featurizer=featurizer, priority=priority, cv=3) - policy.train(trackers, domain=default_domain) + policy.train(trackers, domain=default_domain, interpreter=RegexInterpreter()) assert mock_search.call_count > 0 assert mock_search.call_args_list[0][1]["cv"] == 3 @@ -276,7 +218,7 @@ def test_cv_not_none_param_grid_none_triggers_search_with_params( policy = self.create_policy( featurizer=featurizer, priority=priority, cv=3, param_grid=param_grid ) - policy.train(trackers, domain=default_domain) + policy.train(trackers, domain=default_domain, interpreter=RegexInterpreter()) assert mock_search.call_count > 0 assert mock_search.call_args_list[0][1]["cv"] == 3 @@ -308,7 +250,9 @@ def test_missing_classes_filled_correctly( new_trackers.append(new_tracker) - policy.train(new_trackers, domain=default_domain) + policy.train( + new_trackers, domain=default_domain, interpreter=RegexInterpreter() + ) predicted_probabilities = policy.predict_action_probabilities( tracker, default_domain ) @@ -327,7 +271,7 @@ def test_train_kwargs_are_set_on_model( policy = self.create_policy( featurizer=featurizer, priority=priority, cv=None, C=123 ) - policy.train(trackers, domain=default_domain) + policy.train(trackers, domain=default_domain, interpreter=RegexInterpreter()) assert policy.model.C == 123 def test_train_with_shuffle_false( @@ -337,76 +281,141 @@ def test_train_with_shuffle_false( featurizer=featurizer, priority=priority, shuffle=False ) # does not raise - policy.train(trackers, domain=default_domain) + policy.train(trackers, domain=default_domain, interpreter=RegexInterpreter()) -class TestEmbeddingPolicy(PolicyTestCollection): +class TestTEDPolicy(PolicyTestCollection): def create_policy(self, featurizer, priority): - p = EmbeddingPolicy(featurizer=featurizer, priority=priority) + p = TEDPolicy(featurizer=featurizer, priority=priority) return p def test_similarity_type(self, trained_policy): - assert trained_policy.similarity_type == "inner" + assert trained_policy.config[SIMILARITY_TYPE] == "inner" + + def test_ranking_length(self, trained_policy): + assert trained_policy.config[RANKING_LENGTH] == 10 + + def test_normalization(self, trained_policy, tracker, default_domain, monkeypatch): + # first check the output is what we expect + predicted_probabilities = trained_policy.predict_action_probabilities( + tracker, default_domain + ) + # count number of non-zero confidences + assert ( + sum([confidence > 0 for confidence in predicted_probabilities]) + == trained_policy.config[RANKING_LENGTH] + ) + # check that the norm is still 1 + assert sum(predicted_probabilities) == pytest.approx(1) + + # also check our function is called + mock = Mock() + monkeypatch.setattr(train_utils, 
"normalize", mock.normalize) + trained_policy.predict_action_probabilities(tracker, default_domain) + + mock.normalize.assert_called_once() async def test_gen_batch(self, trained_policy, default_domain): training_trackers = await train_trackers(default_domain, augmentation_factor=0) training_data = trained_policy.featurize_for_training( training_trackers, default_domain ) - session_data = trained_policy._create_session_data( - training_data.X, training_data.y - ) + model_data = trained_policy._create_model_data(training_data.X, training_data.y) batch_size = 2 - batch_x, batch_y = next( - train_utils.gen_batch(session_data=session_data, batch_size=batch_size) - ) + batch_x, batch_y, _ = next(model_data._gen_batch(batch_size=batch_size)) assert batch_x.shape[0] == batch_size and batch_y.shape[0] == batch_size assert ( - batch_x[0].shape == session_data.X[0].shape - and batch_y[0].shape == session_data.Y[0].shape - ) - batch_x, batch_y = next( - train_utils.gen_batch( - session_data=session_data, - batch_size=batch_size, - batch_strategy="balanced", - shuffle=True, + batch_x[0].shape == model_data.get("dialogue_features")[0][0].shape + and batch_y[0].shape == model_data.get("label_features")[0][0].shape + ) + batch_x, batch_y, _ = next( + model_data._gen_batch( + batch_size=batch_size, batch_strategy="balanced", shuffle=True ) ) assert batch_x.shape[0] == batch_size and batch_y.shape[0] == batch_size assert ( - batch_x[0].shape == session_data.X[0].shape - and batch_y[0].shape == session_data.Y[0].shape + batch_x[0].shape == model_data.get("dialogue_features")[0][0].shape + and batch_y[0].shape == model_data.get("label_features")[0][0].shape ) -class TestEmbeddingPolicyMargin(TestEmbeddingPolicy): +class TestTEDPolicyMargin(TestTEDPolicy): def create_policy(self, featurizer, priority): - p = EmbeddingPolicy( - featurizer=featurizer, priority=priority, **{"loss_type": "margin"} - ) + p = TEDPolicy(featurizer=featurizer, priority=priority, **{LOSS_TYPE: "margin"}) return p def test_similarity_type(self, trained_policy): - assert trained_policy.similarity_type == "cosine" + assert trained_policy.config[SIMILARITY_TYPE] == "cosine" + + def test_normalization(self, trained_policy, tracker, default_domain, monkeypatch): + # Mock actual normalization method + mock = Mock() + monkeypatch.setattr(train_utils, "normalize", mock.normalize) + trained_policy.predict_action_probabilities(tracker, default_domain) + + # function should not get called for margin loss_type + mock.normalize.assert_not_called() -class TestEmbeddingPolicyWithEval(TestEmbeddingPolicy): +class TestTEDPolicyWithEval(TestTEDPolicy): def create_policy(self, featurizer, priority): - p = EmbeddingPolicy( + p = TEDPolicy( featurizer=featurizer, priority=priority, - **{"scale_loss": False, "evaluate_on_num_examples": 4} + **{SCALE_LOSS: False, EVAL_NUM_EXAMPLES: 4}, + ) + return p + + +class TestTEDPolicyNoNormalization(TestTEDPolicy): + def create_policy(self, featurizer, priority): + p = TEDPolicy(featurizer=featurizer, priority=priority, **{RANKING_LENGTH: 0}) + return p + + def test_ranking_length(self, trained_policy): + assert trained_policy.config[RANKING_LENGTH] == 0 + + def test_normalization(self, trained_policy, tracker, default_domain, monkeypatch): + # first check the output is what we expect + predicted_probabilities = trained_policy.predict_action_probabilities( + tracker, default_domain ) + # there should be no normalization + assert all([confidence > 0 for confidence in predicted_probabilities]) + + # also check our 
function is not called + mock = Mock() + monkeypatch.setattr(train_utils, "normalize", mock.normalize) + trained_policy.predict_action_probabilities(tracker, default_domain) + + mock.normalize.assert_not_called() + + +class TestTEDPolicyLowRankingLength(TestTEDPolicy): + def create_policy(self, featurizer, priority): + p = TEDPolicy(featurizer=featurizer, priority=priority, **{RANKING_LENGTH: 3}) + return p + + def test_ranking_length(self, trained_policy): + assert trained_policy.config[RANKING_LENGTH] == 3 + + +class TestTEDPolicyHighRankingLength(TestTEDPolicy): + def create_policy(self, featurizer, priority): + p = TEDPolicy(featurizer=featurizer, priority=priority, **{RANKING_LENGTH: 11}) return p + def test_ranking_length(self, trained_policy): + assert trained_policy.config[RANKING_LENGTH] == 11 -class TestEmbeddingPolicyWithFullDialogue(TestEmbeddingPolicy): + +class TestTEDPolicyWithFullDialogue(TestTEDPolicy): def create_policy(self, featurizer, priority): - # use standard featurizer from EmbeddingPolicy, + # use standard featurizer from TEDPolicy, # since it is using FullDialogueTrackerFeaturizer # if max_history is not specified - p = EmbeddingPolicy(priority=priority) + p = TEDPolicy(priority=priority) return p def test_featurizer(self, trained_policy, tmpdir): @@ -423,12 +432,12 @@ def test_featurizer(self, trained_policy, tmpdir): ) -class TestEmbeddingPolicyWithMaxHistory(TestEmbeddingPolicy): +class TestTEDPolicyWithMaxHistory(TestTEDPolicy): def create_policy(self, featurizer, priority): - # use standard featurizer from EmbeddingPolicy, + # use standard featurizer from TEDPolicy, # since it is using MaxHistoryTrackerFeaturizer # if max_history is specified - p = EmbeddingPolicy(priority=priority, max_history=self.max_history) + p = TEDPolicy(priority=priority, max_history=self.max_history) return p def test_featurizer(self, trained_policy, tmpdir): @@ -447,18 +456,35 @@ def test_featurizer(self, trained_policy, tmpdir): ) -class TestEmbeddingPolicyWithTfConfig(TestEmbeddingPolicy): +class TestTEDPolicyWithRelativeAttention(TestTEDPolicy): def create_policy(self, featurizer, priority): - p = EmbeddingPolicy(featurizer=featurizer, priority=priority, **tf_defaults()) + p = TEDPolicy( + featurizer=featurizer, + priority=priority, + **{ + KEY_RELATIVE_ATTENTION: True, + VALUE_RELATIVE_ATTENTION: True, + MAX_RELATIVE_POSITION: 5, + }, + ) return p - def test_tf_config(self, trained_policy, tmpdir): - # noinspection PyProtectedMember - assert trained_policy.session._config == session_config() - trained_policy.persist(tmpdir.strpath) - loaded = trained_policy.__class__.load(tmpdir.strpath) - # noinspection PyProtectedMember - assert loaded.session._config == session_config() + +class TestTEDPolicyWithRelativeAttentionMaxHistoryOne(TestTEDPolicy): + + max_history = 1 + + def create_policy(self, featurizer, priority): + p = TEDPolicy( + featurizer=featurizer, + priority=priority, + **{ + KEY_RELATIVE_ATTENTION: True, + VALUE_RELATIVE_ATTENTION: True, + MAX_RELATIVE_POSITION: 5, + }, + ) + return p class TestMemoizationPolicy(PolicyTestCollection): @@ -479,7 +505,7 @@ def test_featurizer(self, trained_policy, tmpdir): async def test_memorise(self, trained_policy, default_domain): trackers = await train_trackers(default_domain, augmentation_factor=20) - trained_policy.train(trackers, default_domain) + trained_policy.train(trackers, default_domain, RegexInterpreter()) lookup_with_augmentation = trained_policy.lookup trackers = [ @@ -495,7 +521,7 @@ async def test_memorise(self, 
trained_policy, default_domain): for tracker, states, actions in zip(trackers, all_states, all_actions): recalled = trained_policy.recall(states, tracker, default_domain) - assert recalled == default_domain.index_for_action(actions[0]) + assert recalled == actions[0] nums = np.random.randn(default_domain.num_states) random_states = [{f: num for f, num in zip(default_domain.input_states, nums)}] @@ -505,7 +531,9 @@ async def test_memorise(self, trained_policy, default_domain): trackers_no_augmentation = await train_trackers( default_domain, augmentation_factor=0 ) - trained_policy.train(trackers_no_augmentation, default_domain) + trained_policy.train( + trackers_no_augmentation, default_domain, RegexInterpreter() + ) lookup_no_augmentation = trained_policy.lookup assert lookup_no_augmentation == lookup_with_augmentation @@ -538,10 +566,10 @@ def create_policy(self, featurizer, priority): p = FormPolicy(priority=priority) return p - async def test_memorise(self, trained_policy, default_domain): + async def test_memorise(self, trained_policy: FormPolicy, default_domain: Domain): domain = Domain.load("data/test_domains/form.yml") trackers = await training.load_data("data/test_stories/stories_form.md", domain) - trained_policy.train(trackers, domain) + trained_policy.train(trackers, domain, RegexInterpreter()) ( all_states, @@ -740,9 +768,6 @@ def create_policy(self, featurizer, priority): @pytest.fixture(scope="class") def default_domain(self): content = """ - actions: - - utter_hello - intents: - greet - bye @@ -782,7 +807,7 @@ async def test_affirmation(self, default_channel, default_nlg, default_domain): events, default_channel, default_nlg, default_domain ) - assert "greet" == tracker.latest_message.parse_data["intent"]["name"] + assert "greet" == tracker.latest_message.parse_data["intent"][INTENT_NAME_KEY] assert tracker.export_stories() == ( "## sender\n* greet\n - utter_hello\n* greet\n" ) @@ -818,7 +843,7 @@ async def test_successful_rephrasing( events, default_channel, default_nlg, default_domain ) - assert "bye" == tracker.latest_message.parse_data["intent"]["name"] + assert "bye" == tracker.latest_message.parse_data["intent"][INTENT_NAME_KEY] assert tracker.export_stories() == "## sender\n* bye\n" def test_affirm_rephrased_intent(self, trained_policy, default_domain): @@ -858,7 +883,7 @@ async def test_affirmed_rephrasing( events, default_channel, default_nlg, default_domain ) - assert "bye" == tracker.latest_message.parse_data["intent"]["name"] + assert "bye" == tracker.latest_message.parse_data["intent"][INTENT_NAME_KEY] assert tracker.export_stories() == "## sender\n* bye\n" def test_denied_rephrasing_affirmation(self, trained_policy, default_domain): @@ -898,7 +923,7 @@ async def test_rephrasing_instead_affirmation( events, default_channel, default_nlg, default_domain ) - assert "bye" == tracker.latest_message.parse_data["intent"]["name"] + assert "bye" == tracker.latest_message.parse_data["intent"][INTENT_NAME_KEY] assert tracker.export_stories() == ( "## sender\n* greet\n - utter_hello\n* bye\n" ) @@ -922,3 +947,60 @@ def test_listen_after_hand_off(self, trained_policy, default_domain): next_action = self._get_next_action(trained_policy, events, default_domain) assert next_action == ACTION_LISTEN_NAME + + +@pytest.mark.parametrize( + "policy,supported_data", + [ + (TEDPolicy, SupportedData.ML_DATA), + (RulePolicy, SupportedData.ML_AND_RULE_DATA), + (MemoizationPolicy, SupportedData.ML_DATA), + ], +) +def test_supported_data(policy: Type[Policy], supported_data: SupportedData): 
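# A minimal, hypothetical sketch of the tracker routing that the
# `test_get_training_trackers_for_policy` parametrization below pins down:
# ML-only policies get the non-rule trackers, rule-only policies get the rule
# trackers, and `ML_AND_RULE_DATA` policies get everything. This is a stand-alone
# illustration consistent with those assertions, not the production
# `SupportedData.trackers_for_policy` implementation; the helper name is made up.
from typing import List

from rasa.core.policies.policy import SupportedData
from rasa.core.trackers import DialogueStateTracker


def split_trackers_sketch(
    supported: SupportedData, trackers: List[DialogueStateTracker]
) -> List[DialogueStateTracker]:
    if supported == SupportedData.RULE_DATA:
        # rule-only policies train on rule trackers exclusively
        return [t for t in trackers if t.is_rule_tracker]
    if supported == SupportedData.ML_DATA:
        # ML-only policies train on story-based (non-rule) trackers exclusively
        return [t for t in trackers if not t.is_rule_tracker]
    # ML_AND_RULE_DATA: train on both kinds of trackers
    return list(trackers)


# With the five trackers created in the test below (two rule-based, three ML-based),
# this yields 3 trackers for ML_DATA, 2 for RULE_DATA, and all 5 for
# ML_AND_RULE_DATA, matching the expected counts in the parametrization.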
+ assert policy.supported_data() == supported_data + + +class OnlyRulePolicy(Policy): + """Test policy that supports both rule-based and ML-based training data.""" + + @staticmethod + def supported_data() -> SupportedData: + return SupportedData.RULE_DATA + + +@pytest.mark.parametrize( + "policy,n_rule_trackers,n_ml_trackers", + [ + (TEDPolicy(), 0, 3), + (RulePolicy(), 2, 3), + (OnlyRulePolicy, 2, 0), # policy can be passed as a `type` as well + ], +) +def test_get_training_trackers_for_policy( + policy: Policy, n_rule_trackers: int, n_ml_trackers +): + # create five trackers (two rule-based and three ML trackers) + trackers = [ + DialogueStateTracker("id1", slots=[], is_rule_tracker=True), + DialogueStateTracker("id2", slots=[], is_rule_tracker=False), + DialogueStateTracker("id3", slots=[], is_rule_tracker=False), + DialogueStateTracker("id4", slots=[], is_rule_tracker=True), + DialogueStateTracker("id5", slots=[], is_rule_tracker=False), + ] + + trackers = SupportedData.trackers_for_policy(policy, trackers) + + rule_trackers = [tracker for tracker in trackers if tracker.is_rule_tracker] + ml_trackers = [tracker for tracker in trackers if not tracker.is_rule_tracker] + + assert len(rule_trackers) == n_rule_trackers + assert len(ml_trackers) == n_ml_trackers + + +@pytest.mark.parametrize( + "policy", [FormPolicy, MappingPolicy, FallbackPolicy, TwoStageFallbackPolicy] +) +def test_deprecation_warnings_for_old_rule_like_policies(policy: Type[Policy]): + with pytest.warns(FutureWarning): + policy(None) diff --git a/tests/core/test_processor.py b/tests/core/test_processor.py index f0cdd3fb3a45..92686727b1a6 100644 --- a/tests/core/test_processor.py +++ b/tests/core/test_processor.py @@ -1,16 +1,21 @@ -import aiohttp import asyncio -import datetime -import uuid +import datetime import pytest +import time +import uuid +import json +from _pytest.monkeypatch import MonkeyPatch from aioresponses import aioresponses -from unittest.mock import patch +from typing import Optional, Text, List, Callable +from unittest.mock import patch, Mock -import rasa.utils.io from rasa.core import jobs +from rasa.core.actions.action import ACTION_LISTEN_NAME, ACTION_SESSION_START_NAME + from rasa.core.agent import Agent from rasa.core.channels.channel import CollectingOutputChannel, UserMessage +from rasa.core.domain import SessionConfig, Domain from rasa.core.events import ( ActionExecuted, BotUttered, @@ -18,44 +23,41 @@ ReminderScheduled, Restarted, UserUttered, + SessionStarted, + Event, + SlotSet, ) -from rasa.core.trackers import DialogueStateTracker -from rasa.core.slots import Slot -from rasa.core.processor import MessageProcessor -from rasa.core.interpreter import RasaNLUHttpInterpreter +from rasa.core.interpreter import RasaNLUHttpInterpreter, NaturalLanguageInterpreter +from rasa.core.policies import SimplePolicyEnsemble +from rasa.core.policies.ted_policy import TEDPolicy from rasa.core.processor import MessageProcessor +from rasa.core.slots import Slot +from rasa.core.tracker_store import InMemoryTrackerStore +from rasa.core.trackers import DialogueStateTracker +from rasa.nlu.constants import INTENT_NAME_KEY from rasa.utils.endpoints import EndpointConfig -from tests.utilities import json_of_latest_request, latest_request +from tests.utilities import latest_request + +from rasa.core.constants import EXTERNAL_MESSAGE_PREFIX, IS_EXTERNAL, DEFAULT_INTENTS import logging logger = logging.getLogger(__name__) -@pytest.fixture(scope="session") -def loop(): - loop = asyncio.new_event_loop() - 
asyncio.set_event_loop(loop) - loop = rasa.utils.io.enable_async_loop_debugging(loop) - yield loop - loop.close() - - async def test_message_processor( default_channel: CollectingOutputChannel, default_processor: MessageProcessor ): await default_processor.handle_message( UserMessage('/greet{"name":"Core"}', default_channel) ) - assert { + assert default_channel.latest_output() == { "recipient_id": "default", "text": "hey there Core!", - } == default_channel.latest_output() + } async def test_message_id_logging(default_processor: MessageProcessor): - from rasa.core.trackers import DialogueStateTracker - message = UserMessage("If Meg was an egg would she still have a leg?") tracker = DialogueStateTracker("1", []) await default_processor._handle_message_with_tracker(message, tracker) @@ -68,10 +70,38 @@ async def test_message_id_logging(default_processor: MessageProcessor): async def test_parsing(default_processor: MessageProcessor): message = UserMessage('/greet{"name": "boy"}') parsed = await default_processor._parse_message(message) - assert parsed["intent"]["name"] == "greet" + assert parsed["intent"][INTENT_NAME_KEY] == "greet" assert parsed["entities"][0]["entity"] == "name" +async def test_check_for_unseen_feature(default_processor: MessageProcessor): + message = UserMessage('/dislike{"test_entity": "RASA"}') + parsed = await default_processor._parse_message(message) + with pytest.warns(UserWarning) as record: + default_processor._check_for_unseen_features(parsed) + assert len(record) == 2 + + assert ( + record[0].message.args[0].startswith("Interpreter parsed an intent 'dislike'") + ) + assert ( + record[1] + .message.args[0] + .startswith("Interpreter parsed an entity 'test_entity'") + ) + + +@pytest.mark.parametrize("default_intent", DEFAULT_INTENTS) +async def test_default_intent_recognized( + default_processor: MessageProcessor, default_intent: Text +): + message = UserMessage(default_intent) + parsed = await default_processor._parse_message(message) + with pytest.warns(None) as record: + default_processor._check_for_unseen_features(parsed) + assert len(record) == 0 + + async def test_http_parsing(): message = UserMessage("lunch?") @@ -79,7 +109,7 @@ async def test_http_parsing(): with aioresponses() as mocked: mocked.post("https://interpreter.com/model/parse", repeat=True, status=200) - inter = RasaNLUHttpInterpreter(endpoint=endpoint) + inter = RasaNLUHttpInterpreter(endpoint_config=endpoint) try: await MessageProcessor(inter, None, None, None, None)._parse_message( message @@ -97,7 +127,7 @@ async def mocked_parse(self, text, message_id=None, tracker=None): value from the tracker's state.""" return { - "intent": {"name": "", "confidence": 0.0}, + "intent": {INTENT_NAME_KEY: "", "confidence": 0.0}, "entities": [], "text": text, "requested_language": tracker.get_slot("requested_language"), @@ -116,7 +146,7 @@ async def test_parsing_with_tracker(): # mock the parse function with the one defined for this test with patch.object(RasaNLUHttpInterpreter, "parse", mocked_parse): - interpreter = RasaNLUHttpInterpreter(endpoint=endpoint) + interpreter = RasaNLUHttpInterpreter(endpoint_config=endpoint) agent = Agent(None, None, interpreter) result = await agent.parse_message_using_nlu_interpreter("lunch?", tracker) @@ -128,32 +158,28 @@ async def test_reminder_scheduled( ): sender_id = uuid.uuid4().hex - reminder = ReminderScheduled("utter_greet", datetime.datetime.now()) + reminder = ReminderScheduled("remind", datetime.datetime.now()) tracker = 
default_processor.tracker_store.get_or_create_tracker(sender_id) tracker.update(UserUttered("test")) - tracker.update(ActionExecuted("action_reminder_reminder")) + tracker.update(ActionExecuted("action_schedule_reminder")) tracker.update(reminder) default_processor.tracker_store.save(tracker) + await default_processor.handle_reminder( reminder, sender_id, default_channel, default_processor.nlg ) # retrieve the updated tracker t = default_processor.tracker_store.retrieve(sender_id) - assert t.events[-4] == UserUttered(None) - assert t.events[-3] == ActionExecuted("utter_greet") - assert t.events[-2] == BotUttered( - "hey there None!", - { - "elements": None, - "buttons": None, - "quick_replies": None, - "attachment": None, - "image": None, - "custom": None, - }, + + assert t.events[-5] == UserUttered("test") + assert t.events[-4] == ActionExecuted("action_schedule_reminder") + assert isinstance(t.events[-3], ReminderScheduled) + assert t.events[-2] == UserUttered( + f"{EXTERNAL_MESSAGE_PREFIX}remind", + intent={INTENT_NAME_KEY: "remind", IS_EXTERNAL: True}, ) assert t.events[-1] == ActionExecuted("action_listen") @@ -181,7 +207,22 @@ async def test_reminder_aborted( assert len(t.events) == 3 # nothing should have been executed -async def test_reminder_cancelled( +async def wait_until_all_jobs_were_executed( + timeout_after_seconds: Optional[float] = None, +) -> None: + total_seconds = 0.0 + while len((await jobs.scheduler()).get_jobs()) > 0 and ( + not timeout_after_seconds or total_seconds < timeout_after_seconds + ): + await asyncio.sleep(0.1) + total_seconds += 0.1 + + if total_seconds >= timeout_after_seconds: + jobs.kill_scheduler() + raise TimeoutError + + +async def test_reminder_cancelled_multi_user( default_channel: CollectingOutputChannel, default_processor: MessageProcessor ): sender_ids = [uuid.uuid4().hex, uuid.uuid4().hex] @@ -193,13 +234,13 @@ async def test_reminder_cancelled( tracker.update(ActionExecuted("action_reminder_reminder")) tracker.update( ReminderScheduled( - "utter_greet", datetime.datetime.now(), kill_on_user_message=True + "greet", datetime.datetime.now(), kill_on_user_message=True ) ) trackers.append(tracker) - # cancel reminder for the first user - trackers[0].update(ReminderCancelled("utter_greet")) + # cancel all reminders (one) for the first user + trackers[0].update(ReminderCancelled()) for tracker in trackers: default_processor.tracker_store.save(tracker) @@ -215,15 +256,145 @@ async def test_reminder_cancelled( assert len((await jobs.scheduler()).get_jobs()) == 1 # execute the jobs - await asyncio.sleep(3) + await wait_until_all_jobs_were_executed(timeout_after_seconds=5.0) tracker_0 = default_processor.tracker_store.retrieve(sender_ids[0]) # there should be no utter_greet action - assert ActionExecuted("utter_greet") not in tracker_0.events + assert ( + UserUttered( + f"{EXTERNAL_MESSAGE_PREFIX}greet", + intent={INTENT_NAME_KEY: "greet", IS_EXTERNAL: True}, + ) + not in tracker_0.events + ) tracker_1 = default_processor.tracker_store.retrieve(sender_ids[1]) # there should be utter_greet action - assert ActionExecuted("utter_greet") in tracker_1.events + assert ( + UserUttered( + f"{EXTERNAL_MESSAGE_PREFIX}greet", + intent={INTENT_NAME_KEY: "greet", IS_EXTERNAL: True}, + ) + in tracker_1.events + ) + + +async def test_reminder_cancelled_cancels_job_with_name( + default_channel: CollectingOutputChannel, default_processor: MessageProcessor +): + sender_id = "][]][xy,,=+2f'[:/;>] <0d]A[e_,02" + + reminder = ReminderScheduled( + intent="greet", 
trigger_date_time=datetime.datetime.now() + ) + job_name = reminder.scheduled_job_name(sender_id) + reminder_cancelled = ReminderCancelled() + + assert reminder_cancelled.cancels_job_with_name(job_name, sender_id) + assert not reminder_cancelled.cancels_job_with_name(job_name.upper(), sender_id) + + +async def test_reminder_cancelled_cancels_job_with_name_special_name( + default_channel: CollectingOutputChannel, default_processor: MessageProcessor +): + sender_id = "][]][xy,,=+2f'[:/; >]<0d]A[e_,02" + name = "wkjbgr,34(,*&%^^&*(OP#LKMN V#NF# # #R" + + reminder = ReminderScheduled( + intent="greet", trigger_date_time=datetime.datetime.now(), name=name + ) + job_name = reminder.scheduled_job_name(sender_id) + reminder_cancelled = ReminderCancelled(name) + + assert reminder_cancelled.cancels_job_with_name(job_name, sender_id) + assert not reminder_cancelled.cancels_job_with_name(job_name.upper(), sender_id) + + +async def cancel_reminder_and_check( + tracker: DialogueStateTracker, + default_processor: MessageProcessor, + reminder_canceled_event: ReminderCancelled, + num_jobs_before: int, + num_jobs_after: int, +) -> None: + # cancel the sixth reminder + tracker.update(reminder_canceled_event) + + # check that the jobs were added + assert len((await jobs.scheduler()).get_jobs()) == num_jobs_before + + await default_processor._cancel_reminders(tracker.events, tracker) + + # check that only one job was removed + assert len((await jobs.scheduler()).get_jobs()) == num_jobs_after + + +async def test_reminder_cancelled_by_name( + default_channel: CollectingOutputChannel, + default_processor: MessageProcessor, + tracker_with_six_scheduled_reminders: DialogueStateTracker, +): + tracker = tracker_with_six_scheduled_reminders + await default_processor._schedule_reminders( + tracker.events, tracker, default_channel, default_processor.nlg + ) + + # cancel the sixth reminder + await cancel_reminder_and_check( + tracker, default_processor, ReminderCancelled("special"), 6, 5 + ) + + +async def test_reminder_cancelled_by_entities( + default_channel: CollectingOutputChannel, + default_processor: MessageProcessor, + tracker_with_six_scheduled_reminders: DialogueStateTracker, +): + tracker = tracker_with_six_scheduled_reminders + await default_processor._schedule_reminders( + tracker.events, tracker, default_channel, default_processor.nlg + ) + + # cancel the fourth reminder + await cancel_reminder_and_check( + tracker, + default_processor, + ReminderCancelled(entities=[{"entity": "name", "value": "Bruce Wayne"}]), + 6, + 5, + ) + + +async def test_reminder_cancelled_by_intent( + default_channel: CollectingOutputChannel, + default_processor: MessageProcessor, + tracker_with_six_scheduled_reminders: DialogueStateTracker, +): + tracker = tracker_with_six_scheduled_reminders + await default_processor._schedule_reminders( + tracker.events, tracker, default_channel, default_processor.nlg + ) + + # cancel the third, fifth, and sixth reminder + await cancel_reminder_and_check( + tracker, default_processor, ReminderCancelled(intent="default"), 6, 3 + ) + + +async def test_reminder_cancelled_all( + default_channel: CollectingOutputChannel, + default_processor: MessageProcessor, + tracker_with_six_scheduled_reminders: DialogueStateTracker, +): + tracker = tracker_with_six_scheduled_reminders + await default_processor._schedule_reminders( + tracker.events, tracker, default_channel, default_processor.nlg + ) + + # cancel all reminders + await cancel_reminder_and_check( + tracker, default_processor, ReminderCancelled(), 6, 0 
+ ) async def test_reminder_restart( @@ -248,3 +419,307 @@ async def test_reminder_restart( # retrieve the updated tracker t = default_processor.tracker_store.retrieve(sender_id) assert len(t.events) == 4 # nothing should have been executed + + +@pytest.mark.parametrize( + "event_to_apply,session_expiration_time_in_minutes,has_expired", + [ + # last user event is way in the past + (UserUttered(timestamp=1), 60, True), + # user event are very recent + (UserUttered("hello", timestamp=time.time()), 120, False), + # there is user event + (ActionExecuted(ACTION_LISTEN_NAME, timestamp=time.time()), 60, False), + # Old event, but sessions are disabled + (UserUttered("hello", timestamp=1), 0, False), + # there is no event + (None, 1, False), + ], +) +async def test_has_session_expired( + event_to_apply: Optional[Event], + session_expiration_time_in_minutes: float, + has_expired: bool, + default_processor: MessageProcessor, +): + sender_id = uuid.uuid4().hex + + default_processor.domain.session_config = SessionConfig( + session_expiration_time_in_minutes, True + ) + # create new tracker without events + tracker = default_processor.tracker_store.get_or_create_tracker(sender_id) + tracker.events.clear() + + # apply desired event + if event_to_apply: + tracker.update(event_to_apply) + + # noinspection PyProtectedMember + assert default_processor._has_session_expired(tracker) == has_expired + + +# noinspection PyProtectedMember +async def test_update_tracker_session( + default_channel: CollectingOutputChannel, + default_processor: MessageProcessor, + monkeypatch: MonkeyPatch, +): + sender_id = uuid.uuid4().hex + tracker = default_processor.tracker_store.get_or_create_tracker(sender_id) + + # patch `_has_session_expired()` so the `_update_tracker_session()` call actually + # does something + monkeypatch.setattr(default_processor, "_has_session_expired", lambda _: True) + + await default_processor._update_tracker_session(tracker, default_channel) + + # the save is not called in _update_tracker_session() + default_processor._save_tracker(tracker) + + # inspect tracker and make sure all events are present + tracker = default_processor.tracker_store.retrieve(sender_id) + + assert list(tracker.events) == [ + ActionExecuted(ACTION_LISTEN_NAME), + ActionExecuted(ACTION_SESSION_START_NAME), + SessionStarted(), + ActionExecuted(ACTION_LISTEN_NAME), + ] + + +# noinspection PyProtectedMember +async def test_update_tracker_session_with_metadata( + default_channel: CollectingOutputChannel, + default_processor: MessageProcessor, + monkeypatch: MonkeyPatch, +): + sender_id = uuid.uuid4().hex + tracker = default_processor.tracker_store.get_or_create_tracker(sender_id) + + # patch `_has_session_expired()` so the `_update_tracker_session()` call actually + # does something + monkeypatch.setattr(default_processor, "_has_session_expired", lambda _: True) + + metadata = {"metadataTestKey": "metadataTestValue"} + + await default_processor._update_tracker_session(tracker, default_channel, metadata) + + # the save is not called in _update_tracker_session() + default_processor._save_tracker(tracker) + + # inspect tracker events and make sure SessionStarted event is present + # and has metadata. 
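# A minimal, hypothetical sketch of the session-start sequence that the surrounding
# tests assert on: `action_session_start`, a `SessionStarted` event (optionally
# carrying channel metadata), any slots carried over from before the session, and a
# final `action_listen`. The helper name is made up for illustration; the imports
# mirror the ones added at the top of this test module.
from typing import Any, Dict, List, Optional, Text

from rasa.core.actions.action import ACTION_LISTEN_NAME, ACTION_SESSION_START_NAME
from rasa.core.events import ActionExecuted, Event, SessionStarted, SlotSet


def expected_session_start_sequence(
    carried_over_slots: List[SlotSet],
    metadata: Optional[Dict[Text, Any]] = None,
) -> List[Event]:
    session_started = SessionStarted()
    # assumption: `metadata` is a plain attribute on the event, as inspected by
    # `test_update_tracker_session_with_metadata`
    if metadata:
        session_started.metadata = metadata
    return [
        ActionExecuted(ACTION_SESSION_START_NAME),
        session_started,
        *carried_over_slots,  # slots set before the session are re-applied after it
        ActionExecuted(ACTION_LISTEN_NAME),
    ]


# For example, with no carried-over slots and no metadata this reduces to the
# three-event sequence asserted in `test_get_tracker_with_session_start`:
# action_session_start, SessionStarted(), action_listen.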
+ tracker = default_processor.tracker_store.retrieve(sender_id) + assert tracker.events.count(SessionStarted()) == 1 + + session_started_event_idx = tracker.events.index(SessionStarted()) + session_started_event_metadata = tracker.events[session_started_event_idx].metadata + + assert session_started_event_metadata == metadata + + +# noinspection PyProtectedMember +async def test_update_tracker_session_with_slots( + default_channel: CollectingOutputChannel, + default_processor: MessageProcessor, + monkeypatch: MonkeyPatch, +): + sender_id = uuid.uuid4().hex + tracker = default_processor.tracker_store.get_or_create_tracker(sender_id) + + # apply a user uttered and five slots + user_event = UserUttered("some utterance") + tracker.update(user_event) + + slot_set_events = [SlotSet(f"slot key {i}", f"test value {i}") for i in range(5)] + + for event in slot_set_events: + tracker.update(event) + + # patch `_has_session_expired()` so the `_update_tracker_session()` call actually + # does something + monkeypatch.setattr(default_processor, "_has_session_expired", lambda _: True) + + await default_processor._update_tracker_session(tracker, default_channel) + + # the save is not called in _update_tracker_session() + default_processor._save_tracker(tracker) + + # inspect tracker and make sure all events are present + tracker = default_processor.tracker_store.retrieve(sender_id) + events = list(tracker.events) + + # the first three events should be up to the user utterance + assert events[:2] == [ActionExecuted(ACTION_LISTEN_NAME), user_event] + + # next come the five slots + assert events[2:7] == slot_set_events + + # the next two events are the session start sequence + assert events[7:9] == [ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted()] + + # the five slots should be reapplied + assert events[9:14] == slot_set_events + + # finally an action listen, this should also be the last event + assert events[14] == events[-1] == ActionExecuted(ACTION_LISTEN_NAME) + + +# noinspection PyProtectedMember +async def test_get_tracker_with_session_start( + default_channel: CollectingOutputChannel, default_processor: MessageProcessor +): + sender_id = uuid.uuid4().hex + tracker = await default_processor.get_tracker_with_session_start( + sender_id, default_channel + ) + + # ensure session start sequence is present + assert list(tracker.events) == [ + ActionExecuted(ACTION_SESSION_START_NAME), + SessionStarted(), + ActionExecuted(ACTION_LISTEN_NAME), + ] + + +async def test_handle_message_with_session_start( + default_channel: CollectingOutputChannel, + default_processor: MessageProcessor, + monkeypatch: MonkeyPatch, +): + sender_id = uuid.uuid4().hex + + entity = "name" + slot_1 = {entity: "Core"} + await default_processor.handle_message( + UserMessage(f"/greet{json.dumps(slot_1)}", default_channel, sender_id) + ) + + assert default_channel.latest_output() == { + "recipient_id": sender_id, + "text": "hey there Core!", + } + + # patch processor so a session start is triggered + monkeypatch.setattr(default_processor, "_has_session_expired", lambda _: True) + + slot_2 = {entity: "post-session start hello"} + # handle a new message + await default_processor.handle_message( + UserMessage(f"/greet{json.dumps(slot_2)}", default_channel, sender_id) + ) + + tracker = default_processor.tracker_store.get_or_create_tracker(sender_id) + + # make sure the sequence of events is as expected + assert list(tracker.events) == [ + ActionExecuted(ACTION_SESSION_START_NAME), + SessionStarted(), + 
ActionExecuted(ACTION_LISTEN_NAME), + UserUttered( + f"/greet{json.dumps(slot_1)}", + {INTENT_NAME_KEY: "greet", "confidence": 1.0}, + [{"entity": entity, "start": 6, "end": 22, "value": "Core"}], + ), + SlotSet(entity, slot_1[entity]), + ActionExecuted("utter_greet"), + BotUttered("hey there Core!", metadata={"template_name": "utter_greet"}), + ActionExecuted(ACTION_LISTEN_NAME), + ActionExecuted(ACTION_SESSION_START_NAME), + SessionStarted(), + # the initial SlotSet is reapplied after the SessionStarted sequence + SlotSet(entity, slot_1[entity]), + ActionExecuted(ACTION_LISTEN_NAME), + UserUttered( + f"/greet{json.dumps(slot_2)}", + {INTENT_NAME_KEY: "greet", "confidence": 1.0}, + [ + { + "entity": entity, + "start": 6, + "end": 42, + "value": "post-session start hello", + } + ], + ), + SlotSet(entity, slot_2[entity]), + ActionExecuted(ACTION_LISTEN_NAME), + ] + + +# noinspection PyProtectedMember +@pytest.mark.parametrize( + "action_name, should_predict_another_action", + [ + (ACTION_LISTEN_NAME, False), + (ACTION_SESSION_START_NAME, False), + ("utter_greet", True), + ], +) +async def test_should_predict_another_action( + default_processor: MessageProcessor, + action_name: Text, + should_predict_another_action: bool, +): + assert ( + default_processor.should_predict_another_action(action_name) + == should_predict_another_action + ) + + +def test_get_next_action_probabilities_passes_interpreter_to_policies( + monkeypatch: MonkeyPatch, +): + policy = TEDPolicy() + test_interpreter = Mock() + + def predict_action_probabilities( + tracker: DialogueStateTracker, + domain: Domain, + interpreter: NaturalLanguageInterpreter, + **kwargs, + ) -> List[float]: + assert interpreter == test_interpreter + return [1, 0] + + policy.predict_action_probabilities = predict_action_probabilities + ensemble = SimplePolicyEnsemble(policies=[policy]) + + domain = Domain.empty() + + processor = MessageProcessor( + test_interpreter, ensemble, domain, InMemoryTrackerStore(domain), Mock() + ) + + # This should not raise + processor._get_next_action_probabilities( + DialogueStateTracker.from_events("lala", [ActionExecuted(ACTION_LISTEN_NAME)]) + ) + + +@pytest.mark.parametrize( + "predict_function", + [lambda tracker, domain: [1, 0], lambda tracker, domain, some_bool=True: [1, 0]], +) +def test_get_next_action_probabilities_pass_policy_predictions_without_interpreter_arg( + predict_function: Callable, +): + policy = TEDPolicy() + + policy.predict_action_probabilities = predict_function + + ensemble = SimplePolicyEnsemble(policies=[policy]) + interpreter = Mock() + domain = Domain.empty() + + processor = MessageProcessor( + interpreter, ensemble, domain, InMemoryTrackerStore(domain), Mock() + ) + + with pytest.warns(DeprecationWarning): + processor._get_next_action_probabilities( + DialogueStateTracker.from_events( + "lala", [ActionExecuted(ACTION_LISTEN_NAME)] + ) + ) diff --git a/tests/core/test_restore.py b/tests/core/test_restore.py index 0e2bf15969a2..98f6ea60918c 100644 --- a/tests/core/test_restore.py +++ b/tests/core/test_restore.py @@ -1,23 +1,10 @@ -import asyncio +from typing import Text -import pytest - -import rasa.utils.io from rasa.core import restore from rasa.core.agent import Agent -from rasa.model import get_model - - -@pytest.fixture(scope="module") -def loop(): - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - loop = rasa.utils.io.enable_async_loop_debugging(loop) - yield loop - loop.close() -async def test_restoring_tracker(trained_moodbot_path, recwarn): +async def 
test_restoring_tracker(trained_moodbot_path: Text, recwarn): tracker_dump = "data/test_trackers/tracker_moodbot.json" agent = Agent.load(trained_moodbot_path) @@ -26,9 +13,11 @@ async def test_restoring_tracker(trained_moodbot_path, recwarn): await restore.replay_events(tracker, agent) - # makes sure there are no warnings. warnings are raised, if the models + # makes sure there are no warnings.warnings are raised, if the models # predictions differ from the tracker when the dumped tracker is replayed - assert [e for e in recwarn if e._category_name == "UserWarning"] == [] + # TODO tensorflow is printing a warning currently, should be resolved with an + # upcoming version (https://github.com/tensorflow/tensorflow/issues/35100) + # assert [e for e in recwarn if e._category_name == "UserWarning"] == [] assert len(tracker.events) == 7 assert tracker.latest_action_name == "action_listen" diff --git a/tests/core/test_run.py b/tests/core/test_run.py index f6028e26947e..d11099747607 100644 --- a/tests/core/test_run.py +++ b/tests/core/test_run.py @@ -27,7 +27,7 @@ def test_create_single_input_channels(): def test_create_single_input_channels_by_class(): channels = run.create_http_input_channels( - "rasa.core.channels.channel.RestInput", CREDENTIALS_FILE + "rasa.core.channels.rest.RestInput", CREDENTIALS_FILE ) assert len(channels) == 1 assert channels[0].name() == "rest" @@ -35,7 +35,7 @@ def test_create_single_input_channels_by_class(): def test_create_single_input_channels_by_class_wo_credentials(): channels = run.create_http_input_channels( - "rasa.core.channels.channel.RestInput", credentials_file=None + "rasa.core.channels.rest.RestInput", credentials_file=None ) assert len(channels) == 1 diff --git a/tests/core/test_slots.py b/tests/core/test_slots.py index c79ac6dce024..ee6e610d225f 100644 --- a/tests/core/test_slots.py +++ b/tests/core/test_slots.py @@ -1,6 +1,6 @@ -# coding=utf-8 import pytest +from rasa.core.constants import DEFAULT_CATEGORICAL_SLOT_VALUE from rasa.core.slots import ( Slot, TextSlot, @@ -12,7 +12,7 @@ ) -class SlotTestCollection(object): +class SlotTestCollection: """Tests every slot needs to fulfill. 
Each slot can declare further tests on its own.""" @@ -171,6 +171,32 @@ def invalid_value(self, request): ("two", [0, 1, 0, 0, 0]), ("小于", [0, 0, 1, 0, 0]), ({"three": 3}, [0, 0, 0, 1, 0]), + (DEFAULT_CATEGORICAL_SLOT_VALUE, [0, 0, 0, 0, 0]), + ] + ) + def value_feature_pair(self, request): + return request.param + + +class TestCategoricalSlotDefaultValue(SlotTestCollection): + def create_slot(self): + slot = CategoricalSlot("test", values=[1, "two", "小于", {"three": 3}, None]) + slot.add_default_value() + return slot + + @pytest.fixture(params=[{"a": "b"}, 2, True, "asd", "🌴"]) + def invalid_value(self, request): + return request.param + + @pytest.fixture( + params=[ + (None, [0, 0, 0, 0, 1, 0]), + (1, [1, 0, 0, 0, 0, 0]), + ("two", [0, 1, 0, 0, 0, 0]), + ("小于", [0, 0, 1, 0, 0, 0]), + ({"three": 3}, [0, 0, 0, 1, 0, 0]), + (DEFAULT_CATEGORICAL_SLOT_VALUE, [0, 0, 0, 0, 0, 1]), + ("unseen value", [0, 0, 0, 0, 0, 1]), ] ) def value_feature_pair(self, request): diff --git a/tests/core/test_story_conflict.py b/tests/core/test_story_conflict.py new file mode 100644 index 000000000000..bf057d9e8f71 --- /dev/null +++ b/tests/core/test_story_conflict.py @@ -0,0 +1,160 @@ +from typing import Text, List, Tuple + +from rasa.core.domain import Domain +from rasa.core.training.story_conflict import ( + StoryConflict, + find_story_conflicts, + _get_previous_event, +) +from rasa.core.training.generator import TrainingDataGenerator, TrackerWithCachedStates +from rasa.validator import Validator +from rasa.importers.rasa import RasaFileImporter +from tests.core.conftest import DEFAULT_STORIES_FILE, DEFAULT_DOMAIN_PATH_WITH_SLOTS + + +async def _setup_trackers_for_testing( + domain_path: Text, training_data_file: Text +) -> Tuple[List[TrackerWithCachedStates], Domain]: + importer = RasaFileImporter( + domain_path=domain_path, training_data_paths=[training_data_file] + ) + validator = await Validator.from_importer(importer) + + trackers = TrainingDataGenerator( + validator.story_graph, + domain=validator.domain, + remove_duplicates=False, + augmentation_factor=0, + ).generate() + + return trackers, validator.domain + + +async def test_find_no_conflicts(): + trackers, domain = await _setup_trackers_for_testing( + DEFAULT_DOMAIN_PATH_WITH_SLOTS, DEFAULT_STORIES_FILE + ) + + # Create a list of `StoryConflict` objects + conflicts = find_story_conflicts(trackers, domain, 5) + + assert conflicts == [] + + +async def test_find_conflicts_in_short_history(): + trackers, domain = await _setup_trackers_for_testing( + "data/test_domains/default.yml", "data/test_stories/stories_conflicting_1.md" + ) + + # `max_history = 3` is too small, so a conflict must arise + conflicts = find_story_conflicts(trackers, domain, 3) + assert len(conflicts) == 1 + + # With `max_history = 4` the conflict should disappear + conflicts = find_story_conflicts(trackers, domain, 4) + assert len(conflicts) == 0 + + +async def test_find_conflicts_checkpoints(): + trackers, domain = await _setup_trackers_for_testing( + "data/test_domains/default.yml", "data/test_stories/stories_conflicting_2.md" + ) + + # Create a list of `StoryConflict` objects + conflicts = find_story_conflicts(trackers, domain, 5) + + assert len(conflicts) == 1 + assert conflicts[0].conflicting_actions == ["utter_goodbye", "utter_default"] + + +async def test_find_conflicts_or(): + trackers, domain = await _setup_trackers_for_testing( + "data/test_domains/default.yml", "data/test_stories/stories_conflicting_3.md" + ) + + # Create a list of `StoryConflict` objects + conflicts = 
find_story_conflicts(trackers, domain, 5) + + assert len(conflicts) == 1 + assert conflicts[0].conflicting_actions == ["utter_default", "utter_goodbye"] + + +async def test_find_conflicts_slots_that_break(): + trackers, domain = await _setup_trackers_for_testing( + "data/test_domains/default.yml", "data/test_stories/stories_conflicting_4.md" + ) + + # Create a list of `StoryConflict` objects + conflicts = find_story_conflicts(trackers, domain, 5) + + assert len(conflicts) == 1 + assert conflicts[0].conflicting_actions == ["utter_default", "utter_greet"] + + +async def test_find_conflicts_slots_that_dont_break(): + trackers, domain = await _setup_trackers_for_testing( + "data/test_domains/default.yml", "data/test_stories/stories_conflicting_5.md" + ) + + # Create a list of `StoryConflict` objects + conflicts = find_story_conflicts(trackers, domain, 5) + + assert len(conflicts) == 0 + + +async def test_find_conflicts_multiple_stories(): + trackers, domain = await _setup_trackers_for_testing( + "data/test_domains/default.yml", "data/test_stories/stories_conflicting_6.md" + ) + + # Create a list of `StoryConflict` objects + conflicts = find_story_conflicts(trackers, domain, 5) + + assert len(conflicts) == 1 + assert "and 2 other trackers" in str(conflicts[0]) + + +async def test_add_conflicting_action(): + sliced_states = [ + None, + {}, + {"intent_greet": 1.0, "prev_action_listen": 1.0}, + {"prev_utter_greet": 1.0, "intent_greet": 1.0}, + ] + conflict = StoryConflict(sliced_states) + + conflict.add_conflicting_action("utter_greet", "xyz") + conflict.add_conflicting_action("utter_default", "uvw") + assert conflict.conflicting_actions == ["utter_greet", "utter_default"] + + +async def test_has_prior_events(): + sliced_states = [ + None, + {}, + {"intent_greet": 1.0, "prev_action_listen": 1.0}, + {"prev_utter_greet": 1.0, "intent_greet": 1.0}, + ] + conflict = StoryConflict(sliced_states) + assert conflict.conflict_has_prior_events + + +async def test_get_previous_event(): + assert _get_previous_event({"prev_utter_greet": 1.0, "intent_greet": 1.0}) == ( + "action", + "utter_greet", + ) + assert _get_previous_event({"intent_greet": 1.0, "prev_utter_greet": 1.0}) == ( + "action", + "utter_greet", + ) + assert _get_previous_event({"intent_greet": 1.0, "prev_action_listen": 1.0}) == ( + "intent", + "greet", + ) + + +async def test_has_no_prior_events(): + sliced_states = [None] + conflict = StoryConflict(sliced_states) + assert not conflict.conflict_has_prior_events diff --git a/tests/core/test_structures.py b/tests/core/test_structures.py new file mode 100644 index 000000000000..ea688b3709e2 --- /dev/null +++ b/tests/core/test_structures.py @@ -0,0 +1,31 @@ +from rasa.core.actions.action import ACTION_SESSION_START_NAME +from rasa.core.domain import Domain +from rasa.core.events import SessionStarted, SlotSet, UserUttered, ActionExecuted +from rasa.core.trackers import DialogueStateTracker +from rasa.core.training.structures import Story + +domain = Domain.load("examples/moodbot/domain.yml") + + +def test_session_start_is_not_serialised(default_domain: Domain): + tracker = DialogueStateTracker("default", default_domain.slots) + # the retrieved tracker should be empty + assert len(tracker.events) == 0 + + # add SlotSet event + tracker.update(SlotSet("slot", "value")) + + # add the two SessionStarted events and a user event + tracker.update(ActionExecuted(ACTION_SESSION_START_NAME)) + tracker.update(SessionStarted()) + tracker.update(UserUttered("say something")) + + # make sure session start is not 
serialised + story = Story.from_events(tracker.events, "some-story01") + + expected = """## some-story01 + - slot{"slot": "value"} +* say something +""" + + assert story.as_story_string(flat=True) == expected diff --git a/tests/core/test_tracker_stores.py b/tests/core/test_tracker_stores.py index f3623cae7418..9c38c3dbfae4 100644 --- a/tests/core/test_tracker_stores.py +++ b/tests/core/test_tracker_stores.py @@ -1,27 +1,54 @@ -import tempfile +import logging +from contextlib import contextmanager +from pathlib import Path import pytest - +import sqlalchemy +import uuid + +from _pytest.capture import CaptureFixture +from _pytest.logging import LogCaptureFixture +from _pytest.monkeypatch import MonkeyPatch +from moto import mock_dynamodb2 +from sqlalchemy.dialects.postgresql.base import PGDialect +from sqlalchemy.dialects.sqlite.base import SQLiteDialect +from sqlalchemy.dialects.oracle.base import OracleDialect +from sqlalchemy.engine.url import URL +from typing import Tuple, Text, Type, Dict, List, Union, Optional, ContextManager +from unittest.mock import Mock + +import rasa.core.tracker_store +from rasa.core.actions.action import ACTION_LISTEN_NAME, ACTION_SESSION_START_NAME from rasa.core.channels.channel import UserMessage +from rasa.core.constants import POSTGRESQL_SCHEMA from rasa.core.domain import Domain -from rasa.core.events import SlotSet, ActionExecuted, Restarted +from rasa.core.events import ( + SlotSet, + ActionExecuted, + Restarted, + UserUttered, + SessionStarted, + BotUttered, + Event, +) from rasa.core.tracker_store import ( TrackerStore, InMemoryTrackerStore, RedisTrackerStore, SQLTrackerStore, + DynamoTrackerStore, + FailSafeTrackerStore, ) from rasa.core.trackers import DialogueStateTracker from rasa.utils.endpoints import EndpointConfig, read_endpoint_config -from tests.core.conftest import DEFAULT_ENDPOINTS_FILE +from tests.core.conftest import DEFAULT_ENDPOINTS_FILE, MockedMongoTrackerStore domain = Domain.load("data/test_domains/default.yml") -def test_get_or_create(): +def get_or_create_tracker_store(store: TrackerStore) -> None: slot_key = "location" slot_val = "Easter Island" - store = InMemoryTrackerStore(domain) tracker = store.get_or_create_tracker(UserMessage.DEFAULT_SENDER_ID) ev = SlotSet(slot_key, slot_val) @@ -34,7 +61,38 @@ def test_get_or_create(): assert again.get_slot(slot_key) == slot_val -def test_restart_after_retrieval_from_tracker_store(default_domain): +def test_get_or_create(): + get_or_create_tracker_store(InMemoryTrackerStore(domain)) + + +# noinspection PyPep8Naming +@mock_dynamodb2 +def test_dynamo_get_or_create(): + get_or_create_tracker_store(DynamoTrackerStore(domain)) + + +@mock_dynamodb2 +def test_dynamo_tracker_floats(): + conversation_id = uuid.uuid4().hex + + tracker_store = DynamoTrackerStore(domain) + tracker = tracker_store.get_or_create_tracker( + conversation_id, append_action_listen=False + ) + + # save `slot` event with known `float`-type timestamp + timestamp = 13423.23434623 + tracker.update(SlotSet("key", "val", timestamp=timestamp)) + tracker_store.save(tracker) + + # retrieve tracker and the event timestamp is retrieved as a `float` + tracker = tracker_store.get_or_create_tracker(conversation_id) + retrieved_timestamp = tracker.events[0].timestamp + assert isinstance(retrieved_timestamp, float) + assert retrieved_timestamp == timestamp + + +def test_restart_after_retrieval_from_tracker_store(default_domain: Domain): store = InMemoryTrackerStore(default_domain) tr = store.get_or_create_tracker("myuser") synth = 
[ActionExecuted("action_listen") for _ in range(4)] @@ -51,7 +109,7 @@ def test_restart_after_retrieval_from_tracker_store(default_domain): assert latest_restart == latest_restart_after_loading -def test_tracker_store_remembers_max_history(default_domain): +def test_tracker_store_remembers_max_history(default_domain: Domain): store = InMemoryTrackerStore(default_domain) tr = store.get_or_create_tracker("myuser", max_event_history=42) tr.update(Restarted()) @@ -76,7 +134,7 @@ def test_tracker_store_endpoint_config_loading(): ) -def test_find_tracker_store(default_domain): +def test_create_tracker_store_from_endpoint_config(default_domain: Domain): store = read_endpoint_config(DEFAULT_ENDPOINTS_FILE, "tracker_store") tracker_store = RedisTrackerStore( domain=default_domain, @@ -87,57 +145,107 @@ def test_find_tracker_store(default_domain): record_exp=3000, ) - assert isinstance( - tracker_store, type(TrackerStore.find_tracker_store(default_domain, store)) - ) + assert isinstance(tracker_store, type(TrackerStore.create(store, default_domain))) + + +def test_exception_tracker_store_from_endpoint_config( + default_domain: Domain, monkeypatch: MonkeyPatch +): + """Check if tracker store properly handles exceptions. + + If we can not create a tracker store by instantiating the + expected type (e.g. due to an exception) we should fallback to + the default `InMemoryTrackerStore`.""" + + store = read_endpoint_config(DEFAULT_ENDPOINTS_FILE, "tracker_store") + mock = Mock(side_effect=Exception("test exception")) + monkeypatch.setattr(rasa.core.tracker_store, "RedisTrackerStore", mock) + with pytest.raises(Exception) as e: + TrackerStore.create(store, default_domain) -class ExampleTrackerStore(RedisTrackerStore): - def __init__(self, domain, url, port, db, password, record_exp): - super(ExampleTrackerStore, self).__init__( - domain, host=url, port=port, db=db, password=password, record_exp=record_exp + assert "test exception" in str(e.value) + + +class URLExampleTrackerStore(RedisTrackerStore): + def __init__(self, domain, url, port, db, password, record_exp, event_broker=None): + super().__init__( + domain, + event_broker=event_broker, + host=url, + port=port, + db=db, + password=password, + record_exp=record_exp, ) -def test_tracker_store_from_string(default_domain): +class HostExampleTrackerStore(RedisTrackerStore): + pass + + +def test_tracker_store_deprecated_url_argument_from_string(default_domain: Domain): endpoints_path = "data/test_endpoints/custom_tracker_endpoints.yml" store_config = read_endpoint_config(endpoints_path, "tracker_store") + store_config.type = "tests.core.test_tracker_stores.URLExampleTrackerStore" - tracker_store = TrackerStore.find_tracker_store(default_domain, store_config) + with pytest.raises(Exception): + TrackerStore.create(store_config, default_domain) - assert isinstance(tracker_store, ExampleTrackerStore) +def test_tracker_store_with_host_argument_from_string(default_domain: Domain): + endpoints_path = "data/test_endpoints/custom_tracker_endpoints.yml" + store_config = read_endpoint_config(endpoints_path, "tracker_store") + store_config.type = "tests.core.test_tracker_stores.HostExampleTrackerStore" -def test_tracker_store_from_invalid_module(default_domain): + with pytest.warns(None) as record: + tracker_store = TrackerStore.create(store_config, default_domain) + + assert len(record) == 0 + + assert isinstance(tracker_store, HostExampleTrackerStore) + + +def test_tracker_store_from_invalid_module(default_domain: Domain): endpoints_path = 
"data/test_endpoints/custom_tracker_endpoints.yml" store_config = read_endpoint_config(endpoints_path, "tracker_store") store_config.type = "a.module.which.cannot.be.found" - tracker_store = TrackerStore.find_tracker_store(default_domain, store_config) + with pytest.warns(UserWarning): + tracker_store = TrackerStore.create(store_config, default_domain) assert isinstance(tracker_store, InMemoryTrackerStore) -def test_tracker_store_from_invalid_string(default_domain): +def test_tracker_store_from_invalid_string(default_domain: Domain): endpoints_path = "data/test_endpoints/custom_tracker_endpoints.yml" store_config = read_endpoint_config(endpoints_path, "tracker_store") store_config.type = "any string" - tracker_store = TrackerStore.find_tracker_store(default_domain, store_config) + with pytest.warns(UserWarning): + tracker_store = TrackerStore.create(store_config, default_domain) assert isinstance(tracker_store, InMemoryTrackerStore) -def test_tracker_serialisation(): - slot_key = "location" - slot_val = "Easter Island" +def _tracker_store_and_tracker_with_slot_set() -> Tuple[ + InMemoryTrackerStore, DialogueStateTracker +]: + # returns an InMemoryTrackerStore containing a tracker with a slot set - store = InMemoryTrackerStore(domain) + slot_key = "cuisine" + slot_val = "French" + store = InMemoryTrackerStore(domain) tracker = store.get_or_create_tracker(UserMessage.DEFAULT_SENDER_ID) ev = SlotSet(slot_key, slot_val) tracker.update(ev) + return store, tracker + + +def test_tracker_serialisation(): + store, tracker = _tracker_store_and_tracker_with_slot_set() serialised = store.serialise_tracker(tracker) assert tracker == store.deserialise_tracker( @@ -145,15 +253,41 @@ def test_tracker_serialisation(): ) +def test_deprecated_pickle_deserialisation(caplog: LogCaptureFixture): + def pickle_serialise_tracker(_tracker): + # mocked version of TrackerStore.serialise_tracker() that uses + # the deprecated pickle serialisation + import pickle + + dialogue = _tracker.as_dialogue() + + return pickle.dumps(dialogue) + + store, tracker = _tracker_store_and_tracker_with_slot_set() + + serialised = pickle_serialise_tracker(tracker) + + # deprecation warning should be emitted + + caplog.clear() # avoid counting debug messages + with caplog.at_level(logging.WARNING): + assert tracker == store.deserialise_tracker( + UserMessage.DEFAULT_SENDER_ID, serialised + ) + assert len(caplog.records) == 1 + assert "Deserialisation of pickled trackers will be deprecated" in caplog.text + + @pytest.mark.parametrize( "full_url", [ "postgresql://localhost", "postgresql://localhost:5432", "postgresql://user:secret@localhost", + "sqlite:///", ], ) -def test_get_db_url_with_fully_specified_url(full_url): +def test_get_db_url_with_fully_specified_url(full_url: Text): assert SQLTrackerStore.get_db_url(host=full_url) == full_url @@ -162,14 +296,18 @@ def test_get_db_url_with_port_in_host(): dialect = "postgresql" db = "mydb" - expected = "{}://{}/{}".format(dialect, host, db) + expected = f"{dialect}://{host}/{db}" assert ( - str(SQLTrackerStore.get_db_url(dialect="postgresql", host=host, db=db)) - == expected + str(SQLTrackerStore.get_db_url(dialect=dialect, host=host, db=db)) == expected ) +def test_db_get_url_with_sqlite(): + expected = "sqlite:///rasa.db" + assert str(SQLTrackerStore.get_db_url(dialect="sqlite", db="rasa.db")) == expected + + def test_get_db_url_with_correct_host(): expected = "postgresql://localhost:5005/mydb" @@ -200,7 +338,25 @@ def test_get_db_url_with_query(): ) -def 
test_db_url_with_query_from_endpoint_config(): +def test_sql_tracker_store_logs_do_not_show_password(caplog: LogCaptureFixture): + dialect = "postgresql" + host = "localhost" + port = 9901 + db = "some-database" + username = "db-user" + password = "some-password" + + with caplog.at_level(logging.DEBUG): + _ = SQLTrackerStore(None, dialect, host, port, db, username, password) + + # the URL in the logs does not contain the password + assert password not in caplog.text + + # instead the password is displayed as '***' + assert f"postgresql://{username}:***@{host}:{port}/{db}" in caplog.text + + +def test_db_url_with_query_from_endpoint_config(tmp_path: Path): endpoint_config = """ tracker_store: dialect: postgresql @@ -213,11 +369,9 @@ def test_db_url_with_query_from_endpoint_config(): driver: my-driver another: query """ - - with tempfile.NamedTemporaryFile("w+", suffix="_tmp_config_file.yml") as f: - f.write(endpoint_config) - f.flush() - store_config = read_endpoint_config(f.name, "tracker_store") + f = tmp_path / "tmp_config_file.yml" + f.write_text(endpoint_config) + store_config = read_endpoint_config(str(f), "tracker_store") url = SQLTrackerStore.get_db_url(**store_config.kwargs) @@ -231,3 +385,451 @@ def test_db_url_with_query_from_endpoint_config(): itertools.permutations(("another=query", "driver=my-driver")) ) ) + + +def test_fail_safe_tracker_store_if_no_errors(): + mocked_tracker_store = Mock() + + tracker_store = FailSafeTrackerStore(mocked_tracker_store, None) + + # test save + mocked_tracker_store.save = Mock() + tracker_store.save(None) + mocked_tracker_store.save.assert_called_once() + + # test retrieve + expected = [1] + mocked_tracker_store.retrieve = Mock(return_value=expected) + sender_id = "10" + assert tracker_store.retrieve(sender_id) == expected + mocked_tracker_store.retrieve.assert_called_once_with(sender_id) + + # test keys + expected = ["sender 1", "sender 2"] + mocked_tracker_store.keys = Mock(return_value=expected) + assert tracker_store.keys() == expected + mocked_tracker_store.keys.assert_called_once() + + +def test_fail_safe_tracker_store_with_save_error(): + mocked_tracker_store = Mock() + mocked_tracker_store.save = Mock(side_effect=Exception()) + + fallback_tracker_store = Mock() + fallback_tracker_store.save = Mock() + + on_error_callback = Mock() + + tracker_store = FailSafeTrackerStore( + mocked_tracker_store, on_error_callback, fallback_tracker_store + ) + tracker_store.save(None) + + fallback_tracker_store.save.assert_called_once() + on_error_callback.assert_called_once() + + +def test_fail_safe_tracker_store_with_keys_error(): + mocked_tracker_store = Mock() + mocked_tracker_store.keys = Mock(side_effect=Exception()) + + on_error_callback = Mock() + + tracker_store = FailSafeTrackerStore(mocked_tracker_store, on_error_callback) + assert tracker_store.keys() == [] + on_error_callback.assert_called_once() + + +def test_fail_safe_tracker_store_with_retrieve_error(): + mocked_tracker_store = Mock() + mocked_tracker_store.retrieve = Mock(side_effect=Exception()) + + fallback_tracker_store = Mock() + on_error_callback = Mock() + + tracker_store = FailSafeTrackerStore( + mocked_tracker_store, on_error_callback, fallback_tracker_store + ) + + assert tracker_store.retrieve("sender_id") is None + on_error_callback.assert_called_once() + + +def test_set_fail_safe_tracker_store_domain(default_domain: Domain): + tracker_store = InMemoryTrackerStore(domain) + fallback_tracker_store = InMemoryTrackerStore(None) + failsafe_store = FailSafeTrackerStore(tracker_store, 
None, fallback_tracker_store)
+
+    failsafe_store.domain = default_domain
+    assert failsafe_store.domain is default_domain
+    assert tracker_store.domain is failsafe_store.domain
+    assert fallback_tracker_store.domain is failsafe_store.domain
+
+
+def create_tracker_with_partially_saved_events(
+    tracker_store: TrackerStore,
+) -> Tuple[List[Event], DialogueStateTracker]:
+    # creates a tracker with two events and saves it to the tracker store
+    # following that, it adds three more events that are not saved to the tracker store
+    sender_id = uuid.uuid4().hex
+
+    # create tracker with two events and save it
+    events = [UserUttered("hello"), BotUttered("what")]
+    tracker = DialogueStateTracker.from_events(sender_id, events)
+    tracker_store.save(tracker)
+
+    # add more events to the tracker, do not yet save it
+    events = [ActionExecuted(ACTION_LISTEN_NAME), UserUttered("123"), BotUttered("yes")]
+    for event in events:
+        tracker.update(event)
+
+    return events, tracker
+
+
+def _saved_tracker_with_multiple_session_starts(
+    tracker_store: TrackerStore, sender_id: Text
+) -> DialogueStateTracker:
+    tracker = DialogueStateTracker.from_events(
+        sender_id,
+        [
+            ActionExecuted(ACTION_SESSION_START_NAME),
+            SessionStarted(),
+            UserUttered("hi"),
+            ActionExecuted(ACTION_SESSION_START_NAME),
+            SessionStarted(),
+        ],
+    )
+
+    tracker_store.save(tracker)
+    return tracker_store.retrieve(sender_id)
+
+
+def test_mongo_additional_events(default_domain: Domain):
+    tracker_store = MockedMongoTrackerStore(default_domain)
+    events, tracker = create_tracker_with_partially_saved_events(tracker_store)
+
+    # make sure only new events are returned
+    # noinspection PyProtectedMember
+    assert list(tracker_store._additional_events(tracker)) == events
+
+
+def test_mongo_additional_events_with_session_start(default_domain: Domain):
+    sender = "test_mongo_additional_events_with_session_start"
+    tracker_store = MockedMongoTrackerStore(default_domain)
+    tracker = _saved_tracker_with_multiple_session_starts(tracker_store, sender)
+
+    tracker.update(UserUttered("hi2"))
+
+    # noinspection PyProtectedMember
+    additional_events = list(tracker_store._additional_events(tracker))
+
+    assert len(additional_events) == 1
+    assert isinstance(additional_events[0], UserUttered)
+
+
+# we cannot parametrise over this and the previous test due to the different ways of
+# calling _additional_events()
+def test_sql_additional_events(default_domain: Domain):
+    tracker_store = SQLTrackerStore(default_domain)
+    additional_events, tracker = create_tracker_with_partially_saved_events(
+        tracker_store
+    )
+
+    # make sure only new events are returned
+    with tracker_store.session_scope() as session:
+        # noinspection PyProtectedMember
+        assert (
+            list(tracker_store._additional_events(session, tracker))
+            == additional_events
+        )
+
+
+def test_sql_additional_events_with_session_start(default_domain: Domain):
+    sender = "test_sql_additional_events_with_session_start"
+    tracker_store = SQLTrackerStore(default_domain)
+    tracker = _saved_tracker_with_multiple_session_starts(tracker_store, sender)
+
+    tracker.update(UserUttered("hi2"), default_domain)
+
+    # make sure only new events are returned
+    with tracker_store.session_scope() as session:
+        # noinspection PyProtectedMember
+        additional_events = list(tracker_store._additional_events(session, tracker))
+        assert len(additional_events) == 1
+        assert isinstance(additional_events[0], UserUttered)
+
+
+@pytest.mark.parametrize(
+    "tracker_store_type,tracker_store_kwargs",
+    [(MockedMongoTrackerStore, {}),
(SQLTrackerStore, {"host": "sqlite:///"})], +) +def test_tracker_store_retrieve_with_session_started_events( + tracker_store_type: Type[TrackerStore], + tracker_store_kwargs: Dict, + default_domain: Domain, +): + tracker_store = tracker_store_type(default_domain, **tracker_store_kwargs) + events = [ + UserUttered("Hola", {"name": "greet"}, timestamp=1), + BotUttered("Hi", timestamp=2), + SessionStarted(timestamp=3), + UserUttered("Ciao", {"name": "greet"}, timestamp=4), + ] + sender_id = "test_sql_tracker_store_with_session_events" + tracker = DialogueStateTracker.from_events(sender_id, events) + tracker_store.save(tracker) + + # Save other tracker to ensure that we don't run into problems with other senders + other_tracker = DialogueStateTracker.from_events("other-sender", [SessionStarted()]) + tracker_store.save(other_tracker) + + # Retrieve tracker with events since latest SessionStarted + tracker = tracker_store.retrieve(sender_id) + + assert len(tracker.events) == 2 + assert all((event == tracker.events[i] for i, event in enumerate(events[2:]))) + + +@pytest.mark.parametrize( + "tracker_store_type,tracker_store_kwargs", + [(MockedMongoTrackerStore, {}), (SQLTrackerStore, {"host": "sqlite:///"})], +) +def test_tracker_store_retrieve_without_session_started_events( + tracker_store_type: Type[TrackerStore], + tracker_store_kwargs: Dict, + default_domain: Domain, +): + tracker_store = tracker_store_type(default_domain, **tracker_store_kwargs) + + # Create tracker with a SessionStarted event + events = [ + UserUttered("Hola", {"name": "greet"}), + BotUttered("Hi"), + UserUttered("Ciao", {"name": "greet"}), + BotUttered("Hi2"), + ] + + sender_id = "test_sql_tracker_store_retrieve_without_session_started_events" + tracker = DialogueStateTracker.from_events(sender_id, events) + tracker_store.save(tracker) + + # Save other tracker to ensure that we don't run into problems with other senders + other_tracker = DialogueStateTracker.from_events("other-sender", [SessionStarted()]) + tracker_store.save(other_tracker) + + tracker = tracker_store.retrieve(sender_id) + + assert len(tracker.events) == 4 + assert all(event == tracker.events[i] for i, event in enumerate(events)) + + +@pytest.mark.parametrize( + "tracker_store_type,tracker_store_kwargs", + [ + (MockedMongoTrackerStore, {}), + (SQLTrackerStore, {"host": "sqlite:///"}), + (InMemoryTrackerStore, {}), + ], +) +def test_tracker_store_retrieve_with_events_from_previous_sessions( + tracker_store_type: Type[TrackerStore], tracker_store_kwargs: Dict +): + tracker_store = tracker_store_type(Domain.empty(), **tracker_store_kwargs) + tracker_store.load_events_from_previous_conversation_sessions = True + + conversation_id = uuid.uuid4().hex + tracker = DialogueStateTracker.from_events( + conversation_id, + [ + ActionExecuted(ACTION_SESSION_START_NAME), + SessionStarted(), + UserUttered("hi"), + ActionExecuted(ACTION_SESSION_START_NAME), + SessionStarted(), + ], + ) + tracker_store.save(tracker) + + actual = tracker_store.retrieve(conversation_id) + + assert len(actual.events) == len(tracker.events) + + +def test_session_scope_error( + monkeypatch: MonkeyPatch, capsys: CaptureFixture, default_domain: Domain +): + tracker_store = SQLTrackerStore(default_domain) + tracker_store.sessionmaker = Mock() + + requested_schema = uuid.uuid4().hex + + # `ensure_schema_exists()` raises `ValueError` + mocked_ensure_schema_exists = Mock(side_effect=ValueError(requested_schema)) + monkeypatch.setattr( + rasa.core.tracker_store, "ensure_schema_exists", 
mocked_ensure_schema_exists + ) + + # `SystemExit` is triggered by failing `ensure_schema_exists()` + with pytest.raises(SystemExit): + with tracker_store.session_scope() as _: + pass + + # error message is printed + assert ( + f"Requested PostgreSQL schema '{requested_schema}' was not found in the " + f"database." in capsys.readouterr()[0] + ) + + +@pytest.mark.parametrize( + "url,is_postgres_url", + [ + (f"{PGDialect.name}://admin:pw@localhost:5432/rasa", True), + (f"{SQLiteDialect.name}:///", False), + (URL(PGDialect.name), True), + (URL(SQLiteDialect.name), False), + ], +) +def test_is_postgres_url(url: Union[Text, URL], is_postgres_url: bool): + assert rasa.core.tracker_store.is_postgresql_url(url) == is_postgres_url + + +def set_or_delete_postgresql_schema_env_var( + monkeypatch: MonkeyPatch, value: Optional[Text] +) -> None: + """Set `POSTGRESQL_SCHEMA` environment variable using `MonkeyPatch`. + + Args: + monkeypatch: Instance of `MonkeyPatch` to use for patching. + value: Value of the `POSTGRESQL_SCHEMA` environment variable to set. + """ + if value is None: + monkeypatch.delenv(POSTGRESQL_SCHEMA, raising=False) + else: + monkeypatch.setenv(POSTGRESQL_SCHEMA, value) + + +@pytest.mark.parametrize( + "url,schema_env,kwargs", + [ + # postgres without schema + ( + f"{PGDialect.name}://admin:pw@localhost:5432/rasa", + None, + { + "pool_size": rasa.core.tracker_store.POSTGRESQL_DEFAULT_POOL_SIZE, + "max_overflow": rasa.core.tracker_store.POSTGRESQL_DEFAULT_MAX_OVERFLOW, + }, + ), + # postgres with schema + ( + f"{PGDialect.name}://admin:pw@localhost:5432/rasa", + "schema1", + { + "connect_args": {"options": "-csearch_path=schema1"}, + "pool_size": rasa.core.tracker_store.POSTGRESQL_DEFAULT_POOL_SIZE, + "max_overflow": rasa.core.tracker_store.POSTGRESQL_DEFAULT_MAX_OVERFLOW, + }, + ), + # oracle without schema + (f"{OracleDialect.name}://admin:pw@localhost:5432/rasa", None, {}), + # oracle with schema + (f"{OracleDialect.name}://admin:pw@localhost:5432/rasa", "schema1", {}), + # sqlite + (f"{SQLiteDialect.name}:///", None, {}), + ], +) +def test_create_engine_kwargs( + monkeypatch: MonkeyPatch, + url: Union[Text, URL], + schema_env: Optional[Text], + kwargs: Dict[Text, Dict[Text, Union[Text, int]]], +): + set_or_delete_postgresql_schema_env_var(monkeypatch, schema_env) + + assert rasa.core.tracker_store.create_engine_kwargs(url) == kwargs + + +@contextmanager +def does_not_raise(): + """Contextmanager to be used when an expression is not expected to raise an + exception. + + This contextmanager can be used in parametrized tests, where some input objects + are expected to raise and others are not. 
+
+    Example:
+
+        @pytest.mark.parametrize(
+            "a,b,raises_context",
+            [
+                # 5/6 is a legal division
+                (5, 6, does_not_raise()),
+                # 5/0 raises a `ZeroDivisionError`
+                (5, 0, pytest.raises(ZeroDivisionError)),
+            ],
+        )
+        def test_divide(
+            a: int, b: int, raises_context: ContextManager,
+        ):
+            with raises_context:
+                _ = a / b
+
+    """
+    yield
+
+
+@pytest.mark.parametrize(
+    "is_postgres,schema_env,schema_exists,raises_context",
+    [
+        (True, "schema1", True, does_not_raise()),
+        (True, "schema1", False, pytest.raises(ValueError)),
+        (False, "schema1", False, does_not_raise()),
+        (True, None, False, does_not_raise()),
+        (False, None, False, does_not_raise()),
+    ],
+)
+def test_ensure_schema_exists(
+    monkeypatch: MonkeyPatch,
+    is_postgres: bool,
+    schema_env: Optional[Text],
+    schema_exists: bool,
+    raises_context: ContextManager,
+):
+    set_or_delete_postgresql_schema_env_var(monkeypatch, schema_env)
+    monkeypatch.setattr(
+        rasa.core.tracker_store, "is_postgresql_url", lambda _: is_postgres
+    )
+    monkeypatch.setattr(sqlalchemy, "exists", Mock())
+
+    # mock the `session.query().scalar()` query which returns whether the schema
+    # exists in the db
+    scalar = Mock(return_value=schema_exists)
+    query = Mock(scalar=scalar)
+    session = Mock()
+    session.query = Mock(return_value=query)
+
+    with raises_context:
+        rasa.core.tracker_store.ensure_schema_exists(session)
+
+
+def test_current_state_without_events(default_domain: Domain):
+    tracker_store = MockedMongoTrackerStore(default_domain)
+
+    # insert some events
+    events = [
+        UserUttered("Hola", {"name": "greet"}),
+        BotUttered("Hi"),
+        UserUttered("Ciao", {"name": "greet"}),
+        BotUttered("Hi2"),
+    ]
+
+    sender_id = "test_mongo_tracker_store_current_state_without_events"
+    tracker = DialogueStateTracker.from_events(sender_id, events)
+
+    # get current state without events
+    # noinspection PyProtectedMember
+    state = tracker_store._current_tracker_state_without_events(tracker)
+
+    # `events` key should not be in there
+    assert state and "events" not in state
diff --git a/tests/core/test_trackers.py b/tests/core/test_trackers.py
index 92271cb1ee19..78888eec0745 100644
--- a/tests/core/test_trackers.py
+++ b/tests/core/test_trackers.py
@@ -1,16 +1,17 @@
-import asyncio
 import json
 import logging
 import os
 import tempfile
+from typing import List, Text, Dict, Any, Type
 
 import fakeredis
 import pytest
 
 import rasa.utils.io
 from rasa.core import training, restore
-from rasa.core import utils
-from rasa.core.actions.action import ACTION_LISTEN_NAME
+from rasa.core.actions.action import ACTION_LISTEN_NAME, ACTION_SESSION_START_NAME
+from rasa.core.agent import Agent
+from rasa.core.constants import REQUESTED_SLOT
 from rasa.core.domain import Domain
 from rasa.core.events import (
     SlotSet,
@@ -19,7 +20,14 @@
     Restarted,
     ActionReverted,
     UserUtteranceReverted,
+    SessionStarted,
+    Event,
+    ActiveLoop,
+    ActionExecutionRejected,
+    BotUttered,
+    LegacyForm,
 )
+from rasa.core.slots import FloatSlot, BooleanSlot, ListSlot, TextSlot, DataSlot, Slot
 from rasa.core.tracker_store import (
     InMemoryTrackerStore,
     RedisTrackerStore,
@@ -27,7 +35,12 @@
 )
 from rasa.core.tracker_store import TrackerStore
 from rasa.core.trackers import DialogueStateTracker, EventVerbosity
-from tests.core.conftest import DEFAULT_STORIES_FILE, EXAMPLE_DOMAINS, TEST_DIALOGUES
+from tests.core.conftest import (
+    DEFAULT_STORIES_FILE,
+    EXAMPLE_DOMAINS,
+    TEST_DIALOGUES,
+    MockedMongoTrackerStore,
+)
 from tests.core.utilities import (
     tracker_from_dialogue_file,
     read_dialogue_file,
@@ -38,24 +51,15
@@ domain = Domain.load("examples/moodbot/domain.yml") -@pytest.fixture(scope="module") -def loop(): - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - loop = rasa.utils.io.enable_async_loop_debugging(loop) - yield loop - loop.close() - - class MockRedisTrackerStore(RedisTrackerStore): - def __init__(self, domain): + def __init__(self, _domain: Domain) -> None: self.red = fakeredis.FakeStrictRedis() self.record_exp = None # added in redis==3.3.0, but not yet in fakeredis self.red.connection_pool.connection_class.health_check_interval = 0 - TrackerStore.__init__(self, domain) + TrackerStore.__init__(self, _domain) def stores_to_be_tested(): @@ -64,11 +68,12 @@ def stores_to_be_tested(): MockRedisTrackerStore(domain), InMemoryTrackerStore(domain), SQLTrackerStore(domain, db=os.path.join(temp, "rasa.db")), + MockedMongoTrackerStore(domain), ] def stores_to_be_tested_ids(): - return ["redis-tracker", "in-memory-tracker", "SQL-tracker"] + return ["redis-tracker", "in-memory-tracker", "SQL-tracker", "mongo-tracker"] def test_tracker_duplicate(): @@ -124,7 +129,7 @@ def test_tracker_store(store, pair): assert restored == tracker -async def test_tracker_write_to_story(tmpdir, moodbot_domain): +async def test_tracker_write_to_story(tmpdir, moodbot_domain: Domain): tracker = tracker_from_dialogue_file( "data/test_dialogues/moodbot.json", moodbot_domain ) @@ -144,34 +149,41 @@ async def test_tracker_write_to_story(tmpdir, moodbot_domain): assert recovered.events[4].intent == {"confidence": 1.0, "name": "mood_unhappy"} -async def test_tracker_state_regression_without_bot_utterance(default_agent): +async def test_tracker_state_regression_without_bot_utterance(default_agent: Agent): sender_id = "test_tracker_state_regression_without_bot_utterance" for i in range(0, 2): - await default_agent.handle_message("/greet", sender_id=sender_id) + await default_agent.handle_text("/greet", sender_id=sender_id) tracker = default_agent.tracker_store.get_or_create_tracker(sender_id) # Ensures that the tracker has changed between the utterances # (and wasn't reset in between them) - expected = "action_listen;greet;utter_greet;action_listen;greet;action_listen" + expected = ( + "action_session_start;action_listen;greet;utter_greet;action_listen;" + "greet;utter_greet;action_listen" + ) assert ( ";".join([e.as_story_string() for e in tracker.events if e.as_story_string()]) == expected ) -async def test_tracker_state_regression_with_bot_utterance(default_agent): +async def test_tracker_state_regression_with_bot_utterance(default_agent: Agent): sender_id = "test_tracker_state_regression_with_bot_utterance" for i in range(0, 2): - await default_agent.handle_message("/greet", sender_id=sender_id) + await default_agent.handle_text("/greet", sender_id=sender_id) tracker = default_agent.tracker_store.get_or_create_tracker(sender_id) expected = [ + "action_session_start", + None, "action_listen", "greet", "utter_greet", None, "action_listen", "greet", + "utter_greet", + None, "action_listen", ] @@ -181,44 +193,93 @@ async def test_tracker_state_regression_with_bot_utterance(default_agent): async def test_bot_utterance_comes_after_action_event(default_agent): sender_id = "test_bot_utterance_comes_after_action_event" - await default_agent.handle_message("/greet", sender_id=sender_id) + await default_agent.handle_text("/greet", sender_id=sender_id) tracker = default_agent.tracker_store.get_or_create_tracker(sender_id) # important is, that the 'bot' comes after the second 'action' and not # before - expected = 
["action", "user", "action", "bot", "action"] + expected = [ + "action", + "session_started", + "action", + "user", + "action", + "bot", + "action", + ] assert [e.type_name for e in tracker.events] == expected -def test_tracker_entity_retrieval(default_domain): +@pytest.mark.parametrize( + "entities, expected_values", + [ + ([{"value": "greet", "entity": "entity_name"}], ["greet"]), + ( + [ + {"value": "greet", "entity": "entity_name"}, + {"value": "bye", "entity": "other"}, + ], + ["greet"], + ), + ( + [ + {"value": "greet", "entity": "entity_name"}, + {"value": "bye", "entity": "entity_name"}, + ], + ["greet", "bye"], + ), + ( + [ + {"value": "greet", "entity": "entity_name", "role": "role"}, + {"value": "bye", "entity": "entity_name"}, + ], + ["greet"], + ), + ( + [ + {"value": "greet", "entity": "entity_name", "group": "group"}, + {"value": "bye", "entity": "entity_name"}, + ], + ["greet"], + ), + ( + [ + {"value": "greet", "entity": "entity_name"}, + {"value": "bye", "entity": "entity_name", "group": "group"}, + ], + ["greet", "bye"], + ), + ], +) +def test_get_latest_entity_values( + entities: List[Dict[Text, Any]], expected_values: List[Text], default_domain: Domain +): + entity_type = entities[0].get("entity") + entity_role = entities[0].get("role") + entity_group = entities[0].get("group") + tracker = DialogueStateTracker("default", default_domain.slots) # the retrieved tracker should be empty assert len(tracker.events) == 0 - assert list(tracker.get_latest_entity_values("entity_name")) == [] + assert list(tracker.get_latest_entity_values(entity_type)) == [] intent = {"name": "greet", "confidence": 1.0} - tracker.update( - UserUttered( - "/greet", - intent, - [ - { - "start": 1, - "end": 5, - "value": "greet", - "entity": "entity_name", - "extractor": "manual", - } - ], + tracker.update(UserUttered("/greet", intent, entities)) + + assert ( + list( + tracker.get_latest_entity_values( + entity_type, entity_role=entity_role, entity_group=entity_group + ) ) + == expected_values ) - assert list(tracker.get_latest_entity_values("entity_name")) == ["greet"] assert list(tracker.get_latest_entity_values("unknown")) == [] -def test_tracker_update_slots_with_entity(default_domain): +def test_tracker_update_slots_with_entity(default_domain: Domain): tracker = DialogueStateTracker("default", default_domain.slots) test_entity = default_domain.entities[0] @@ -245,7 +306,7 @@ def test_tracker_update_slots_with_entity(default_domain): assert tracker.get_slot(test_entity) == expected_slot_value -def test_restart_event(default_domain): +def test_restart_event(default_domain: Domain): tracker = DialogueStateTracker("default", default_domain.slots) # the retrieved tracker should be empty assert len(tracker.events) == 0 @@ -279,7 +340,19 @@ def test_restart_event(default_domain): assert len(list(recovered.generate_all_prior_trackers())) == 1 -def test_revert_action_event(default_domain): +def test_session_start(default_domain: Domain): + tracker = DialogueStateTracker("default", default_domain.slots) + # the retrieved tracker should be empty + assert len(tracker.events) == 0 + + # add a SessionStarted event + tracker.update(SessionStarted()) + + # tracker has one event + assert len(tracker.events) == 1 + + +def test_revert_action_event(default_domain: Domain): tracker = DialogueStateTracker("default", default_domain.slots) # the retrieved tracker should be empty assert len(tracker.events) == 0 @@ -315,7 +388,7 @@ def test_revert_action_event(default_domain): assert 
len(list(tracker.generate_all_prior_trackers())) == 3 -def test_revert_user_utterance_event(default_domain): +def test_revert_user_utterance_event(default_domain: Domain): tracker = DialogueStateTracker("default", default_domain.slots) # the retrieved tracker should be empty assert len(tracker.events) == 0 @@ -357,7 +430,7 @@ def test_revert_user_utterance_event(default_domain): assert len(list(tracker.generate_all_prior_trackers())) == 3 -def test_traveling_back_in_time(default_domain): +def test_traveling_back_in_time(default_domain: Domain): tracker = DialogueStateTracker("default", default_domain.slots) # the retrieved tracker should be empty assert len(tracker.events) == 0 @@ -399,7 +472,7 @@ async def test_dump_and_restore_as_json(default_agent, tmpdir_factory): out_path = tmpdir_factory.mktemp("tracker").join("dumped_tracker.json") dumped = tracker.current_state(EventVerbosity.AFTER_RESTART) - utils.dump_obj_as_json_to_file(out_path.strpath, dumped) + rasa.utils.io.dump_obj_as_json_to_file(out_path.strpath, dumped) restored_tracker = restore.load_tracker_from_json( out_path.strpath, default_agent.domain @@ -408,7 +481,7 @@ async def test_dump_and_restore_as_json(default_agent, tmpdir_factory): assert restored_tracker == tracker -def test_read_json_dump(default_agent): +def test_read_json_dump(default_agent: Agent): tracker_dump = "data/test_trackers/tracker_moodbot.json" tracker_json = json.loads(rasa.utils.io.read_file(tracker_dump)) @@ -498,11 +571,32 @@ def test_current_state_applied_events(default_agent): assert state.get("events") == applied_events +def test_session_started_not_part_of_applied_events(default_agent: Agent): + # take tracker dump and insert a SessionStarted event sequence + tracker_dump = "data/test_trackers/tracker_moodbot.json" + tracker_json = json.loads(rasa.utils.io.read_file(tracker_dump)) + tracker_json["events"].insert( + 4, {"event": ActionExecuted.type_name, "name": ACTION_SESSION_START_NAME} + ) + tracker_json["events"].insert(5, {"event": SessionStarted.type_name}) + + # initialise a tracker from this list of events + tracker = DialogueStateTracker.from_dict( + tracker_json.get("sender_id"), + tracker_json.get("events", []), + default_agent.domain.slots, + ) + + # the SessionStart event was at index 5, the tracker's `applied_events()` should + # be the same as the list of events from index 6 onwards + assert tracker.applied_events() == list(tracker.events)[6:] + + async def test_tracker_dump_e2e_story(default_agent): sender_id = "test_tracker_dump_e2e_story" - await default_agent.handle_message("/greet", sender_id=sender_id) - await default_agent.handle_message("/goodbye", sender_id=sender_id) + await default_agent.handle_text("/greet", sender_id=sender_id) + await default_agent.handle_text("/goodbye", sender_id=sender_id) tracker = default_agent.tracker_store.get_or_create_tracker(sender_id) story = tracker.export_stories(e2e=True) @@ -575,6 +669,21 @@ def test_last_executed_has_not_name(): assert tracker.last_executed_action_has("another") is False +def test_events_metadata(): + # It should be possible to attach arbitrary metadata to any event and then + # retrieve it after getting the tracker dict representation. 
+ events = [ + ActionExecuted("one", metadata={"one": 1}), + user_uttered("two", 1, metadata={"two": 2}), + ActionExecuted(ACTION_LISTEN_NAME, metadata={"three": 3}), + ] + + events = get_tracker(events).current_state(EventVerbosity.ALL)["events"] + assert events[0]["metadata"] == {"one": 1} + assert events[1]["metadata"] == {"two": 2} + assert events[2]["metadata"] == {"three": 3} + + @pytest.mark.parametrize("key, value", [("asfa", 1), ("htb", None)]) def test_tracker_without_slots(key, value, caplog): event = SlotSet(key, value) @@ -585,3 +694,420 @@ def test_tracker_without_slots(key, value, caplog): v = tracker.get_slot(key) assert v == value assert len(caplog.records) == 0 + + +@pytest.mark.parametrize( + "slot_type, initial_value, value_to_set", + [ + (FloatSlot, 4.234, 2.5), + (BooleanSlot, True, False), + (ListSlot, [1, 2, 3], [4, 5, 6]), + (TextSlot, "some string", "another string"), + (DataSlot, {"a": "nice dict"}, {"b": "better dict"}), + ], +) +def test_tracker_does_not_modify_slots( + slot_type: Type[Slot], initial_value: Any, value_to_set: Any +): + slot_name = "some-slot" + slot = slot_type(slot_name, initial_value) + tracker = DialogueStateTracker("some-conversation-id", [slot]) + + # change the slot value in the tracker + tracker._set_slot(slot_name, value_to_set) + + # assert that the tracker contains the slot with the modified value + assert tracker.get_slot(slot_name) == value_to_set + + # assert that the initial slot has not been affected + assert slot.value == initial_value + + +@pytest.mark.parametrize( + "events, expected_applied_events", + [ + ( + [ + # Form gets triggered. + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("fill_whole_form"), + # Form executes and fills slots. + ActionExecuted("loop"), + ActiveLoop("loop"), + SlotSet("slot1", "value"), + SlotSet("slot2", "value2"), + ], + [ + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("fill_whole_form"), + ActionExecuted("loop"), + ActiveLoop("loop"), + SlotSet("slot1", "value"), + SlotSet("slot2", "value2"), + ], + ), + ( + [ + # Form gets triggered. + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("fill_whole_form"), + # Form executes and fills all slots right away. Form finishes. + ActionExecuted("loop"), + ActiveLoop("loop"), + SlotSet("slot1", "value"), + SlotSet("slot2", "value2"), + ActiveLoop(None), + # Form is done. Regular conversation continues. + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("intent outside form"), + ], + [ + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("fill_whole_form"), + ActionExecuted("loop"), + ActiveLoop("loop"), + SlotSet("slot1", "value"), + SlotSet("slot2", "value2"), + ActiveLoop(None), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("intent outside form"), + ], + ), + ( + [ + # Form gets triggered. + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("greet"), + # Form executes and requests slot. + ActionExecuted("loop"), + ActiveLoop("loop"), + SlotSet(REQUESTED_SLOT, "bla"), + # User fills slot. + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("bye"), + # Form deactivates after all slots are finished. + ActionExecuted("loop"), + SlotSet("slot", "value"), + ActiveLoop(None), + SlotSet(REQUESTED_SLOT, None), + ], + [ + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("greet"), + ActionExecuted("loop"), + ActiveLoop("loop"), + SlotSet(REQUESTED_SLOT, "bla"), + SlotSet("slot", "value"), + ActiveLoop(None), + SlotSet(REQUESTED_SLOT, None), + ], + ), + ( + [ + # Form was executed before and finished. 
+ ActionExecuted("loop"), + ActiveLoop(None), + # Form gets triggered again (for whatever reason).. + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("greet"), + # Form executes and requests slot. + ActionExecuted("loop"), + ActiveLoop("loop"), + SlotSet(REQUESTED_SLOT, "bla"), + # User fills slot. + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("bye"), + # Form deactivates after all slots are finished. + ActionExecuted("loop"), + SlotSet("slot", "value"), + ActiveLoop(None), + SlotSet(REQUESTED_SLOT, None), + ], + [ + ActionExecuted("loop"), + ActiveLoop(None), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("greet"), + ActionExecuted("loop"), + ActiveLoop("loop"), + SlotSet(REQUESTED_SLOT, "bla"), + SlotSet("slot", "value"), + ActiveLoop(None), + SlotSet(REQUESTED_SLOT, None), + ], + ), + ( + [ + user_uttered("trigger form"), + ActionExecuted("form"), + ActiveLoop("form"), + SlotSet(REQUESTED_SLOT, "some slot"), + BotUttered("ask slot"), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("fill requested slots"), + SlotSet("some slot", "value"), + ActionExecuted("form"), + SlotSet("some slot", "value"), + SlotSet(REQUESTED_SLOT, None), + ActiveLoop(None), + ], + [ + user_uttered("trigger form"), + ActionExecuted("form"), + ActiveLoop("form"), + SlotSet(REQUESTED_SLOT, "some slot"), + BotUttered("ask slot"), + SlotSet("some slot", "value"), + SlotSet("some slot", "value"), + SlotSet(REQUESTED_SLOT, None), + ActiveLoop(None), + ], + ), + ], +) +def test_applied_events_with_loop_happy_path( + events: List[Event], expected_applied_events: List[Event] +): + tracker = DialogueStateTracker.from_events("👋", events) + applied = tracker.applied_events() + + assert applied == expected_applied_events + + +@pytest.mark.parametrize( + "events, expected_applied_events", + [ + ( + [ + # Form is triggered and requests slot. + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("greet"), + ActionExecuted("loop"), + ActiveLoop("loop"), + SlotSet(REQUESTED_SLOT, "bla"), + # User sends chitchat instead of answering form. + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("chitchat"), + # Form rejected execution. + ActionExecutionRejected("loop"), + # Action which deals with unhappy path. + ActionExecuted("handling chitchat"), + # We immediately return to form after executing an action to handle it. + ActionExecuted("loop"), + # Form happy path continues until all slots are filled. + SlotSet(REQUESTED_SLOT, "bla"), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("fill slots"), + ActionExecuted("loop"), + SlotSet("slot", "value"), + SlotSet(REQUESTED_SLOT, None), + ActiveLoop(None), + ], + [ + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("greet"), + ActionExecuted("loop"), + ActiveLoop("loop"), + SlotSet(REQUESTED_SLOT, "bla"), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("chitchat"), + ActionExecutionRejected("loop"), + ActionExecuted("handling chitchat"), + ActionExecuted("loop"), + SlotSet(REQUESTED_SLOT, "bla"), + SlotSet("slot", "value"), + SlotSet(REQUESTED_SLOT, None), + ActiveLoop(None), + ], + ), + ( + [ + # Form gets triggered and requests slots. + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("greet"), + ActionExecuted("loop"), + ActiveLoop("loop"), + SlotSet(REQUESTED_SLOT, "bla"), + # User sends chitchat instead of answering form. + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("chitchat"), + # Form rejected execution. + ActionExecutionRejected("loop"), + # Unhappy path kicks in. 
+ ActionExecuted("ask if continue"), + ActionExecuted(ACTION_LISTEN_NAME), + # User decides to fill form eventually. + user_uttered("I want to continue with form"), + ActionExecuted("loop"), + SlotSet(REQUESTED_SLOT, "bla"), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("fill slots"), + ActionExecuted("loop"), + SlotSet("slot", "value"), + SlotSet(REQUESTED_SLOT, None), + ActiveLoop(None), + ], + [ + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("greet"), + ActionExecuted("loop"), + ActiveLoop("loop"), + SlotSet(REQUESTED_SLOT, "bla"), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("chitchat"), + ActionExecutionRejected("loop"), + ActionExecuted("ask if continue"), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("I want to continue with form"), + ActionExecuted("loop"), + SlotSet(REQUESTED_SLOT, "bla"), + SlotSet("slot", "value"), + SlotSet(REQUESTED_SLOT, None), + ActiveLoop(None), + ], + ), + ( + [ + # Form gets triggered and requests slots. + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("greet"), + ActionExecuted("loop"), + ActiveLoop("loop"), + SlotSet(REQUESTED_SLOT, "bla"), + # User sends chitchat instead of answering form. + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("chitchat"), + # Form rejected execution. + ActionExecutionRejected("loop"), + # Unhappy path kicks in. + ActionExecuted("ask if continue"), + ActionExecuted(ACTION_LISTEN_NAME), + # User wants to quit form. + user_uttered("Stop the form"), + ActionExecuted("some action"), + ActiveLoop(None), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("outside the form"), + ], + [ + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("greet"), + ActionExecuted("loop"), + ActiveLoop("loop"), + SlotSet(REQUESTED_SLOT, "bla"), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("chitchat"), + ActionExecutionRejected("loop"), + ActionExecuted("ask if continue"), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("Stop the form"), + ActionExecuted("some action"), + ActiveLoop(None), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("outside the form"), + ], + ), + ( + [ + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("greet"), + ActionExecuted("loop"), + ActiveLoop("loop"), + SlotSet(REQUESTED_SLOT, "bla"), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("chitchat"), + # Different action than form action after chitchat. + # This indicates we are in an unhappy path. + ActionExecuted("handle_chitchat"), + ActionExecuted("loop"), + ActiveLoop("loop"), + ], + [ + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("greet"), + ActionExecuted("loop"), + ActiveLoop("loop"), + SlotSet(REQUESTED_SLOT, "bla"), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("chitchat"), + # Different action than form action after chitchat. + # This indicates we are in an unhappy path. 
+ ActionExecuted("handle_chitchat"), + ActionExecuted("loop"), + ActiveLoop("loop"), + ], + ), + ( + [ + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("greet"), + ActionExecuted("loop"), + ActiveLoop("loop"), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("chitchat"), + ActionExecuted("handle_chitchat"), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("affirm"), + ActionExecuted("loop"), + ], + [ + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("greet"), + ActionExecuted("loop"), + ActiveLoop("loop"), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("chitchat"), + # Different action than form action indicates unhappy path + ActionExecuted("handle_chitchat"), + ActionExecuted(ACTION_LISTEN_NAME), + user_uttered("affirm"), + ActionExecuted("loop"), + ], + ), + ], +) +def test_applied_events_with_loop_unhappy_path( + events: List[Event], expected_applied_events: List[Event] +): + tracker = DialogueStateTracker.from_events("👋", events) + applied = tracker.applied_events() + + assert applied == expected_applied_events + + +def test_reading_of_trackers_with_legacy_form_events(): + loop_name1 = "my loop" + loop_name2 = "my form" + tracker = DialogueStateTracker.from_dict( + "sender", + events_as_dict=[ + {"event": ActiveLoop.type_name, "name": loop_name1}, + {"event": LegacyForm.type_name, "name": None}, + {"event": LegacyForm.type_name, "name": loop_name2}, + ], + ) + + expected_events = [ActiveLoop(loop_name1), LegacyForm(None), LegacyForm(loop_name2)] + assert list(tracker.events) == expected_events + assert tracker.active_loop["name"] == loop_name2 + + +def test_writing_trackers_with_legacy_form_events(): + loop_name = "my loop" + tracker = DialogueStateTracker.from_events( + "sender", evts=[ActiveLoop(loop_name), LegacyForm(None), LegacyForm("some")] + ) + + events_as_dict = [event.as_dict() for event in tracker.events] + + for event in events_as_dict: + assert event["event"] == ActiveLoop.type_name + + +def test_change_form_to_deprecation_warning(): + tracker = DialogueStateTracker.from_events("conversation", evts=[]) + new_form = "new form" + with pytest.warns(DeprecationWarning): + tracker.change_form_to(new_form) + + assert tracker.active_loop_name() == new_form diff --git a/tests/core/test_training.py b/tests/core/test_training.py index 2e7e5b8b86ea..08284c82e374 100644 --- a/tests/core/test_training.py +++ b/tests/core/test_training.py @@ -1,20 +1,33 @@ +from pathlib import Path +from typing import List, Text +from unittest.mock import Mock + import pytest +from _pytest.monkeypatch import MonkeyPatch -from rasa.core.interpreter import RegexInterpreter +from rasa.core.domain import Domain +from rasa.core.interpreter import RegexInterpreter, RasaNLUInterpreter from rasa.core.train import train from rasa.core.agent import Agent from rasa.core.policies.form_policy import FormPolicy -from rasa.core.training.dsl import StoryFileReader from rasa.core.training.visualization import visualize_stories from tests.core.conftest import DEFAULT_DOMAIN_PATH_WITH_SLOTS, DEFAULT_STORIES_FILE -async def test_story_visualization(default_domain, tmpdir): - story_steps = await StoryFileReader.read_from_file( +@pytest.mark.parametrize( + "stories_file", + ["data/test_stories/stories.md", "data/test_yaml_stories/stories.yml"], +) +async def test_story_visualization( + stories_file: Text, default_domain: Domain, tmp_path: Path +): + import rasa.core.training.loading as core_loading + + story_steps = await core_loading.load_data_from_resource( "data/test_stories/stories.md", 
default_domain, interpreter=RegexInterpreter() ) - out_file = tmpdir.join("graph.html").strpath + out_file = str(tmp_path / "graph.html") generated_graph = await visualize_stories( story_steps, default_domain, @@ -28,9 +41,17 @@ async def test_story_visualization(default_domain, tmpdir): assert len(generated_graph.edges()) == 56 -async def test_story_visualization_with_merging(default_domain): - story_steps = await StoryFileReader.read_from_file( - "data/test_stories/stories.md", default_domain, interpreter=RegexInterpreter() +@pytest.mark.parametrize( + "stories_file", + ["data/test_stories/stories.md", "data/test_yaml_stories/stories.yml"], +) +async def test_story_visualization_with_merging( + stories_file: Text, default_domain: Domain +): + import rasa.core.training.loading as core_loading + + story_steps = await core_loading.load_data_from_resource( + stories_file, default_domain, interpreter=RegexInterpreter() ) generated_graph = await visualize_stories( story_steps, @@ -44,29 +65,30 @@ async def test_story_visualization_with_merging(default_domain): assert 20 < len(generated_graph.edges()) < 33 -async def test_training_script(tmpdir): +async def test_training_script(tmp_path: Path): await train( DEFAULT_DOMAIN_PATH_WITH_SLOTS, DEFAULT_STORIES_FILE, - tmpdir.strpath, + str(tmp_path), policy_config="data/test_config/max_hist_config.yml", interpreter=RegexInterpreter(), - kwargs={}, + additional_arguments={}, ) assert True -async def test_training_script_without_max_history_set(tmpdir): +async def test_training_script_without_max_history_set(tmp_path: Path): + tmpdir = str(tmp_path) await train( DEFAULT_DOMAIN_PATH_WITH_SLOTS, DEFAULT_STORIES_FILE, - tmpdir.strpath, + tmpdir, interpreter=RegexInterpreter(), policy_config="data/test_config/no_max_hist_config.yml", - kwargs={}, + additional_arguments={}, ) - agent = Agent.load(tmpdir.strpath) + agent = Agent.load(tmpdir) for policy in agent.policy_ensemble.policies: if hasattr(policy.featurizer, "max_history"): if type(policy) == FormPolicy: @@ -78,16 +100,18 @@ async def test_training_script_without_max_history_set(tmpdir): ) -async def test_training_script_with_max_history_set(tmpdir): +async def test_training_script_with_max_history_set(tmp_path: Path): + tmpdir = str(tmp_path) + await train( DEFAULT_DOMAIN_PATH_WITH_SLOTS, DEFAULT_STORIES_FILE, - tmpdir.strpath, + tmpdir, interpreter=RegexInterpreter(), policy_config="data/test_config/max_hist_config.yml", - kwargs={}, + additional_arguments={}, ) - agent = Agent.load(tmpdir.strpath) + agent = Agent.load(tmpdir) for policy in agent.policy_ensemble.policies: if hasattr(policy.featurizer, "max_history"): if type(policy) == FormPolicy: @@ -96,51 +120,84 @@ async def test_training_script_with_max_history_set(tmpdir): assert policy.featurizer.max_history == 5 -async def test_training_script_with_restart_stories(tmpdir): +@pytest.mark.parametrize( + "stories_file", + [ + "data/test_stories/stories_restart.md", + "data/test_yaml_stories/stories_restart.yml", + ], +) +async def test_training_script_with_restart_stories(stories_file: Text, tmp_path: Path): await train( DEFAULT_DOMAIN_PATH_WITH_SLOTS, - "data/test_stories/stories_restart.md", - tmpdir.strpath, + stories_file, + str(tmp_path), interpreter=RegexInterpreter(), policy_config="data/test_config/max_hist_config.yml", - kwargs={}, + additional_arguments={}, ) assert True -def configs_for_random_seed_test(): +def configs_for_random_seed_test() -> List[Text]: # define the configs for the random_seed tests - return [ - 
"data/test_config/keras_random_seed.yaml", - "data/test_config/embedding_random_seed.yaml", - ] + return ["data/test_config/ted_random_seed.yaml"] @pytest.mark.parametrize("config_file", configs_for_random_seed_test()) -async def test_random_seed(tmpdir, config_file): +async def test_random_seed(tmp_path: Path, config_file: Text): # set random seed in config file to # generate a reproducible training result + agent_1 = await train( DEFAULT_DOMAIN_PATH_WITH_SLOTS, DEFAULT_STORIES_FILE, - tmpdir.strpath + "1", + str(tmp_path / "1"), interpreter=RegexInterpreter(), policy_config=config_file, - kwargs={}, + additional_arguments={}, ) agent_2 = await train( DEFAULT_DOMAIN_PATH_WITH_SLOTS, DEFAULT_STORIES_FILE, - tmpdir.strpath + "2", + str(tmp_path / "2"), interpreter=RegexInterpreter(), policy_config=config_file, - kwargs={}, + additional_arguments={}, ) processor_1 = agent_1.create_processor() processor_2 = agent_2.create_processor() - probs_1 = processor_1.predict_next("1") - probs_2 = processor_2.predict_next("2") + probs_1 = await processor_1.predict_next("1") + probs_2 = await processor_2.predict_next("2") assert probs_1["confidence"] == probs_2["confidence"] + + +async def test_trained_interpreter_passed_to_policies( + tmp_path: Path, monkeypatch: MonkeyPatch +): + from rasa.core.policies.ted_policy import TEDPolicy + + policies_config = {"policies": [{"name": TEDPolicy.__name__}]} + + policy_train = Mock() + monkeypatch.setattr(TEDPolicy, "train", policy_train) + + interpreter = Mock(spec=RasaNLUInterpreter) + + await train( + DEFAULT_DOMAIN_PATH_WITH_SLOTS, + DEFAULT_STORIES_FILE, + str(tmp_path), + interpreter=interpreter, + policy_config=policies_config, + additional_arguments={}, + ) + + policy_train.assert_called_once() + + assert policy_train.call_count == 1 + _, _, kwargs = policy_train.mock_calls[0] + assert kwargs["interpreter"] == interpreter diff --git a/tests/core/test_utils.py b/tests/core/test_utils.py index 5416ed9bef48..4ee3935350cd 100644 --- a/tests/core/test_utils.py +++ b/tests/core/test_utils.py @@ -1,18 +1,19 @@ -import asyncio +import os +import random + +from decimal import Decimal +from typing import Optional, Text, Union, Any +from pathlib import Path import pytest +import rasa.core.lock_store import rasa.utils.io +from rasa.constants import ENV_SANIC_WORKERS from rasa.core import utils - - -@pytest.fixture(scope="session") -def loop(): - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - loop = rasa.utils.io.enable_async_loop_debugging(loop) - yield loop - loop.close() +from rasa.core.lock_store import LockStore, RedisLockStore, InMemoryLockStore +from rasa.utils.endpoints import EndpointConfig +from tests.conftest import write_endpoint_config_to_yaml def test_is_int(): @@ -91,3 +92,170 @@ def test_convert_bytes_to_string(): # string remains string assert utils.convert_bytes_to_string(decoded_string) == decoded_string + + +@pytest.mark.parametrize( + "_input,expected", + [ + # `int` is not converted + (-1, -1), + # `float` is converted + (2.1, round(Decimal(2.1), 4)), + # `float` that's too long is rounded + (1579507733.1107571125030517578125, Decimal("1579507733.110757")), + # strings are not converted + (["one", "two"], ["one", "two"]), + # list of `float`s is converted + ( + [1.0, -2.1, 3.2], + [round(Decimal(1.0), 4), round(Decimal(-2.1), 4), round(Decimal(3.2), 4)], + ), + # dictionary containing list of `float`s and `float`s is converted + ( + {"list_with_floats": [4.5, -5.6], "float": 6.7}, + { + "list_with_floats": [round(Decimal(4.5), 
4), round(Decimal(-5.6), 4)], + "float": round(Decimal(6.7), 4), + }, + ), + ], +) +def test_replace_floats_with_decimals(_input: Any, expected: Any): + assert utils.replace_floats_with_decimals(_input) == expected + + +@pytest.mark.parametrize( + "_input,expected", + [ + # `int` is not converted + (-1, -1), + # `float` is converted + (Decimal(2.1), 2.1), + # `float` that's too long is rounded to default 9 decimal places + (Decimal("1579507733.11075711345834582304234"), 1579507733.110757113), + # strings are not converted + (["one", "two"], ["one", "two"]), + # list of `Decimal`s is converted + ([Decimal(1.0), Decimal(-2.1), Decimal(3.2)], [1.0, -2.1, 3.2]), + # dictionary containing list of `Decimal`s and `Decimal`s is converted + ( + {"list_with_floats": [Decimal(4.5), Decimal(-5.6)], "float": Decimal(6.7)}, + {"list_with_floats": [4.5, -5.6], "float": 6.7}, + ), + ], +) +def test_replace_decimals_with_floats(_input: Any, expected: Any): + assert utils.replace_decimals_with_floats(_input) == expected + + +@pytest.mark.parametrize( + "env_value,lock_store,expected", + [ + (1, "redis", 1), + (4, "redis", 4), + (None, "redis", 1), + (0, "redis", 1), + (-4, "redis", 1), + ("illegal value", "redis", 1), + (None, None, 1), + (None, "in_memory", 1), + (5, "in_memory", 1), + (2, None, 1), + (0, "in_memory", 1), + (3, RedisLockStore(), 3), + (2, InMemoryLockStore(), 1), + ], +) +def test_get_number_of_sanic_workers( + env_value: Optional[Text], + lock_store: Union[LockStore, Text, None], + expected: Optional[int], +): + # remember pre-test value of SANIC_WORKERS env var + pre_test_value = os.environ.get(ENV_SANIC_WORKERS) + + # set env var to desired value and make assertion + if env_value is not None: + os.environ[ENV_SANIC_WORKERS] = str(env_value) + + # lock_store may be string or LockStore object + # create EndpointConfig if it's a string, otherwise pass the object + if isinstance(lock_store, str): + lock_store = EndpointConfig(type=lock_store) + + assert utils.number_of_sanic_workers(lock_store) == expected + + # reset env var to pre-test value + os.environ.pop(ENV_SANIC_WORKERS, None) + + if pre_test_value is not None: + os.environ[ENV_SANIC_WORKERS] = pre_test_value + + +@pytest.mark.parametrize( + "lock_store,expected", + [ + (EndpointConfig(type="redis"), True), + (RedisLockStore(), True), + (EndpointConfig(type="in_memory"), False), + (EndpointConfig(type="random_store"), False), + (None, False), + (InMemoryLockStore(), False), + ], +) +def test_lock_store_is_redis_lock_store( + lock_store: Union[EndpointConfig, LockStore, None], expected: bool +): + # noinspection PyProtectedMember + assert rasa.core.utils._lock_store_is_redis_lock_store(lock_store) == expected + + +def test_all_subclasses(): + num = random.randint(1, 10) + + class TestClass: + pass + + classes = [type(f"TestClass{i}", (TestClass,), {}) for i in range(num)] + + assert utils.all_subclasses(TestClass) == classes + + +def test_read_endpoints_from_path(tmp_path: Path): + # write valid config to file + endpoints_path = write_endpoint_config_to_yaml( + tmp_path, {"event_broker": {"type": "pika"}, "tracker_store": {"type": "sql"}} + ) + + # noinspection PyProtectedMember + available_endpoints = utils.read_endpoints_from_path(endpoints_path) + + # assert event broker and tracker store are valid, others are not + assert available_endpoints.tracker_store and available_endpoints.event_broker + assert not all( + ( + available_endpoints.lock_store, + available_endpoints.nlg, + available_endpoints.action, + 
available_endpoints.model, + available_endpoints.nlu, + ) + ) + + +def test_read_endpoints_from_wrong_path(): + # noinspection PyProtectedMember + available_endpoints = utils.read_endpoints_from_path("/some/wrong/path") + + # endpoint config is still initialised but does not contain anything + assert not all( + ( + available_endpoints.lock_store, + available_endpoints.nlg, + available_endpoints.event_broker, + available_endpoints.tracker_store, + available_endpoints.action, + available_endpoints.model, + available_endpoints.nlu, + ) + ) diff --git a/tests/core/test_validator.py b/tests/core/test_validator.py deleted file mode 100644 index adbe2a8e6b29..000000000000 --- a/tests/core/test_validator.py +++ /dev/null @@ -1,57 +0,0 @@ -import pytest -from rasa.core.validator import Validator -from rasa.importers.rasa import RasaFileImporter -from tests.core.conftest import ( - DEFAULT_DOMAIN_PATH_WITH_SLOTS, - DEFAULT_STORIES_FILE, - DEFAULT_NLU_DATA, -) -from rasa.core.domain import Domain -from rasa.nlu.training_data import TrainingData -import rasa.utils.io as io_utils - - -async def test_verify_intents_does_not_fail_on_valid_data(): - importer = RasaFileImporter( - domain_path="examples/moodbot/domain.yml", - training_data_paths=["examples/moodbot/data/nlu.md"], - ) - validator = await Validator.from_importer(importer) - assert validator.verify_intents() - - -async def test_verify_intents_does_fail_on_invalid_data(): - # domain and nlu data are from different domain and should produce warnings - importer = RasaFileImporter( - domain_path="data/test_domains/default.yml", - training_data_paths=["examples/moodbot/data/nlu.md"], - ) - validator = await Validator.from_importer(importer) - assert not validator.verify_intents() - - -async def test_verify_valid_utterances(): - importer = RasaFileImporter( - domain_path="data/test_domains/default.yml", - training_data_paths=[DEFAULT_NLU_DATA, DEFAULT_STORIES_FILE], - ) - validator = await Validator.from_importer(importer) - assert validator.verify_utterances() - - -async def test_fail_on_invalid_utterances(tmpdir): - # domain and stories are from different domain and should produce warnings - invalid_domain = str(tmpdir / "invalid_domain.yml") - io_utils.write_yaml_file( - { - "templates": {"utter_greet": {"text": "hello"}}, - "actions": [ - "utter_greet", - "utter_non_existent", # error: utter template odes not exist - ], - }, - invalid_domain, - ) - importer = RasaFileImporter(domain_path=invalid_domain) - validator = await Validator.from_importer(importer) - assert not validator.verify_utterances() diff --git a/tests/core/test_visualization.py b/tests/core/test_visualization.py index 79080f8192b6..6ad408f51786 100644 --- a/tests/core/test_visualization.py +++ b/tests/core/test_visualization.py @@ -1,5 +1,11 @@ +from typing import Text + +import pytest + +from rasa.core.domain import Domain from rasa.core.events import ActionExecuted, SlotSet, UserUttered from rasa.core.training import visualization +import rasa.utils.io def test_style_transfer(): @@ -72,14 +78,18 @@ def test_common_action_prefix_unequal(): assert num_common == 0 -async def test_graph_persistence(default_domain, tmpdir): +@pytest.mark.parametrize( + "stories_file", + ["data/test_stories/stories.md", "data/test_yaml_stories/stories.yml"], +) +async def test_graph_persistence(stories_file: Text, default_domain: Domain, tmpdir): from os.path import isfile from networkx.drawing import nx_pydot - from rasa.core.training.dsl import StoryFileReader from rasa.core.interpreter import 
RegexInterpreter + import rasa.core.training.loading as core_loading - story_steps = await StoryFileReader.read_from_file( - "data/test_stories/stories.md", default_domain, interpreter=RegexInterpreter() + story_steps = await core_loading.load_data_from_resource( + stories_file, default_domain, RegexInterpreter() ) out_file = tmpdir.join("graph.html").strpath generated_graph = await visualization.visualize_stories( @@ -94,8 +104,30 @@ async def test_graph_persistence(default_domain, tmpdir): assert isfile(out_file) - with open(out_file, "r") as graph_file: - content = graph_file.read() + content = rasa.utils.io.read_file(out_file) assert "isClient = true" in content assert "graph = `{}`".format(generated_graph.to_string()) in content + + +@pytest.mark.parametrize( + "stories_file", + ["data/test_stories/stories.md", "data/test_yaml_stories/stories.yml"], +) +async def test_merge_nodes(stories_file: Text, default_domain: Domain, tmpdir): + from os.path import isfile + from rasa.core.interpreter import RegexInterpreter + import rasa.core.training.loading as core_loading + + story_steps = await core_loading.load_data_from_resource( + stories_file, default_domain, RegexInterpreter() + ) + out_file = tmpdir.join("graph.html").strpath + await visualization.visualize_stories( + story_steps, + default_domain, + output_file=out_file, + max_history=3, + should_merge_nodes=True, + ) + assert isfile(out_file) diff --git a/tests/core/training/story_reader/test_common_story_reader.py b/tests/core/training/story_reader/test_common_story_reader.py new file mode 100644 index 000000000000..f1d86cb5fde6 --- /dev/null +++ b/tests/core/training/story_reader/test_common_story_reader.py @@ -0,0 +1,261 @@ +import json +import os +from collections import Counter +from pathlib import Path +from typing import Text, List + +import numpy as np +import pytest + +from rasa.core import training +from rasa.core.domain import Domain +from rasa.core.events import UserUttered, ActionExecuted, SessionStarted +from rasa.core.featurizers import ( + MaxHistoryTrackerFeaturizer, + BinarySingleStateFeaturizer, +) +from rasa.nlu.constants import INTENT_NAME_KEY + + +@pytest.mark.parametrize( + "stories_file", + ["data/test_stories/stories.md", "data/test_yaml_stories/stories.yml"], +) +async def test_can_read_test_story(stories_file: Text, default_domain: Domain): + trackers = await training.load_data( + stories_file, + default_domain, + use_story_concatenation=False, + tracker_limit=1000, + remove_duplicates=False, + ) + assert len(trackers) == 7 + # this should be the story simple_story_with_only_end -> show_it_all + # the generated stories are in a non stable order - therefore we need to + # do some trickery to find the one we want to test + tracker = [t for t in trackers if len(t.events) == 5][0] + assert tracker.events[0] == ActionExecuted("action_listen") + assert tracker.events[1] == UserUttered( + "simple", + intent={INTENT_NAME_KEY: "simple", "confidence": 1.0}, + parse_data={ + "text": "/simple", + "intent_ranking": [{"confidence": 1.0, INTENT_NAME_KEY: "simple"}], + "intent": {"confidence": 1.0, INTENT_NAME_KEY: "simple"}, + "entities": [], + }, + ) + assert tracker.events[2] == ActionExecuted("utter_default") + assert tracker.events[3] == ActionExecuted("utter_greet") + assert tracker.events[4] == ActionExecuted("action_listen") + + +@pytest.mark.parametrize( + "stories_file", + [ + "data/test_stories/stories_checkpoint_after_or.md", + "data/test_yaml_stories/stories_checkpoint_after_or.yml", + ], +) +async def 
test_can_read_test_story_with_checkpoint_after_or(
+    stories_file: Text, default_domain: Domain
+):
+    trackers = await training.load_data(
+        stories_file,
+        default_domain,
+        use_story_concatenation=False,
+        tracker_limit=1000,
+        remove_duplicates=False,
+    )
+    assert len(trackers) == 2
+
+
+@pytest.mark.parametrize(
+    "stories_file",
+    [
+        "data/test_stories/stories_with_cycle.md",
+        "data/test_yaml_stories/stories_with_cycle.yml",
+    ],
+)
+async def test_read_story_file_with_cycles(stories_file: Text, default_domain: Domain):
+    graph = await training.extract_story_graph(stories_file, default_domain)
+
+    assert len(graph.story_steps) == 5
+
+    graph_without_cycles = graph.with_cycles_removed()
+
+    assert graph.cyclic_edge_ids != set()
+    # removing the cycles sorts the removed edges, which converts the set to a list
+    assert graph_without_cycles.cyclic_edge_ids == list()
+
+    assert len(graph.story_steps) == len(graph_without_cycles.story_steps) == 5
+
+    assert len(graph_without_cycles.story_end_checkpoints) == 2
+
+
+@pytest.mark.parametrize(
+    "stories_file",
+    [
+        "data/test_stories/stories_with_cycle.md",
+        "data/test_yaml_stories/stories_with_cycle.yml",
+    ],
+)
+async def test_generate_training_data_with_cycles(
+    stories_file: Text, default_domain: Domain
+):
+    featurizer = MaxHistoryTrackerFeaturizer(
+        BinarySingleStateFeaturizer(), max_history=4
+    )
+    training_trackers = await training.load_data(
+        stories_file, default_domain, augmentation_factor=0
+    )
+    training_data = featurizer.featurize_trackers(training_trackers, default_domain)
+    y = training_data.y.argmax(axis=-1)
+
+    # how many there are depends on the graph which is not created in a
+    # deterministic way but should always be 3 or 4
+    assert len(training_trackers) == 3 or len(training_trackers) == 4
+
+    # if we have 4 trackers, there is going to be one example more for label 10
+    num_tens = len(training_trackers) - 1
+    # if new default actions are added the keys of the actions will be changed
+
+    assert Counter(y) == {0: 6, 12: num_tens, 14: 1, 1: 2, 13: 3}
+
+
+@pytest.mark.parametrize(
+    "stories_file",
+    [
+        "data/test_stories/stories_unused_checkpoints.md",
+        "data/test_yaml_stories/stories_unused_checkpoints.yml",
+    ],
+)
+async def test_generate_training_data_with_unused_checkpoints(
+    stories_file: Text, default_domain: Domain
+):
+    training_trackers = await training.load_data(stories_file, default_domain)
+    # there are 3 training stories:
+    # 2 with unused end checkpoints -> training_trackers
+    # 1 with unused start checkpoints -> ignored
+    assert len(training_trackers) == 2
+
+
+@pytest.mark.parametrize(
+    "stories_file",
+    [
+        "data/test_stories/stories_defaultdomain.md",
+        "data/test_yaml_stories/stories_defaultdomain.yml",
+    ],
+)
+async def test_generate_training_data_original_and_augmented_trackers(
+    stories_file: Text, default_domain: Domain
+):
+    training_trackers = await training.load_data(
+        stories_file, default_domain, augmentation_factor=3
+    )
+    # there are three original stories
+    # augmentation factor of 3 indicates max of 3*10 augmented stories generated
+    # maximum number of stories should be augmented+original = 33
+    original_trackers = [
+        t
+        for t in training_trackers
+        if not hasattr(t, "is_augmented") or not t.is_augmented
+    ]
+    assert len(original_trackers) == 3
+    assert len(training_trackers) <= 33
+
+
+@pytest.mark.parametrize(
+    "stories_file",
+    [
+        "data/test_stories/stories_with_cycle.md",
+        "data/test_yaml_stories/stories_with_cycle.yml",
+    ],
+)
+async def test_visualize_training_data_graph(
+
stories_file: Text, tmpdir, default_domain: Domain +): + graph = await training.extract_story_graph(stories_file, default_domain) + + graph = graph.with_cycles_removed() + + out_path = tmpdir.join("graph.html").strpath + + # this will be the plotted networkx graph + G = graph.visualize(out_path) + + assert os.path.exists(out_path) + + # we can't check the exact topology - but this should be enough to ensure + # the visualisation created a sane graph + assert set(G.nodes()) == set(range(-1, 13)) or set(G.nodes()) == set(range(-1, 14)) + if set(G.nodes()) == set(range(-1, 13)): + assert len(G.edges()) == 14 + elif set(G.nodes()) == set(range(-1, 14)): + assert len(G.edges()) == 16 + + +@pytest.mark.parametrize( + "stories_resources", + [ + ["data/test_stories/stories.md", "data/test_multifile_stories"], + ["data/test_yaml_stories/stories.yml", "data/test_multifile_yaml_stories"], + ["data/test_stories/stories.md", "data/test_multifile_yaml_stories"], + ["data/test_yaml_stories/stories.yml", "data/test_multifile_stories"], + ["data/test_stories/stories.md", "data/test_mixed_yaml_md_stories"], + ], +) +async def test_load_multi_file_training_data( + stories_resources: List, default_domain: Domain +): + # the stories file in `data/test_multifile_stories` is the same as in + # `data/test_stories/stories.md`, but split across multiple files + featurizer = MaxHistoryTrackerFeaturizer( + BinarySingleStateFeaturizer(), max_history=2 + ) + trackers = await training.load_data( + stories_resources[0], default_domain, augmentation_factor=0 + ) + (tr_as_sts, tr_as_acts) = featurizer.training_states_and_actions( + trackers, default_domain + ) + hashed = [] + for sts, acts in zip(tr_as_sts, tr_as_acts): + hashed.append(json.dumps(sts + acts, sort_keys=True)) + hashed = sorted(hashed, reverse=True) + + data = featurizer.featurize_trackers(trackers, default_domain) + + featurizer_mul = MaxHistoryTrackerFeaturizer( + BinarySingleStateFeaturizer(), max_history=2 + ) + trackers_mul = await training.load_data( + stories_resources[1], default_domain, augmentation_factor=0 + ) + (tr_as_sts_mul, tr_as_acts_mul) = featurizer.training_states_and_actions( + trackers_mul, default_domain + ) + hashed_mul = [] + for sts_mul, acts_mul in zip(tr_as_sts_mul, tr_as_acts_mul): + hashed_mul.append(json.dumps(sts_mul + acts_mul, sort_keys=True)) + hashed_mul = sorted(hashed_mul, reverse=True) + + data_mul = featurizer_mul.featurize_trackers(trackers_mul, default_domain) + + assert hashed == hashed_mul + + assert np.all(data.X.sort(axis=0) == data_mul.X.sort(axis=0)) + assert np.all(data.y.sort(axis=0) == data_mul.y.sort(axis=0)) + + +async def test_load_training_data_reader_not_found_throws( + tmp_path: Path, default_domain: Domain +): + (tmp_path / "file").touch() + + with pytest.raises(Exception): + await training.load_data(str(tmp_path), default_domain) + + +def test_session_started_event_is_not_serialised(): + assert SessionStarted().as_story_string() is None diff --git a/tests/core/training/story_reader/test_markdown_story_reader.py b/tests/core/training/story_reader/test_markdown_story_reader.py new file mode 100644 index 000000000000..125f2f1c3482 --- /dev/null +++ b/tests/core/training/story_reader/test_markdown_story_reader.py @@ -0,0 +1,297 @@ +import rasa.utils.io +from rasa.core import training +from rasa.core.domain import Domain +from rasa.core.events import ( + UserUttered, + ActionExecuted, + ActionExecutionRejected, + ActiveLoop, + FormValidation, + SlotSet, + LegacyForm, +) +from rasa.core.interpreter import 
RegexInterpreter +from rasa.core.trackers import DialogueStateTracker +from rasa.core.training import loading +from rasa.core.training.story_reader.markdown_story_reader import MarkdownStoryReader +from rasa.core.training.structures import Story + + +async def test_persist_and_read_test_story_graph(tmpdir, default_domain: Domain): + graph = await training.extract_story_graph( + "data/test_stories/stories.md", default_domain + ) + out_path = tmpdir.join("persisted_story.md") + rasa.utils.io.write_text_file(graph.as_story_string(), out_path.strpath) + + recovered_trackers = await training.load_data( + out_path.strpath, + default_domain, + use_story_concatenation=False, + tracker_limit=1000, + remove_duplicates=False, + ) + existing_trackers = await training.load_data( + "data/test_stories/stories.md", + default_domain, + use_story_concatenation=False, + tracker_limit=1000, + remove_duplicates=False, + ) + + existing_stories = {t.export_stories() for t in existing_trackers} + for t in recovered_trackers: + story_str = t.export_stories() + assert story_str in existing_stories + existing_stories.discard(story_str) + + +async def test_persist_and_read_test_story(tmpdir, default_domain: Domain): + graph = await training.extract_story_graph( + "data/test_stories/stories.md", default_domain + ) + out_path = tmpdir.join("persisted_story.md") + Story(graph.story_steps).dump_to_file(out_path.strpath) + + recovered_trackers = await training.load_data( + out_path.strpath, + default_domain, + use_story_concatenation=False, + tracker_limit=1000, + remove_duplicates=False, + ) + existing_trackers = await training.load_data( + "data/test_stories/stories.md", + default_domain, + use_story_concatenation=False, + tracker_limit=1000, + remove_duplicates=False, + ) + existing_stories = {t.export_stories() for t in existing_trackers} + for t in recovered_trackers: + story_str = t.export_stories() + assert story_str in existing_stories + existing_stories.discard(story_str) + + +async def test_persist_legacy_form_story(): + domain = Domain.load("data/test_domains/form.yml") + + tracker = DialogueStateTracker("", domain.slots) + + story = ( + "* greet\n" + " - utter_greet\n" + "* start_form\n" + " - some_form\n" + ' - form{"name": "some_form"}\n' + "* default\n" + " - utter_default\n" + " - some_form\n" + "* stop\n" + " - utter_ask_continue\n" + "* affirm\n" + " - some_form\n" + "* stop\n" + " - utter_ask_continue\n" + "* inform\n" + " - some_form\n" + ' - form{"name": null}\n' + "* goodbye\n" + " - utter_goodbye\n" + ) + + # simulate talking to the form + events = [ + UserUttered(intent={"name": "greet"}), + ActionExecuted("utter_greet"), + ActionExecuted("action_listen"), + # start the form + UserUttered(intent={"name": "start_form"}), + ActionExecuted("some_form"), + ActiveLoop("some_form"), + ActionExecuted("action_listen"), + # out of form input + UserUttered(intent={"name": "default"}), + ActionExecutionRejected("some_form"), + ActionExecuted("utter_default"), + ActionExecuted("some_form"), + ActionExecuted("action_listen"), + # out of form input + UserUttered(intent={"name": "stop"}), + ActionExecutionRejected("some_form"), + ActionExecuted("utter_ask_continue"), + ActionExecuted("action_listen"), + # out of form input but continue with the form + UserUttered(intent={"name": "affirm"}), + FormValidation(False), + ActionExecuted("some_form"), + ActionExecuted("action_listen"), + # out of form input + UserUttered(intent={"name": "stop"}), + ActionExecutionRejected("some_form"), + 
ActionExecuted("utter_ask_continue"), + ActionExecuted("action_listen"), + # form input + UserUttered(intent={"name": "inform"}), + FormValidation(True), + ActionExecuted("some_form"), + ActionExecuted("action_listen"), + ActiveLoop(None), + UserUttered(intent={"name": "goodbye"}), + ActionExecuted("utter_goodbye"), + ActionExecuted("action_listen"), + ] + [tracker.update(e) for e in events] + + story = story.replace(f"- {LegacyForm.type_name}", f"- {ActiveLoop.type_name}") + + assert story in tracker.export_stories() + + +async def test_persist_form_story(): + domain = Domain.load("data/test_domains/form.yml") + + tracker = DialogueStateTracker("", domain.slots) + + story = ( + "* greet\n" + " - utter_greet\n" + "* start_form\n" + " - some_form\n" + ' - active_loop{"name": "some_form"}\n' + "* default\n" + " - utter_default\n" + " - some_form\n" + "* stop\n" + " - utter_ask_continue\n" + "* affirm\n" + " - some_form\n" + "* stop\n" + " - utter_ask_continue\n" + "* inform\n" + " - some_form\n" + ' - active_loop{"name": null}\n' + "* goodbye\n" + " - utter_goodbye\n" + ) + + # simulate talking to the form + events = [ + UserUttered(intent={"name": "greet"}), + ActionExecuted("utter_greet"), + ActionExecuted("action_listen"), + # start the form + UserUttered(intent={"name": "start_form"}), + ActionExecuted("some_form"), + ActiveLoop("some_form"), + ActionExecuted("action_listen"), + # out of form input + UserUttered(intent={"name": "default"}), + ActionExecutionRejected("some_form"), + ActionExecuted("utter_default"), + ActionExecuted("some_form"), + ActionExecuted("action_listen"), + # out of form input + UserUttered(intent={"name": "stop"}), + ActionExecutionRejected("some_form"), + ActionExecuted("utter_ask_continue"), + ActionExecuted("action_listen"), + # out of form input but continue with the form + UserUttered(intent={"name": "affirm"}), + FormValidation(False), + ActionExecuted("some_form"), + ActionExecuted("action_listen"), + # out of form input + UserUttered(intent={"name": "stop"}), + ActionExecutionRejected("some_form"), + ActionExecuted("utter_ask_continue"), + ActionExecuted("action_listen"), + # form input + UserUttered(intent={"name": "inform"}), + FormValidation(True), + ActionExecuted("some_form"), + ActionExecuted("action_listen"), + ActiveLoop(None), + UserUttered(intent={"name": "goodbye"}), + ActionExecuted("utter_goodbye"), + ActionExecuted("action_listen"), + ] + [tracker.update(e) for e in events] + + assert story in tracker.export_stories() + + +async def test_read_stories_with_multiline_comments(tmpdir, default_domain: Domain): + reader = MarkdownStoryReader(RegexInterpreter(), default_domain) + + story_steps = await reader.read_from_file( + "data/test_stories/stories_with_multiline_comments.md" + ) + + assert len(story_steps) == 4 + assert story_steps[0].block_name == "happy path" + assert len(story_steps[0].events) == 4 + assert story_steps[1].block_name == "sad path 1" + assert len(story_steps[1].events) == 7 + assert story_steps[2].block_name == "sad path 2" + assert len(story_steps[2].events) == 7 + assert story_steps[3].block_name == "say goodbye" + assert len(story_steps[3].events) == 2 + + +async def test_read_stories_with_rules(default_domain: Domain): + story_steps = await loading.load_data_from_files( + ["data/test_stories/stories_with_rules.md"], default_domain, RegexInterpreter() + ) + + # this file contains three rules and two ML stories + assert len(story_steps) == 5 + + ml_steps = [s for s in story_steps if not s.is_rule] + rule_steps = [s for s 
in story_steps if s.is_rule]
+
+    assert len(ml_steps) == 2
+    assert len(rule_steps) == 3
+
+    assert story_steps[0].block_name == "rule 1"
+    assert story_steps[1].block_name == "rule 2"
+    assert story_steps[2].block_name == "ML story 1"
+    assert story_steps[3].block_name == "rule 3"
+    assert story_steps[4].block_name == "ML story 2"
+
+
+async def test_read_rules_without_stories(default_domain: Domain):
+    story_steps = await loading.load_data_from_files(
+        ["data/test_stories/rules_without_stories.md"],
+        default_domain,
+        RegexInterpreter(),
+    )
+
+    # this file contains three rules and no ML stories
+    assert len(story_steps) == 3
+
+    ml_steps = [s for s in story_steps if not s.is_rule]
+    rule_steps = [s for s in story_steps if s.is_rule]
+
+    assert len(ml_steps) == 0
+    assert len(rule_steps) == 3
+
+    assert rule_steps[0].block_name == "rule 1"
+    assert rule_steps[1].block_name == "rule 2"
+    assert rule_steps[2].block_name == "rule 3"
+
+    # inspect the first rule and make sure all events were picked up correctly
+    events = rule_steps[0].events
+
+    assert len(events) == 5
+
+    assert events[0] == ActiveLoop("loop_q_form")
+    assert events[1] == SlotSet("requested_slot", "some_slot")
+    assert events[2] == ActionExecuted("...")
+    assert events[3] == UserUttered(
+        'inform{"some_slot":"bla"}',
+        {"name": "inform", "confidence": 1.0},
+        [{"entity": "some_slot", "start": 6, "end": 25, "value": "bla"}],
+    )
+    assert events[4] == ActionExecuted("loop_q_form")
diff --git a/tests/core/training/story_reader/test_yaml_story_reader.py b/tests/core/training/story_reader/test_yaml_story_reader.py
new file mode 100644
index 000000000000..5fb8cd7b2e3d
--- /dev/null
+++ b/tests/core/training/story_reader/test_yaml_story_reader.py
@@ -0,0 +1,307 @@
+from typing import Text, List
+
+import pytest
+
+from rasa.constants import LATEST_TRAINING_DATA_FORMAT_VERSION
+from rasa.core import training
+from rasa.core.actions.action import RULE_SNIPPET_ACTION_NAME
+from rasa.core.domain import Domain
+from rasa.core.training import loading
+from rasa.core.events import ActionExecuted, UserUttered, SlotSet, ActiveLoop
+from rasa.core.interpreter import RegexInterpreter
+from rasa.core.training.story_reader.yaml_story_reader import YAMLStoryReader
+from rasa.core.training.structures import StoryStep
+from rasa.utils import io as io_utils
+
+
+@pytest.fixture()
+async def rule_steps_without_stories(default_domain: Domain) -> List[StoryStep]:
+    yaml_file = "data/test_yaml_stories/rules_without_stories.yml"
+
+    return await loading.load_data_from_files(
+        [yaml_file], default_domain, RegexInterpreter()
+    )
+
+
+async def test_can_read_test_story_with_slots(default_domain: Domain):
+    trackers = await training.load_data(
+        "data/test_yaml_stories/simple_story_with_only_end.yml",
+        default_domain,
+        use_story_concatenation=False,
+        tracker_limit=1000,
+        remove_duplicates=False,
+    )
+    assert len(trackers) == 1
+
+    assert trackers[0].events[-2] == SlotSet(key="name", value="peter")
+    assert trackers[0].events[-1] == ActionExecuted("action_listen")
+
+
+async def test_can_read_test_story_with_entities_slot_autofill(default_domain: Domain):
+    trackers = await training.load_data(
+        "data/test_yaml_stories/story_with_or_and_entities.yml",
+        default_domain,
+        use_story_concatenation=False,
+        tracker_limit=1000,
+        remove_duplicates=False,
+    )
+    assert len(trackers) == 2
+
+    assert trackers[0].events[-3] == UserUttered(
+        "greet",
+        intent={"name": "greet", "confidence": 1.0},
+        parse_data={
+            "text": "/greet",
+            "intent_ranking":
[{"confidence": 1.0, "name": "greet"}], + "intent": {"confidence": 1.0, "name": "greet"}, + "entities": [], + }, + ) + assert trackers[0].events[-2] == ActionExecuted("utter_greet") + assert trackers[0].events[-1] == ActionExecuted("action_listen") + + assert trackers[1].events[-4] == UserUttered( + "greet", + intent={"name": "greet", "confidence": 1.0}, + entities=[{"entity": "name", "value": "peter"}], + parse_data={ + "text": "/greet", + "intent_ranking": [{"confidence": 1.0, "name": "greet"}], + "intent": {"confidence": 1.0, "name": "greet"}, + "entities": [{"entity": "name", "value": "peter"}], + }, + ) + assert trackers[1].events[-3] == SlotSet(key="name", value="peter") + assert trackers[1].events[-2] == ActionExecuted("utter_greet") + assert trackers[1].events[-1] == ActionExecuted("action_listen") + + +async def test_can_read_test_story_with_entities_without_value(default_domain: Domain,): + trackers = await training.load_data( + "data/test_yaml_stories/story_with_or_and_entities_with_no_value.yml", + default_domain, + use_story_concatenation=False, + tracker_limit=1000, + remove_duplicates=False, + ) + assert len(trackers) == 1 + + assert trackers[0].events[-4] == UserUttered( + "greet", + intent={"name": "greet", "confidence": 1.0}, + entities=[{"entity": "name", "value": ""}], + parse_data={ + "text": "/greet", + "intent_ranking": [{"confidence": 1.0, "name": "greet"}], + "intent": {"confidence": 1.0, "name": "greet"}, + "entities": [{"entity": "name", "value": ""}], + }, + ) + assert trackers[0].events[-2] == ActionExecuted("utter_greet") + assert trackers[0].events[-1] == ActionExecuted("action_listen") + + +@pytest.mark.parametrize( + "file,is_yaml_file", + [ + ("data/test_yaml_stories/stories.yml", True), + ("data/test_stories/stories.md", False), + ("data/test_yaml_stories/rules_without_stories.yml", True), + ], +) +async def test_is_yaml_file(file: Text, is_yaml_file: bool): + assert YAMLStoryReader.is_yaml_story_file(file) == is_yaml_file + + +async def test_yaml_intent_with_leading_slash_warning(default_domain: Domain): + yaml_file = "data/test_wrong_yaml_stories/intent_with_leading_slash.yml" + + with pytest.warns(UserWarning) as record: + tracker = await training.load_data( + yaml_file, + default_domain, + use_story_concatenation=False, + tracker_limit=1000, + remove_duplicates=False, + ) + + # one for leading slash, one for missing version + assert len(record) == 2 + + assert tracker[0].latest_message == UserUttered("simple", {"name": "simple"}) + + +async def test_yaml_slot_without_value_is_parsed(default_domain: Domain): + yaml_file = "data/test_yaml_stories/story_with_slot_was_set.yml" + + tracker = await training.load_data( + yaml_file, + default_domain, + use_story_concatenation=False, + tracker_limit=1000, + remove_duplicates=False, + ) + + assert tracker[0].events[-2] == SlotSet(key="name", value=None) + + +async def test_yaml_wrong_yaml_format_warning(default_domain: Domain): + yaml_file = "data/test_wrong_yaml_stories/wrong_yaml.yml" + + with pytest.warns(UserWarning): + _ = await training.load_data( + yaml_file, + default_domain, + use_story_concatenation=False, + tracker_limit=1000, + remove_duplicates=False, + ) + + +async def test_read_rules_with_stories(default_domain: Domain): + + yaml_file = "data/test_yaml_stories/stories_and_rules.yml" + + steps = await loading.load_data_from_files( + [yaml_file], default_domain, RegexInterpreter() + ) + + ml_steps = [s for s in steps if not s.is_rule] + rule_steps = [s for s in steps if s.is_rule] + + # this file 
contains three rules and three ML stories + assert len(ml_steps) == 3 + assert len(rule_steps) == 3 + + assert rule_steps[0].block_name == "rule 1" + assert rule_steps[1].block_name == "rule 2" + assert rule_steps[2].block_name == "rule 3" + + assert ml_steps[0].block_name == "simple_story_without_checkpoint" + assert ml_steps[1].block_name == "simple_story_with_only_start" + assert ml_steps[2].block_name == "simple_story_with_only_end" + + +def test_read_rules_without_stories(rule_steps_without_stories: List[StoryStep]): + ml_steps = [s for s in rule_steps_without_stories if not s.is_rule] + rule_steps = [s for s in rule_steps_without_stories if s.is_rule] + + # this file contains five rules and no ML stories + assert len(ml_steps) == 0 + assert len(rule_steps) == 5 + + +def test_rule_with_condition(rule_steps_without_stories: List[StoryStep]): + rule = rule_steps_without_stories[0] + assert rule.block_name == "Rule with condition" + assert rule.events == [ + ActiveLoop("loop_q_form"), + SlotSet("requested_slot", "some_slot"), + ActionExecuted(RULE_SNIPPET_ACTION_NAME), + UserUttered( + "inform", + {"name": "inform", "confidence": 1.0}, + [{"entity": "some_slot", "value": "bla"}], + ), + ActionExecuted("loop_q_form"), + ] + + +def test_rule_without_condition(rule_steps_without_stories: List[StoryStep]): + rule = rule_steps_without_stories[1] + assert rule.block_name == "Rule without condition" + assert rule.events == [ + ActionExecuted(RULE_SNIPPET_ACTION_NAME), + UserUttered("explain", {"name": "explain", "confidence": 1.0}, []), + ActionExecuted("utter_explain_some_slot"), + ActionExecuted("loop_q_form"), + ActiveLoop("loop_q_form"), + ] + + +def test_rule_with_explicit_wait_for_user_message( + rule_steps_without_stories: List[StoryStep], +): + rule = rule_steps_without_stories[2] + assert rule.block_name == "Rule which explicitly waits for user input when finished" + assert rule.events == [ + ActionExecuted(RULE_SNIPPET_ACTION_NAME), + UserUttered("explain", {"name": "explain", "confidence": 1.0}, []), + ActionExecuted("utter_explain_some_slot"), + ] + + +def test_rule_which_hands_over_at_end(rule_steps_without_stories: List[StoryStep]): + rule = rule_steps_without_stories[3] + assert rule.block_name == "Rule after which another action should be predicted" + assert rule.events == [ + ActionExecuted(RULE_SNIPPET_ACTION_NAME), + UserUttered("explain", {"name": "explain", "confidence": 1.0}, []), + ActionExecuted("utter_explain_some_slot"), + ActionExecuted(RULE_SNIPPET_ACTION_NAME), + ] + + +def test_conversation_start_rule(rule_steps_without_stories: List[StoryStep]): + rule = rule_steps_without_stories[4] + assert rule.block_name == "Rule which only applies to conversation start" + assert rule.events == [ + UserUttered("explain", {"name": "explain", "confidence": 1.0}, []), + ActionExecuted("utter_explain_some_slot"), + ] + + +async def test_warning_if_intent_not_in_domain(default_domain: Domain): + stories = """ + stories: + - story: I am gonna make you explode 💥 + steps: + # Intent defined in user key. 
+ - intent: definitely not in domain + """ + + reader = YAMLStoryReader(RegexInterpreter(), default_domain) + yaml_content = io_utils.read_yaml(stories) + + with pytest.warns(UserWarning) as record: + reader.read_from_parsed_yaml(yaml_content) + + # one for missing intent, one for missing version + assert len(record) == 2 + + +async def test_no_warning_if_intent_in_domain(default_domain: Domain): + stories = ( + f'version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}"\n' + f"stories:\n" + f"- story: I am fine 💥\n" + f" steps:\n" + f" - intent: greet" + ) + + reader = YAMLStoryReader(RegexInterpreter(), default_domain) + yaml_content = io_utils.read_yaml(stories) + + with pytest.warns(None) as record: + reader.read_from_parsed_yaml(yaml_content) + + assert not len(record) + + +async def test_active_loop_is_parsed(default_domain: Domain): + stories = ( + f'version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}"\n' + f"stories:\n" + f"- story: name\n" + f" steps:\n" + f" - intent: greet\n" + f" - active_loop: null" + ) + + reader = YAMLStoryReader(RegexInterpreter(), default_domain) + yaml_content = io_utils.read_yaml(stories) + + with pytest.warns(None) as record: + reader.read_from_parsed_yaml(yaml_content) + + assert not len(record) diff --git a/tests/core/training/story_writer/test_yaml_story_writer.py b/tests/core/training/story_writer/test_yaml_story_writer.py new file mode 100644 index 000000000000..7772e525c7cd --- /dev/null +++ b/tests/core/training/story_writer/test_yaml_story_writer.py @@ -0,0 +1,76 @@ +from pathlib import Path +from typing import Text + +import pytest + +from rasa.core.domain import Domain +from rasa.core.interpreter import RegexInterpreter +from rasa.core.training.story_reader.markdown_story_reader import MarkdownStoryReader +from rasa.core.training.story_reader.yaml_story_reader import YAMLStoryReader +from rasa.core.training.story_writer.yaml_story_writer import YAMLStoryWriter + + +@pytest.mark.parametrize( + "input_md_file, input_yaml_file", + [ + ["data/test_stories/stories.md", "data/test_yaml_stories/stories.yml"], + [ + "data/test_stories/stories_defaultdomain.md", + "data/test_yaml_stories/stories_defaultdomain.yml", + ], + ], +) +async def test_simple_story( + tmpdir: Path, default_domain: Domain, input_md_file: Text, input_yaml_file: Text +): + + original_md_reader = MarkdownStoryReader( + RegexInterpreter(), + default_domain, + None, + False, + input_yaml_file, + unfold_or_utterances=False, + ) + original_md_story_steps = await original_md_reader.read_from_file(input_md_file) + + original_yaml_reader = YAMLStoryReader( + RegexInterpreter(), default_domain, None, False + ) + original_yaml_story_steps = await original_yaml_reader.read_from_file( + input_yaml_file + ) + + target_story_filename = tmpdir / "test.yml" + writer = YAMLStoryWriter() + writer.dump(target_story_filename, original_md_story_steps) + + processed_yaml_reader = YAMLStoryReader( + RegexInterpreter(), default_domain, None, False + ) + processed_yaml_story_steps = await processed_yaml_reader.read_from_file( + target_story_filename + ) + + assert len(processed_yaml_story_steps) == len(original_yaml_story_steps) + for processed_step, original_step in zip( + processed_yaml_story_steps, original_yaml_story_steps + ): + assert len(processed_step.events) == len(original_step.events) + + +async def test_forms_are_skipped_with_warning(default_domain: Domain): + original_md_reader = MarkdownStoryReader( + RegexInterpreter(), default_domain, None, False, unfold_or_utterances=False, + ) + 
original_md_story_steps = await original_md_reader.read_from_file( + "data/test_stories/stories_form.md" + ) + + writer = YAMLStoryWriter() + + with pytest.warns(UserWarning) as record: + writer.dumps(original_md_story_steps) + + # We skip 5 stories with the forms and warn users + assert len(record) == 5 diff --git a/tests/core/utilities.py b/tests/core/utilities.py index 9f7038ff9e86..96790631e009 100644 --- a/tests/core/utilities.py +++ b/tests/core/utilities.py @@ -1,7 +1,8 @@ import itertools import contextlib -from typing import Text, List +import typing +from typing import Text, List, Optional, Text, Any, Dict import jsonpickle import os @@ -10,10 +11,16 @@ from rasa.core.domain import Domain from rasa.core.events import UserUttered, Event from rasa.core.trackers import DialogueStateTracker +from rasa.nlu.constants import INTENT_NAME_KEY from tests.core.conftest import DEFAULT_DOMAIN_PATH_WITH_SLOTS +if typing.TYPE_CHECKING: + from rasa.core.conversation import Dialogue -def tracker_from_dialogue_file(filename: Text, domain: Domain = None): + +def tracker_from_dialogue_file( + filename: Text, domain: Optional[Domain] = None +) -> DialogueStateTracker: dialogue = read_dialogue_file(filename) if not domain: @@ -24,17 +31,10 @@ def tracker_from_dialogue_file(filename: Text, domain: Domain = None): return tracker -def read_dialogue_file(filename: Text): +def read_dialogue_file(filename: Text) -> "Dialogue": return jsonpickle.loads(rasa.utils.io.read_file(filename)) -def write_text_to_file(tmpdir: Text, filename: Text, text: Text): - path = tmpdir.join(filename).strpath - with open(path, "w", encoding="utf-8") as f: - f.write(text) - return path - - @contextlib.contextmanager def cwd(path: Text): CWD = os.getcwd() @@ -47,7 +47,7 @@ def cwd(path: Text): @contextlib.contextmanager -def mocked_cmd_input(package, text): +def mocked_cmd_input(package, text: Text): if isinstance(text, str): text = [text] @@ -56,7 +56,7 @@ def mocked_cmd_input(package, text): def mocked_input(*args, **kwargs): value = next(text_generator) - print ("wrote '{}' to input".format(value)) + print(f"wrote '{value}' to input") return value package.get_user_input = mocked_input @@ -66,10 +66,19 @@ def mocked_input(*args, **kwargs): package.get_user_input = i -def user_uttered(text: Text, confidence: float) -> UserUttered: - parse_data = {"intent": {"name": text, "confidence": confidence}} +def user_uttered( + text: Text, + confidence: float = 1.0, + metadata: Dict[Text, Any] = None, + timestamp: Optional[float] = None, +) -> UserUttered: + parse_data = {"intent": {INTENT_NAME_KEY: text, "confidence": confidence}} return UserUttered( - text="Random", intent=parse_data["intent"], parse_data=parse_data + text="Random", + intent=parse_data["intent"], + parse_data=parse_data, + metadata=metadata, + timestamp=timestamp, ) diff --git a/tests/examples/test_example_bots_training_data.py b/tests/examples/test_example_bots_training_data.py new file mode 100644 index 000000000000..fee0156c0358 --- /dev/null +++ b/tests/examples/test_example_bots_training_data.py @@ -0,0 +1,55 @@ +from typing import Text + +import pytest + +from rasa.importers.importer import TrainingDataImporter + + +@pytest.mark.parametrize( + "config_file, domain_file, data_folder", + [ + ( + "examples/concertbot/config.yml", + "examples/concertbot/domain.yml", + "examples/concertbot/data", + ), + ( + "examples/knowledgebasebot/config.yml", + "examples/knowledgebasebot/domain.yml", + "examples/knowledgebasebot/data", + ), + ( + "examples/moodbot/config.yml", + 
"examples/moodbot/domain.yml", + "examples/moodbot/data", + ), + ( + "examples/reminderbot/config.yml", + "examples/reminderbot/domain.yml", + "examples/reminderbot/data", + ), + ( + "examples/rules/config.yml", + "examples/rules/domain.yml", + "examples/rules/data", + ), + ( + "rasa/cli/initial_project/config.yml", + "rasa/cli/initial_project/domain.yml", + "rasa/cli/initial_project/data", + ), + ], +) +async def test_example_bot_training_data_not_raises( + config_file: Text, domain_file: Text, data_folder: Text +): + + importer = TrainingDataImporter.load_from_config( + config_file, domain_file, data_folder + ) + + with pytest.warns(None) as record: + await importer.get_nlu_data() + await importer.get_stories() + + assert not len(record) diff --git a/tests/import_time.py b/tests/import_time.py index 1bdaf28b25a1..669c8672facd 100644 --- a/tests/import_time.py +++ b/tests/import_time.py @@ -16,12 +16,12 @@ def _average_import_time(n: int, module: Text) -> float: for _ in range(n): lines = subprocess.getoutput( - '{} -X importtime -c "import {}"'.format(sys.executable, module) + f'{sys.executable} -X importtime -c "import {module}"' ).splitlines() parts = lines[-1].split("|") if parts[-1].strip() != module: - raise Exception("Import time not found for {}.".format(module)) + raise Exception(f"Import time not found for {module}.") total += int(parts[1].strip()) / 1000000 diff --git a/tests/importers/test_autoconfig.py b/tests/importers/test_autoconfig.py new file mode 100644 index 000000000000..9959177e4f32 --- /dev/null +++ b/tests/importers/test_autoconfig.py @@ -0,0 +1,168 @@ +import shutil +from pathlib import Path +import sys +from typing import Text, Set +from unittest.mock import Mock + +import pytest +from _pytest.capture import CaptureFixture +from _pytest.monkeypatch import MonkeyPatch + +from rasa.constants import CONFIG_AUTOCONFIGURABLE_KEYS +from rasa.importers import autoconfig +from rasa.utils import io as io_utils + +CONFIG_FOLDER = Path("data/test_config") + +SOME_CONFIG = CONFIG_FOLDER / "stack_config.yml" +DEFAULT_CONFIG = Path("rasa/importers/default_config.yml") +DEFAULT_CONFIG_WINDOWS = Path("rasa/importers/default_config_windows.yml") + + +@pytest.mark.parametrize( + "config_path, autoconfig_keys", + [ + (Path("rasa/cli/initial_project/config.yml"), {"pipeline", "policies"}), + (CONFIG_FOLDER / "config_policies_empty.yml", {"policies"}), + (CONFIG_FOLDER / "config_pipeline_empty.yml", {"pipeline"}), + (CONFIG_FOLDER / "config_policies_missing.yml", {"policies"}), + (CONFIG_FOLDER / "config_pipeline_missing.yml", {"pipeline"}), + (SOME_CONFIG, set()), + ], +) +def test_get_configuration( + config_path: Path, autoconfig_keys: Set[Text], monkeypatch: MonkeyPatch +): + def _auto_configure(_, keys_to_configure: Set[Text]) -> Set[Text]: + return keys_to_configure + + monkeypatch.setattr(autoconfig, "_dump_config", Mock()) + monkeypatch.setattr(autoconfig, "_auto_configure", _auto_configure) + + config = autoconfig.get_configuration(str(config_path)) + + if autoconfig_keys: + expected_config = _auto_configure(config, autoconfig_keys) + else: + expected_config = config + + assert sorted(config) == sorted(expected_config) + + +@pytest.mark.parametrize("config_file", ("non_existent_config.yml", None)) +def test_get_configuration_missing_file(tmp_path: Path, config_file: Text): + if config_file: + config_file = tmp_path / "non_existent_config.yml" + + config = autoconfig.get_configuration(str(config_file)) + + assert config == {} + + +@pytest.mark.parametrize( + 
"keys_to_configure", ({"policies"}, {"pipeline"}, {"policies", "pipeline"}) +) +def test_auto_configure(keys_to_configure: Set[Text]): + if sys.platform == "win32": + default_config = io_utils.read_config_file(DEFAULT_CONFIG_WINDOWS) + else: + default_config = io_utils.read_config_file(DEFAULT_CONFIG) + + config = autoconfig._auto_configure({}, keys_to_configure) + + for k in keys_to_configure: + assert config[k] == default_config[k] # given keys are configured correctly + + assert len(config) == len(keys_to_configure) # no other keys are configured + + +@pytest.mark.parametrize( + "config_path, missing_keys", + [ + (CONFIG_FOLDER / "config_language_only.yml", {"pipeline", "policies"}), + (CONFIG_FOLDER / "config_policies_missing.yml", {"policies"}), + (CONFIG_FOLDER / "config_pipeline_missing.yml", {"pipeline"}), + (SOME_CONFIG, []), + ], +) +def test_add_missing_config_keys_to_file( + tmp_path: Path, config_path: Path, missing_keys: Set[Text] +): + config_file = str(tmp_path / "config.yml") + shutil.copyfile(str(config_path), config_file) + + autoconfig._add_missing_config_keys_to_file(config_file, missing_keys) + + config_after_addition = io_utils.read_config_file(config_file) + + assert all(key in config_after_addition for key in missing_keys) + + +def test_dump_config_missing_file(tmp_path: Path, capsys: CaptureFixture): + + config_path = tmp_path / "non_existent_config.yml" + + config = io_utils.read_config_file(str(SOME_CONFIG)) + + autoconfig._dump_config(config, str(config_path), set(), {"policies"}) + + assert not config_path.exists() + + captured = capsys.readouterr() + assert "has been removed or modified" in captured.out + + +# Test a few cases that are known to be potentially tricky (have failed in the past) +@pytest.mark.parametrize( + "input_file, expected_file, expected_file_windows, autoconfig_keys", + [ + ( + "config_with_comments.yml", + "config_with_comments_after_dumping.yml", + "config_with_comments_after_dumping.yml", + {"policies"}, + ), # comments in various positions + ( + "config_empty.yml", + "config_empty_after_dumping.yml", + "config_empty_after_dumping_windows.yml", + {"policies", "pipeline"}, + ), # no empty lines + ( + "config_with_comments_after_dumping.yml", + "config_with_comments_after_dumping.yml", + "config_with_comments_after_dumping.yml", + {"policies"}, + ), # with previous auto config that needs to be overwritten + ], +) +def test_dump_config( + tmp_path: Path, + input_file: Text, + expected_file: Text, + expected_file_windows: Text, + capsys: CaptureFixture, + autoconfig_keys: Set[Text], +): + config_file = str(tmp_path / "config.yml") + shutil.copyfile(str(CONFIG_FOLDER / input_file), config_file) + + autoconfig.get_configuration(config_file) + + actual = io_utils.read_file(config_file) + + if sys.platform == "win32": + expected = io_utils.read_file(str(CONFIG_FOLDER / expected_file_windows)) + else: + expected = io_utils.read_file(str(CONFIG_FOLDER / expected_file)) + + assert actual == expected + + captured = capsys.readouterr() + assert "does not exist or is empty" not in captured.out + + for k in CONFIG_AUTOCONFIGURABLE_KEYS: + if k in autoconfig_keys: + assert k in captured.out + else: + assert k not in captured.out diff --git a/tests/importers/test_importer.py b/tests/importers/test_importer.py index 8342ed9b08cc..799786531346 100644 --- a/tests/importers/test_importer.py +++ b/tests/importers/test_importer.py @@ -104,9 +104,7 @@ def test_load_from_config(tmpdir: Path): config_path = str(tmpdir / "config.yml") - io_utils.write_yaml_file( 
- {"importers": [{"name": "MultiProjectImporter"}]}, config_path - ) + io_utils.write_yaml({"importers": [{"name": "MultiProjectImporter"}]}, config_path) importer = TrainingDataImporter.load_from_config(config_path) assert isinstance(importer, CombinedDataImporter) diff --git a/tests/importers/test_multi_project.py b/tests/importers/test_multi_project.py index 664c434ee0cd..bff2d51b6813 100644 --- a/tests/importers/test_multi_project.py +++ b/tests/importers/test_multi_project.py @@ -4,12 +4,16 @@ from _pytest.tmpdir import TempdirFactory import os +from rasa.constants import ( + DEFAULT_CORE_SUBDIRECTORY_NAME, + DEFAULT_DOMAIN_PATH, + DEFAULT_E2E_TESTS_PATH, +) from rasa.nlu.training_data.formats import RasaReader from rasa import model from rasa.core import utils from rasa.core.domain import Domain from rasa.importers.multi_project import MultiProjectImporter -from rasa.train import train_async def test_load_imports_from_directory_tree(tmpdir_factory: TempdirFactory): @@ -43,10 +47,10 @@ def test_load_imports_from_directory_tree(tmpdir_factory: TempdirFactory): subdirectory_3 = root / "Project C" subdirectory_3.mkdir() - expected = { + expected = [ os.path.join(str(project_a_directory)), os.path.join(str(project_b_directory)), - } + ] actual = MultiProjectImporter(str(root / "config.yml")) @@ -79,11 +83,11 @@ def test_load_from_none(input_dict: Dict, tmpdir_factory: TempdirFactory): actual = MultiProjectImporter(str(config_path)) - assert actual._imports == set() + assert actual._imports == list() def test_load_if_subproject_is_more_specific_than_parent( - tmpdir_factory: TempdirFactory + tmpdir_factory: TempdirFactory, ): root = tmpdir_factory.mktemp("Parent Bot") config_path = str(root / "config.yml") @@ -145,7 +149,7 @@ def test_cyclic_imports(tmpdir_factory): actual = MultiProjectImporter(str(root / "config.yml")) - assert actual._imports == {str(project_a_directory), str(project_b_directory)} + assert actual._imports == [str(project_a_directory), str(project_b_directory)] def test_import_outside_project_directory(tmpdir_factory): @@ -169,7 +173,7 @@ def test_import_outside_project_directory(tmpdir_factory): actual = MultiProjectImporter(str(project_a_directory / "config.yml")) - assert actual._imports == {str(project_b_directory), str(root / "Project C")} + assert actual._imports == [str(project_b_directory), str(root / "Project C")] def test_importing_additional_files(tmpdir_factory): @@ -210,6 +214,90 @@ def test_not_importing_not_relevant_additional_files(tmpdir_factory): assert not selector.is_imported(str(not_relevant_file2)) +async def test_only_getting_e2e_conversation_tests_if_e2e_enabled( + tmpdir_factory: TempdirFactory, +): + from rasa.core.interpreter import RegexInterpreter + from rasa.core.training.structures import StoryGraph + import rasa.core.training.loading as core_loading + + root = tmpdir_factory.mktemp("Parent Bot") + config = {"imports": ["bots/Bot A"]} + config_path = str(root / "config.yml") + utils.dump_obj_as_yaml_to_file(config_path, config) + + story_file = root / "bots" / "Bot A" / "data" / "stories.md" + story_file.write( + """ + ## story + * greet + - utter_greet + """, + ensure=True, + ) + + e2e_story_test_file = ( + root / "bots" / "Bot A" / DEFAULT_E2E_TESTS_PATH / "conversation_tests.md" + ) + e2e_story_test_file.write( + """ + ## story test + * greet : "hello" + - utter_greet + """, + ensure=True, + ) + + selector = MultiProjectImporter(config_path) + + story_steps = await core_loading.load_data_from_resource( + 
resource=str(e2e_story_test_file), + domain=Domain.empty(), + interpreter=RegexInterpreter(), + template_variables=None, + use_e2e=True, + exclusion_percentage=None, + ) + + expected = StoryGraph(story_steps) + + actual = await selector.get_stories(use_e2e=True) + + assert expected.as_story_string() == actual.as_story_string() + + +def test_not_importing_e2e_conversation_tests_in_project( + tmpdir_factory: TempdirFactory, +): + root = tmpdir_factory.mktemp("Parent Bot") + config = {"imports": ["bots/Bot A"]} + config_path = str(root / "config.yml") + utils.dump_obj_as_yaml_to_file(config_path, config) + + story_file = root / "bots" / "Bot A" / "data" / "stories.md" + story_file.write("""## story""", ensure=True) + + e2e_story_test_file = ( + root / "bots" / "Bot A" / DEFAULT_E2E_TESTS_PATH / "conversation_tests.md" + ) + e2e_story_test_file.write("""## story test""", ensure=True) + + selector = MultiProjectImporter(config_path) + + # Conversation tests should not be included in story paths + expected = { + "story_paths": [str(story_file)], + "e2e_story_paths": [str(e2e_story_test_file)], + } + + actual = { + "story_paths": selector._story_paths, + "e2e_story_paths": selector._e2e_story_paths, + } + + assert expected == actual + + def test_single_additional_file(tmpdir_factory): root = tmpdir_factory.mktemp("Parent Bot") config_path = str(root / "config.yml") @@ -226,12 +314,13 @@ def test_single_additional_file(tmpdir_factory): assert selector.is_imported(str(additional_file)) -async def test_multi_project_training(): +async def test_multi_project_training(trained_async): example_directory = "data/test_multi_domain" config_file = os.path.join(example_directory, "config.yml") domain_file = os.path.join(example_directory, "domain.yml") files_of_root_project = os.path.join(example_directory, "data") - trained_stack_model_path = await train_async( + + trained_stack_model_path = await trained_async( config=config_file, domain=domain_file, training_files=files_of_root_project, @@ -241,7 +330,9 @@ async def test_multi_project_training(): unpacked = model.unpack_model(trained_stack_model_path) - domain_file = os.path.join(unpacked, "core", "domain.yml") + domain_file = os.path.join( + unpacked, DEFAULT_CORE_SUBDIRECTORY_NAME, DEFAULT_DOMAIN_PATH + ) domain = Domain.load(domain_file) expected_intents = { diff --git a/tests/importers/test_rasa.py b/tests/importers/test_rasa.py index 5328df3b8188..ad871063c09d 100644 --- a/tests/importers/test_rasa.py +++ b/tests/importers/test_rasa.py @@ -3,6 +3,7 @@ import os from rasa.constants import DEFAULT_CONFIG_PATH, DEFAULT_DOMAIN_PATH, DEFAULT_DATA_PATH +from rasa.core.constants import DEFAULT_INTENTS from rasa.core.domain import Domain from rasa.importers.importer import TrainingDataImporter from rasa.importers.rasa import RasaFileImporter @@ -19,10 +20,10 @@ async def test_rasa_file_importer(project: Text): importer = RasaFileImporter(config_path, domain_path, [default_data_path]) domain = await importer.get_domain() - assert len(domain.intents) == 7 + assert len(domain.intents) == 7 + len(DEFAULT_INTENTS) assert domain.slots == [] assert domain.entities == [] - assert len(domain.action_names) == 14 + assert len(domain.action_names) == 17 assert len(domain.templates) == 6 stories = await importer.get_stories() @@ -30,7 +31,7 @@ async def test_rasa_file_importer(project: Text): nlu_data = await importer.get_nlu_data("en") assert len(nlu_data.intents) == 7 - assert len(nlu_data.intent_examples) == 43 + assert len(nlu_data.intent_examples) == 68 async def 
test_rasa_file_importer_with_invalid_config(): diff --git a/tests/nlu/base/test_components.py b/tests/nlu/base/test_components.py deleted file mode 100644 index 947bbc89600c..000000000000 --- a/tests/nlu/base/test_components.py +++ /dev/null @@ -1,103 +0,0 @@ -import pytest - -from rasa.nlu import registry -from rasa.nlu.components import find_unavailable_packages -from rasa.nlu.config import RasaNLUModelConfig -from rasa.nlu.model import Metadata -from tests.nlu import utilities - - -@pytest.mark.parametrize("component_class", registry.component_classes) -def test_no_components_with_same_name(component_class): - """The name of the components need to be unique as they will - be referenced by name when defining processing pipelines.""" - - names = [cls.name for cls in registry.component_classes] - assert ( - names.count(component_class.name) == 1 - ), "There is more than one component named {}".format(component_class.name) - - -@pytest.mark.parametrize("pipeline_template", registry.registered_pipeline_templates) -def test_all_components_in_model_templates_exist(pipeline_template): - """We provide a couple of ready to use pipelines, this test ensures - all components referenced by name in the - pipeline definitions are available.""" - - components = registry.registered_pipeline_templates[pipeline_template] - for component in components: - assert ( - component["name"] in registry.registered_components - ), "Model template contains unknown component." - - -@pytest.mark.parametrize("component_class", registry.component_classes) -def test_all_arguments_can_be_satisfied(component_class): - """Check that `train` method parameters can be filled - filled from the context. Similar to `pipeline_init` test.""" - - # All available context arguments that will ever be generated during train - # it might still happen, that in a certain pipeline - # configuration arguments can not be satisfied! - provided_properties = { - provided for c in registry.component_classes for provided in c.provides - } - - for req in component_class.requires: - assert req in provided_properties, "No component provides required property." 
- - -def test_find_unavailable_packages(): - unavailable = find_unavailable_packages( - ["my_made_up_package_name", "io", "foo_bar", "foo_bar"] - ) - assert unavailable == {"my_made_up_package_name", "foo_bar"} - - -def test_builder_create_by_module_path(component_builder, default_config): - from rasa.nlu.featurizers.regex_featurizer import RegexFeaturizer - - path = "rasa.nlu.featurizers.regex_featurizer.RegexFeaturizer" - component_config = {"name": path} - component = component_builder.create_component(component_config, default_config) - assert type(component) == RegexFeaturizer - - -@pytest.mark.parametrize( - "test_input, expected_output", - [ - ("my_made_up_component", "Cannot find class"), - ("rasa.nlu.featurizers.regex_featurizer.MadeUpClass", "Failed to find class"), - ("made.up.path.RegexFeaturizer", "No module named"), - ], -) -def test_create_component_exception_messages( - component_builder, default_config, test_input, expected_output -): - - with pytest.raises(Exception): - component_config = {"name": test_input} - component_builder.create_component(component_config, default_config) - - -def test_builder_load_unknown(component_builder): - with pytest.raises(Exception) as excinfo: - component_meta = {"name": "my_made_up_componment"} - component_builder.load_component(component_meta, "", Metadata({}, None)) - assert "Cannot find class" in str(excinfo.value) - - -async def test_example_component(component_builder, tmpdir_factory): - conf = RasaNLUModelConfig( - {"pipeline": [{"name": "tests.nlu.example_component.MyComponent"}]} - ) - - interpreter = await utilities.interpreter_for( - component_builder, - data="./data/examples/rasa/demo-rasa.json", - path=tmpdir_factory.mktemp("projects").strpath, - config=conf, - ) - - r = interpreter.parse("test") - assert r is not None diff --git a/tests/nlu/base/test_config.py b/tests/nlu/base/test_config.py deleted file mode 100644 index be729075adb3..000000000000 --- a/tests/nlu/base/test_config.py +++ /dev/null @@ -1,83 +0,0 @@ -import json -import tempfile -from typing import Text - -import pytest - -import rasa.utils.io -from rasa.nlu import config -from rasa.nlu.components import ComponentBuilder -from rasa.nlu.registry import registered_pipeline_templates -from tests.nlu.conftest import CONFIG_DEFAULTS_PATH -from tests.nlu.utilities import write_file_config - -defaults = rasa.utils.io.read_config_file(CONFIG_DEFAULTS_PATH) - - -def test_default_config(default_config): - assert default_config.as_dict() == defaults - - -def test_blank_config(): - file_config = {} - f = write_file_config(file_config) - final_config = config.load(f.name) - assert final_config.as_dict() == defaults - - -def test_invalid_config_json(): - file_config = """pipeline: [pretrained_embeddings_spacy""" # invalid yaml - with tempfile.NamedTemporaryFile("w+", suffix="_tmp_config_file.json") as f: - f.write(file_config) - f.flush() - with pytest.raises(config.InvalidConfigError): - config.load(f.name) - - -def test_invalid_pipeline_template(): - args = {"pipeline": "my_made_up_name"} - f = write_file_config(args) - with pytest.raises(config.InvalidConfigError) as execinfo: - config.load(f.name) - assert "unknown pipeline template" in str(execinfo.value) - - -@pytest.mark.parametrize( - "pipeline_template", list(registered_pipeline_templates.keys()) -) -def test_pipeline_registry_lookup(pipeline_template: Text): - args = {"pipeline": pipeline_template} - f = write_file_config(args) - final_config = config.load(f.name) - components = [c for c in final_config.pipeline] - 
- assert json.dumps(components, sort_keys=True) == json.dumps( - registered_pipeline_templates[pipeline_template], sort_keys=True - ) - - -def test_default_config_file(): - final_config = config.RasaNLUModelConfig() - assert len(final_config) > 1 - - -def test_set_attr_on_component(): - cfg = config.load("sample_configs/config_pretrained_embeddings_spacy.yml") - cfg.set_component_attr(6, C=324) - - assert cfg.for_component(1) == {"name": "SpacyTokenizer"} - assert cfg.for_component(6) == {"name": "SklearnIntentClassifier", "C": 324} - - -def test_override_defaults_supervised_embeddings_pipeline(): - cfg = config.load("data/test/config_embedding_test.yml") - builder = ComponentBuilder() - - component1_cfg = cfg.for_component(0) - - component1 = builder.create_component(component1_cfg, cfg) - assert component1.max_ngram == 3 - - component2_cfg = cfg.for_component(1) - component2 = builder.create_component(component2_cfg, cfg) - assert component2.epochs == 10 diff --git a/tests/nlu/base/test_emulators.py b/tests/nlu/base/test_emulators.py deleted file mode 100644 index 2d21e966b909..000000000000 --- a/tests/nlu/base/test_emulators.py +++ /dev/null @@ -1,152 +0,0 @@ -def test_luis_request(): - from rasa.nlu.emulators.luis import LUISEmulator - - em = LUISEmulator() - norm = em.normalise_request_json({"text": ["arb text"]}) - assert norm == {"text": "arb text", "time": None} - - -def test_luis_response(): - from rasa.nlu.emulators.luis import LUISEmulator - - em = LUISEmulator() - data = { - "text": "I want italian food", - "intent": {"name": "restaurant_search", "confidence": 0.737014589341683}, - "intent_ranking": [ - {"confidence": 0.737014589341683, "name": "restaurant_search"}, - {"confidence": 0.11605464483122209, "name": "goodbye"}, - {"confidence": 0.08816417744097163, "name": "greet"}, - {"confidence": 0.058766588386123204, "name": "affirm"}, - ], - "entities": [{"entity": "cuisine", "value": "italian"}], - } - norm = em.normalise_response_json(data) - assert norm == { - "query": data["text"], - "topScoringIntent": {"intent": "restaurant_search", "score": 0.737014589341683}, - "intents": [ - {"intent": "restaurant_search", "score": 0.737014589341683}, - {"intent": "goodbye", "score": 0.11605464483122209}, - {"intent": "greet", "score": 0.08816417744097163}, - {"intent": "affirm", "score": 0.058766588386123204}, - ], - "entities": [ - { - "entity": e["value"], - "type": e["entity"], - "startIndex": None, - "endIndex": None, - "score": None, - } - for e in data["entities"] - ], - } - - -def test_wit_request(): - from rasa.nlu.emulators.wit import WitEmulator - - em = WitEmulator() - norm = em.normalise_request_json({"text": ["arb text"]}) - assert norm == {"text": "arb text", "time": None} - - -def test_wit_response(): - from rasa.nlu.emulators.wit import WitEmulator - - em = WitEmulator() - data = { - "text": "I want italian food", - "intent": {"name": "inform", "confidence": 0.4794813722432127}, - "entities": [{"entity": "cuisine", "value": "italian", "start": 7, "end": 14}], - } - norm = em.normalise_response_json(data) - assert norm == [ - { - "entities": { - "cuisine": { - "confidence": None, - "type": "value", - "value": "italian", - "start": 7, - "end": 14, - } - }, - "intent": "inform", - "_text": "I want italian food", - "confidence": 0.4794813722432127, - } - ] - - -def test_dialogflow_request(): - from rasa.nlu.emulators.dialogflow import DialogflowEmulator - - em = DialogflowEmulator() - norm = em.normalise_request_json({"text": ["arb text"]}) - assert norm == {"text": "arb 
text", "time": None} - - -def test_dialogflow_response(): - from rasa.nlu.emulators.dialogflow import DialogflowEmulator - - em = DialogflowEmulator() - data = { - "text": "I want italian food", - "intent": {"name": "inform", "confidence": 0.4794813722432127}, - "entities": [{"entity": "cuisine", "value": "italian", "start": 7, "end": 14}], - } - norm = em.normalise_response_json(data) - - assert norm == { - "id": norm["id"], - "result": { - "action": data["intent"]["name"], - "actionIncomplete": False, - "contexts": [], - "fulfillment": {}, - "metadata": { - "intentId": norm["result"]["metadata"]["intentId"], - "intentName": data["intent"]["name"], - "webhookUsed": "false", - }, - "parameters": {"cuisine": ["italian"]}, - "resolvedQuery": data["text"], - "score": data["intent"]["confidence"], - "source": "agent", - }, - "sessionId": norm["sessionId"], - "status": {"code": 200, "errorType": "success"}, - "timestamp": norm["timestamp"], - } - - -def test_dummy_request(): - from rasa.nlu.emulators.no_emulator import NoEmulator - - em = NoEmulator() - norm = em.normalise_request_json({"text": ["arb text"]}) - assert norm == {"text": "arb text", "time": None} - - norm = em.normalise_request_json({"text": ["arb text"], "time": "1499279161658"}) - assert norm == {"text": "arb text", "time": "1499279161658"} - - -def test_dummy_response(): - from rasa.nlu.emulators.no_emulator import NoEmulator - - em = NoEmulator() - data = {"intent": "greet", "text": "hi", "entities": {}, "confidence": 1.0} - assert em.normalise_response_json(data) == data - - -def test_emulators_can_handle_missing_data(): - from rasa.nlu.emulators.luis import LUISEmulator - - em = LUISEmulator() - norm = em.normalise_response_json( - {"text": "this data doesn't contain an intent result"} - ) - assert norm["topScoringIntent"] is None - assert norm["intents"] == [] diff --git a/tests/nlu/base/test_extractors.py b/tests/nlu/base/test_extractors.py deleted file mode 100644 index 4ce6cb30b9f8..000000000000 --- a/tests/nlu/base/test_extractors.py +++ /dev/null @@ -1,532 +0,0 @@ -# coding=utf-8 -import responses - -from rasa.nlu.config import RasaNLUModelConfig -from rasa.nlu.training_data import TrainingData, Message -from tests.nlu import utilities - - -def test_crf_extractor(spacy_nlp, ner_crf_pos_feature_config): - from rasa.nlu.extractors.crf_entity_extractor import CRFEntityExtractor - - ext = CRFEntityExtractor(component_config=ner_crf_pos_feature_config) - examples = [ - Message( - "anywhere in the west", - { - "intent": "restaurant_search", - "entities": [ - {"start": 16, "end": 20, "value": "west", "entity": "location"} - ], - "spacy_doc": spacy_nlp("anywhere in the west"), - }, - ), - Message( - "central indian restaurant", - { - "intent": "restaurant_search", - "entities": [ - { - "start": 0, - "end": 7, - "value": "central", - "entity": "location", - "extractor": "random_extractor", - }, - { - "start": 8, - "end": 14, - "value": "indian", - "entity": "cuisine", - "extractor": "CRFEntityExtractor", - }, - ], - "spacy_doc": spacy_nlp("central indian restaurant"), - }, - ), - ] - - # uses BILOU and the default features - ext.train(TrainingData(training_examples=examples), RasaNLUModelConfig()) - sentence = "anywhere in the west" - doc = {"spacy_doc": spacy_nlp(sentence)} - crf_format = ext._from_text_to_crf(Message(sentence, doc)) - assert [word[0] for word in crf_format] == ["anywhere", "in", "the", "west"] - feats = ext._sentence_to_features(crf_format) - assert "BOS" in feats[0] - assert "EOS" in feats[-1] - assert 
feats[1]["0:low"] == "in" - sentence = "anywhere in the west" - ext.extract_entities(Message(sentence, {"spacy_doc": spacy_nlp(sentence)})) - filtered = ext.filter_trainable_entities(examples) - assert filtered[0].get("entities") == [ - {"start": 16, "end": 20, "value": "west", "entity": "location"} - ], "Entity without extractor remains" - assert filtered[1].get("entities") == [ - { - "start": 8, - "end": 14, - "value": "indian", - "entity": "cuisine", - "extractor": "CRFEntityExtractor", - } - ], "Only CRFEntityExtractor entity annotation remains" - assert examples[1].get("entities")[0] == { - "start": 0, - "end": 7, - "value": "central", - "entity": "location", - "extractor": "random_extractor", - }, "Original examples are not mutated" - - -def test_crf_json_from_BILOU(spacy_nlp, ner_crf_pos_feature_config): - from rasa.nlu.extractors.crf_entity_extractor import CRFEntityExtractor - - ext = CRFEntityExtractor(component_config=ner_crf_pos_feature_config) - sentence = "I need a home cleaning close-by" - doc = {"spacy_doc": spacy_nlp(sentence)} - r = ext._from_crf_to_json( - Message(sentence, doc), - [ - {"O": 1.0}, - {"O": 1.0}, - {"O": 1.0}, - {"B-what": 1.0}, - {"L-what": 1.0}, - {"B-where": 1.0}, - {"I-where": 1.0}, - {"L-where": 1.0}, - ], - ) - assert len(r) == 2, "There should be two entities" - - assert r[0]["confidence"] # confidence should exist - del r[0]["confidence"] - assert r[0] == {"start": 9, "end": 22, "value": "home cleaning", "entity": "what"} - - assert r[1]["confidence"] # confidence should exist - del r[1]["confidence"] - assert r[1] == {"start": 23, "end": 31, "value": "close-by", "entity": "where"} - - -def test_crf_json_from_non_BILOU(spacy_nlp, ner_crf_pos_feature_config): - from rasa.nlu.extractors.crf_entity_extractor import CRFEntityExtractor - - ner_crf_pos_feature_config.update({"BILOU_flag": False}) - ext = CRFEntityExtractor(component_config=ner_crf_pos_feature_config) - sentence = "I need a home cleaning close-by" - doc = {"spacy_doc": spacy_nlp(sentence)} - rs = ext._from_crf_to_json( - Message(sentence, doc), - [ - {"O": 1.0}, - {"O": 1.0}, - {"O": 1.0}, - {"what": 1.0}, - {"what": 1.0}, - {"where": 1.0}, - {"where": 1.0}, - {"where": 1.0}, - ], - ) - - # non BILOU will split multi-word entities - hence 5 - assert len(rs) == 5, "There should be five entities" - - for r in rs: - assert r["confidence"] # confidence should exist - del r["confidence"] - - assert rs[0] == {"start": 9, "end": 13, "value": "home", "entity": "what"} - assert rs[1] == {"start": 14, "end": 22, "value": "cleaning", "entity": "what"} - assert rs[2] == {"start": 23, "end": 28, "value": "close", "entity": "where"} - assert rs[3] == {"start": 28, "end": 29, "value": "-", "entity": "where"} - assert rs[4] == {"start": 29, "end": 31, "value": "by", "entity": "where"} - - -def test_crf_create_entity_dict(spacy_nlp): - from rasa.nlu.extractors.crf_entity_extractor import CRFEntityExtractor - from rasa.nlu.tokenizers.spacy_tokenizer import SpacyTokenizer - from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer - - crf_extractor = CRFEntityExtractor() - spacy_tokenizer = SpacyTokenizer() - white_space_tokenizer = WhitespaceTokenizer() - - examples = [ - { - "message": Message( - "where is St. Michael's Hospital?", - { - "intent": "search_location", - "entities": [ - { - "start": 9, - "end": 31, - "value": "St. 
Michael's Hospital", - "entity": "hospital", - "SpacyTokenizer": { - "entity_start_token_idx": 2, - "entity_end_token_idx": 5, - }, - "WhitespaceTokenizer": { - "entity_start_token_idx": 2, - "entity_end_token_idx": 5, - }, - } - ], - }, - ) - }, - { - "message": Message( - "where is Children's Hospital?", - { - "intent": "search_location", - "entities": [ - { - "start": 9, - "end": 28, - "value": "Children's Hospital", - "entity": "hospital", - "SpacyTokenizer": { - "entity_start_token_idx": 2, - "entity_end_token_idx": 4, - }, - "WhitespaceTokenizer": { - "entity_start_token_idx": 2, - "entity_end_token_idx": 4, - }, - } - ], - }, - ) - }, - ] - for ex in examples: - # spacy tokenizers receives a Doc as input and whitespace tokenizer receives a text - spacy_tokens = spacy_tokenizer.tokenize(spacy_nlp(ex["message"].text)) - white_space_tokens = white_space_tokenizer.tokenize(ex["message"].text) - for tokenizer, tokens in [ - ("SpacyTokenizer", spacy_tokens), - ("WhitespaceTokenizer", white_space_tokens), - ]: - for entity in ex["message"].get("entities"): - parsed_entities = crf_extractor._create_entity_dict( - ex["message"], - tokens, - entity[tokenizer]["entity_start_token_idx"], - entity[tokenizer]["entity_end_token_idx"], - entity["entity"], - 0.8, - ) - assert parsed_entities == { - "start": entity["start"], - "end": entity["end"], - "value": entity["value"], - "entity": entity["entity"], - "confidence": 0.8, - } - - -def test_duckling_entity_extractor(component_builder): - with responses.RequestsMock() as rsps: - rsps.add( - responses.POST, - "http://localhost:8000/parse", - json=[ - { - "body": "Today", - "start": 0, - "value": { - "values": [ - { - "value": "2018-11-13T00:00:00.000-08:00", - "grain": "day", - "type": "value", - } - ], - "value": "2018-11-13T00:00:00.000-08:00", - "grain": "day", - "type": "value", - }, - "end": 5, - "dim": "time", - "latent": False, - }, - { - "body": "the 5th", - "start": 9, - "value": { - "values": [ - { - "value": "2018-12-05T00:00:00.000-08:00", - "grain": "day", - "type": "value", - }, - { - "value": "2019-01-05T00:00:00.000-08:00", - "grain": "day", - "type": "value", - }, - { - "value": "2019-02-05T00:00:00.000-08:00", - "grain": "day", - "type": "value", - }, - ], - "value": "2018-12-05T00:00:00.000-08:00", - "grain": "day", - "type": "value", - }, - "end": 16, - "dim": "time", - "latent": False, - }, - { - "body": "5th of May", - "start": 13, - "value": { - "values": [ - { - "value": "2019-05-05T00:00:00.000-07:00", - "grain": "day", - "type": "value", - }, - { - "value": "2020-05-05T00:00:00.000-07:00", - "grain": "day", - "type": "value", - }, - { - "value": "2021-05-05T00:00:00.000-07:00", - "grain": "day", - "type": "value", - }, - ], - "value": "2019-05-05T00:00:00.000-07:00", - "grain": "day", - "type": "value", - }, - "end": 23, - "dim": "time", - "latent": False, - }, - { - "body": "tomorrow", - "start": 37, - "value": { - "values": [ - { - "value": "2018-11-14T00:00:00.000-08:00", - "grain": "day", - "type": "value", - } - ], - "value": "2018-11-14T00:00:00.000-08:00", - "grain": "day", - "type": "value", - }, - "end": 45, - "dim": "time", - "latent": False, - }, - ], - ) - - _config = RasaNLUModelConfig({"pipeline": [{"name": "DucklingHTTPExtractor"}]}) - _config.set_component_attr( - 0, dimensions=["time"], timezone="UTC", url="http://localhost:8000" - ) - duckling = component_builder.create_component(_config.for_component(0), _config) - message = Message("Today is the 5th of May. 
Let us meet tomorrow.") - duckling.process(message) - entities = message.get("entities") - assert len(entities) == 4 - - # Test duckling with a defined date - - with responses.RequestsMock() as rsps: - rsps.add( - responses.POST, - "http://localhost:8000/parse", - json=[ - { - "body": "tomorrow", - "start": 12, - "value": { - "values": [ - { - "value": "2013-10-13T00:00:00.000Z", - "grain": "day", - "type": "value", - } - ], - "value": "2013-10-13T00:00:00.000Z", - "grain": "day", - "type": "value", - }, - "end": 20, - "dim": "time", - "latent": False, - } - ], - ) - - # 1381536182 == 2013/10/12 02:03:02 - message = Message("Let us meet tomorrow.", time="1381536182") - duckling.process(message) - entities = message.get("entities") - assert len(entities) == 1 - assert entities[0]["text"] == "tomorrow" - assert entities[0]["value"] == "2013-10-13T00:00:00.000Z" - - # Test dimension filtering includes only specified dimensions - _config = RasaNLUModelConfig({"pipeline": [{"name": "DucklingHTTPExtractor"}]}) - _config.set_component_attr( - 0, dimensions=["number"], url="http://localhost:8000" - ) - duckling_number = component_builder.create_component( - _config.for_component(0), _config - ) - - with responses.RequestsMock() as rsps: - rsps.add( - responses.POST, - "http://localhost:8000/parse", - json=[ - { - "body": "Yesterday", - "start": 0, - "value": { - "values": [ - { - "value": "2019-02-28T00:00:00.000+01:00", - "grain": "day", - "type": "value", - } - ], - "value": "2019-02-28T00:00:00.000+01:00", - "grain": "day", - "type": "value", - }, - "end": 9, - "dim": "time", - }, - { - "body": "5", - "start": 21, - "value": {"value": 5, "type": "value"}, - "end": 22, - "dim": "number", - }, - ], - ) - - message = Message("Yesterday there were 5 people in a room") - duckling_number.process(message) - entities = message.get("entities") - - assert len(entities) == 1 - assert entities[0]["text"] == "5" - assert entities[0]["value"] == 5 - - -def test_duckling_entity_extractor_and_synonyms(component_builder): - _config = RasaNLUModelConfig( - { - "pipeline": [ - {"name": "DucklingHTTPExtractor"}, - {"name": "EntitySynonymMapper"}, - ] - } - ) - _config.set_component_attr(0, dimensions=["number"]) - duckling = component_builder.create_component(_config.for_component(0), _config) - synonyms = component_builder.create_component(_config.for_component(1), _config) - message = Message("He was 6 feet away") - duckling.process(message) - # checks that the synonym processor - # can handle entities that have int values - synonyms.process(message) - assert message is not None - - -def test_unintentional_synonyms_capitalized(component_builder): - _config = utilities.base_test_conf("pretrained_embeddings_spacy") - ner_syn = component_builder.create_component(_config.for_component(5), _config) - examples = [ - Message( - "Any Mexican restaurant will do", - { - "intent": "restaurant_search", - "entities": [ - {"start": 4, "end": 11, "value": "Mexican", "entity": "cuisine"} - ], - }, - ), - Message( - "I want Tacos!", - { - "intent": "restaurant_search", - "entities": [ - {"start": 7, "end": 12, "value": "Mexican", "entity": "cuisine"} - ], - }, - ), - ] - ner_syn.train(TrainingData(training_examples=examples), _config) - assert ner_syn.synonyms.get("mexican") is None - assert ner_syn.synonyms.get("tacos") == "Mexican" - - -def test_spacy_ner_extractor(component_builder, spacy_nlp): - _config = RasaNLUModelConfig({"pipeline": [{"name": "SpacyEntityExtractor"}]}) - ext = 
component_builder.create_component(_config.for_component(0), _config) - example = Message( - "anywhere in the U.K.", - { - "intent": "restaurant_search", - "entities": [], - "spacy_doc": spacy_nlp("anywhere in the west"), - }, - ) - - ext.process(example, spacy_nlp=spacy_nlp) - - assert len(example.get("entities", [])) == 1 - assert example.get("entities")[0] == { - "start": 16, - "extractor": "SpacyEntityExtractor", - "end": 20, - "value": "U.K.", - "entity": "GPE", - "confidence": None, - } - - # Test dimension filtering includes only specified dimensions - - example = Message( - "anywhere in the West with Sebastian Thrun", - { - "intent": "example_intent", - "entities": [], - "spacy_doc": spacy_nlp("anywhere in the West with Sebastian Thrun"), - }, - ) - _config = RasaNLUModelConfig({"pipeline": [{"name": "SpacyEntityExtractor"}]}) - - _config.set_component_attr(0, dimensions=["PERSON"]) - ext = component_builder.create_component(_config.for_component(0), _config) - ext.process(example, spacy_nlp=spacy_nlp) - - assert len(example.get("entities", [])) == 1 - assert example.get("entities")[0] == { - "start": 26, - "extractor": "SpacyEntityExtractor", - "end": 41, - "value": "Sebastian Thrun", - "entity": "PERSON", - "confidence": None, - } diff --git a/tests/nlu/base/test_featurizers.py b/tests/nlu/base/test_featurizers.py deleted file mode 100644 index 51bedfa90b32..000000000000 --- a/tests/nlu/base/test_featurizers.py +++ /dev/null @@ -1,464 +0,0 @@ -# -*- coding: utf-8 - -import numpy as np -import pytest - -from rasa.nlu import training_data -from rasa.nlu.tokenizers import Token -from rasa.nlu.tokenizers.mitie_tokenizer import MitieTokenizer -from rasa.nlu.tokenizers.spacy_tokenizer import SpacyTokenizer -from rasa.nlu.training_data import Message -from rasa.nlu.training_data import TrainingData -from rasa.nlu.config import RasaNLUModelConfig - - -@pytest.mark.parametrize( - "sentence, expected", - [ - ( - "hey how are you today", - [-0.19649599, 0.32493639, -0.37408298, -0.10622784, 0.062756], - ) - ], -) -def test_spacy_featurizer(sentence, expected, spacy_nlp): - from rasa.nlu.featurizers import spacy_featurizer - - doc = spacy_nlp(sentence) - vecs = spacy_featurizer.features_for_doc(doc) - assert np.allclose(doc.vector[:5], expected, atol=1e-5) - assert np.allclose(vecs, doc.vector, atol=1e-5) - - -def test_spacy_intent_featurizer(spacy_nlp_component): - from rasa.nlu.featurizers.spacy_featurizer import SpacyFeaturizer - - td = training_data.load_data("data/examples/rasa/demo-rasa.json") - spacy_nlp_component.train(td, config=None) - spacy_featurizer = SpacyFeaturizer() - spacy_featurizer.train(td, config=None) - - intent_features_exist = np.array( - [ - True if example.get("intent_features") is not None else False - for example in td.intent_examples - ] - ) - - # no intent features should have been set - assert not any(intent_features_exist) - - -def test_mitie_featurizer(mitie_feature_extractor, default_config): - from rasa.nlu.featurizers.mitie_featurizer import MitieFeaturizer - - mitie_component_config = {"name": "MitieFeaturizer"} - ftr = MitieFeaturizer.create(mitie_component_config, RasaNLUModelConfig()) - sentence = "Hey how are you today" - tokens = MitieTokenizer().tokenize(sentence) - vecs = ftr.features_for_tokens(tokens, mitie_feature_extractor) - expected = np.array([0.0, -4.4551446, 0.26073121, -1.46632245, -1.84205751]) - assert np.allclose(vecs[:5], expected, atol=1e-5) - - -def test_ngram_featurizer(spacy_nlp): - from rasa.nlu.featurizers.ngram_featurizer 
import NGramFeaturizer - - ftr = NGramFeaturizer({"max_number_of_ngrams": 10}) - - # ensures that during random sampling of the ngram CV we don't end up - # with a one-class-split - repetition_factor = 5 - - greet = {"intent": "greet", "text_features": [0.5]} - goodbye = {"intent": "goodbye", "text_features": [0.5]} - labeled_sentences = [ - Message("heyheyheyhey", greet), - Message("howdyheyhowdy", greet), - Message("heyhey howdyheyhowdy", greet), - Message("howdyheyhowdy heyhey", greet), - Message("astalavistasista", goodbye), - Message("astalavistasista sistala", goodbye), - Message("sistala astalavistasista", goodbye), - ] * repetition_factor - - for m in labeled_sentences: - m.set("spacy_doc", spacy_nlp(m.text)) - - ftr.min_intent_examples_for_ngram_classification = 2 - ftr.train_on_sentences(labeled_sentences) - assert len(ftr.all_ngrams) > 0 - assert ftr.best_num_ngrams > 0 - - -@pytest.mark.parametrize( - "sentence, expected, labeled_tokens", - [ - ("hey how are you today", [0.0, 1.0, 0.0], [0]), - ("hey 456 how are you", [1.0, 1.0, 0.0], [1, 0]), - ("blah balh random eh", [0.0, 0.0, 0.0], []), - ("a 1 digit number", [1.0, 0.0, 1.0], [1, 1]), - ], -) -def test_regex_featurizer(sentence, expected, labeled_tokens, spacy_nlp): - from rasa.nlu.featurizers.regex_featurizer import RegexFeaturizer - - patterns = [ - {"pattern": "[0-9]+", "name": "number", "usage": "intent"}, - {"pattern": "\\bhey*", "name": "hello", "usage": "intent"}, - {"pattern": "[0-1]+", "name": "binary", "usage": "intent"}, - ] - ftr = RegexFeaturizer(known_patterns=patterns) - - # adds tokens to the message - tokenizer = SpacyTokenizer() - message = Message(sentence) - message.set("spacy_doc", spacy_nlp(sentence)) - tokenizer.process(message) - - result = ftr.features_for_patterns(message) - assert np.allclose(result, expected, atol=1e-10) - - # the tokenizer should have added tokens - assert len(message.get("tokens", [])) > 0 - # the number of regex matches on each token should match - for i, token in enumerate(message.get("tokens")): - token_matches = token.get("pattern").values() - num_matches = sum(token_matches) - assert num_matches == labeled_tokens.count(i) - - -@pytest.mark.parametrize( - "sentence, expected, labeled_tokens", - [ - ("lemonade and mapo tofu", [1, 1], [0.0, 2.0, 3.0]), - ("a cup of tea", [1, 0], [3.0]), - ("Is burrito my favorite food?", [0, 1], [1.0]), - ("I want club?mate", [1, 0], [2.0, 3.0]), - ], -) -def test_lookup_tables(sentence, expected, labeled_tokens, spacy_nlp): - from rasa.nlu.featurizers.regex_featurizer import RegexFeaturizer - - lookups = [ - { - "name": "drinks", - "elements": ["mojito", "lemonade", "sweet berry wine", "tea", "club?mate"], - }, - {"name": "plates", "elements": "data/test/lookup_tables/plates.txt"}, - ] - ftr = RegexFeaturizer(lookup_tables=lookups) - - # adds tokens to the message - tokenizer = SpacyTokenizer() - message = Message(sentence) - message.set("spacy_doc", spacy_nlp(sentence)) - tokenizer.process(message) - - result = ftr.features_for_patterns(message) - assert np.allclose(result, expected, atol=1e-10) - - # the tokenizer should have added tokens - assert len(message.get("tokens", [])) > 0 - # the number of regex matches on each token should match - for i, token in enumerate(message.get("tokens")): - token_matches = token.get("pattern").values() - num_matches = sum(token_matches) - assert num_matches == labeled_tokens.count(i) - - -def test_spacy_featurizer_casing(spacy_nlp): - from rasa.nlu.featurizers import spacy_featurizer - - # if this starts 
failing for the default model, we should think about - # removing the lower casing the spacy nlp component does when it - # retrieves vectors. For compressed spacy models (e.g. models - # ending in _sm) this test will most likely fail. - - td = training_data.load_data("data/examples/rasa/demo-rasa.json") - for e in td.intent_examples: - doc = spacy_nlp(e.text) - doc_capitalized = spacy_nlp(e.text.capitalize()) - - vecs = spacy_featurizer.features_for_doc(doc) - vecs_capitalized = spacy_featurizer.features_for_doc(doc_capitalized) - - assert np.allclose( - vecs, vecs_capitalized, atol=1e-5 - ), "Vectors are unequal for texts '{}' and '{}'".format( - e.text, e.text.capitalize() - ) - - -@pytest.mark.parametrize( - "sentence, expected", - [ - ("hello hello hello hello hello ", [5]), - ("hello goodbye hello", [1, 2]), - ("a b c d e f", [1, 1, 1, 1, 1, 1]), - ("a 1 2", [2, 1]), - ], -) -def test_count_vector_featurizer(sentence, expected): - from rasa.nlu.featurizers.count_vectors_featurizer import CountVectorsFeaturizer - - ftr = CountVectorsFeaturizer({"token_pattern": r"(?u)\b\w+\b"}) - train_message = Message(sentence) - # this is needed for a valid training example - train_message.set("intent", "bla") - data = TrainingData([train_message]) - ftr.train(data) - - test_message = Message(sentence) - ftr.process(test_message) - - assert np.all(test_message.get("text_features") == expected) - - -@pytest.mark.parametrize( - "sentence, intent, response, intent_features, response_features", - [ - ("hello hello hello hello hello ", "greet", None, [1], None), - ("hello goodbye hello", "greet", None, [1], None), - ("a 1 2", "char", "char char", [1], [2]), - ], -) -def test_count_vector_featurizer_attribute_featurization( - sentence, intent, response, intent_features, response_features -): - from rasa.nlu.featurizers.count_vectors_featurizer import CountVectorsFeaturizer - - ftr = CountVectorsFeaturizer({"token_pattern": r"(?u)\b\w+\b"}) - train_message = Message(sentence) - - # this is needed for a valid training example - train_message.set("intent", intent) - train_message.set("response", response) - - data = TrainingData([train_message]) - ftr.train(data) - - assert train_message.get("intent_features") == intent_features - assert train_message.get("response_features") == response_features - - -@pytest.mark.parametrize( - "sentence, intent, response, text_features, intent_features, response_features", - [ - ("hello hello greet ", "greet", "hello", [1, 2], [1, 0], [0, 1]), - ( - "I am fine", - "acknowledge", - "good", - [0, 1, 1, 0, 1], - [1, 0, 0, 0, 0], - [0, 0, 0, 1, 0], - ), - ], -) -def test_count_vector_featurizer_shared_vocab( - sentence, intent, response, text_features, intent_features, response_features -): - from rasa.nlu.featurizers.count_vectors_featurizer import CountVectorsFeaturizer - - ftr = CountVectorsFeaturizer( - {"token_pattern": r"(?u)\b\w+\b", "use_shared_vocab": True} - ) - train_message = Message(sentence) - - # this is needed for a valid training example - train_message.set("intent", intent) - train_message.set("response", response) - - data = TrainingData([train_message]) - ftr.train(data) - - assert np.all(train_message.get("text_features") == text_features) - assert np.all(train_message.get("intent_features") == intent_features) - assert np.all(train_message.get("response_features") == response_features) - - -@pytest.mark.parametrize( - "sentence, expected", - [ - ("hello hello hello hello hello __OOV__", [1, 5]), - ("hello goodbye hello __oov__", [1, 1, 2]), - ("a b c d e 
f __oov__ __OOV__ __OOV__", [3, 1, 1, 1, 1, 1, 1]), - ("__OOV__ a 1 2 __oov__ __OOV__", [2, 3, 1]), - ], -) -def test_count_vector_featurizer_oov_token(sentence, expected): - from rasa.nlu.featurizers.count_vectors_featurizer import CountVectorsFeaturizer - - ftr = CountVectorsFeaturizer( - {"token_pattern": r"(?u)\b\w+\b", "OOV_token": "__oov__"} - ) - train_message = Message(sentence) - # this is needed for a valid training example - train_message.set("intent", "bla") - data = TrainingData([train_message]) - ftr.train(data) - - test_message = Message(sentence) - ftr.process(test_message) - - assert np.all(test_message.get("text_features") == expected) - - -@pytest.mark.parametrize( - "sentence, expected", - [ - ("hello hello hello hello hello oov_word0", [1, 5]), - ("hello goodbye hello oov_word0 OOV_word0", [2, 1, 2]), - ("a b c d e f __oov__ OOV_word0 oov_word1", [3, 1, 1, 1, 1, 1, 1]), - ("__OOV__ a 1 2 __oov__ OOV_word1", [2, 3, 1]), - ], -) -def test_count_vector_featurizer_oov_words(sentence, expected): - from rasa.nlu.featurizers.count_vectors_featurizer import CountVectorsFeaturizer - - ftr = CountVectorsFeaturizer( - { - "token_pattern": r"(?u)\b\w+\b", - "OOV_token": "__oov__", - "OOV_words": ["oov_word0", "OOV_word1"], - } - ) - train_message = Message(sentence) - # this is needed for a valid training example - train_message.set("intent", "bla") - data = TrainingData([train_message]) - ftr.train(data) - - test_message = Message(sentence) - ftr.process(test_message) - - assert np.all(test_message.get("text_features") == expected) - - -@pytest.mark.parametrize( - "tokens, expected", - [ - (["hello", "hello", "hello", "hello", "hello"], [5]), - (["你好", "你好", "你好", "你好", "你好"], [5]), # test for unicode chars - (["hello", "goodbye", "hello"], [1, 2]), - # Note: order has changed in Chinese version of "hello" & "goodbye" - (["你好", "再见", "你好"], [2, 1]), # test for unicode chars - (["a", "b", "c", "d", "e", "f"], [1, 1, 1, 1, 1, 1]), - (["a", "1", "2"], [2, 1]), - ], -) -def test_count_vector_featurizer_using_tokens(tokens, expected): - from rasa.nlu.featurizers.count_vectors_featurizer import CountVectorsFeaturizer - - ftr = CountVectorsFeaturizer({"token_pattern": r"(?u)\b\w+\b"}) - - # using empty string instead of real text string to make sure - # count vector only can come from `tokens` feature. 
- # using `message.text` can not get correct result - - tokens_feature = [Token(i, 0) for i in tokens] - - train_message = Message("") - train_message.set("tokens", tokens_feature) - # this is needed for a valid training example - train_message.set("intent", "bla") - data = TrainingData([train_message]) - - ftr.train(data) - - test_message = Message("") - test_message.set("tokens", tokens_feature) - - ftr.process(test_message) - - assert np.all(test_message.get("text_features") == expected) - - -@pytest.mark.parametrize( - "sentence, expected", - [ - ("ababab", [3, 3, 3, 2]), - ("ab ab ab", [2, 2, 3, 3, 3, 2]), - ("abc", [1, 1, 1, 1, 1]), - ], -) -def test_count_vector_featurizer_char(sentence, expected): - from rasa.nlu.featurizers.count_vectors_featurizer import CountVectorsFeaturizer - - ftr = CountVectorsFeaturizer({"min_ngram": 1, "max_ngram": 2, "analyzer": "char"}) - train_message = Message(sentence) - # this is needed for a valid training example - train_message.set("intent", "bla") - data = TrainingData([train_message]) - ftr.train(data) - - test_message = Message(sentence) - ftr.process(test_message) - - assert np.all(test_message.get("text_features") == expected) - - -def test_count_vector_featurizer_persist_load(tmpdir): - from rasa.nlu.featurizers.count_vectors_featurizer import CountVectorsFeaturizer - - # set non default values to config - config = { - "analyzer": "char", - "token_pattern": r"(?u)\b\w+\b", - "strip_accents": "ascii", - "stop_words": "stop", - "min_df": 2, - "max_df": 3, - "min_ngram": 2, - "max_ngram": 3, - "max_features": 10, - "lowercase": False, - } - train_ftr = CountVectorsFeaturizer(config) - - sentence1 = "ababab 123 13xc лаомтгцу sfjv oö aà" - sentence2 = "abababalidcn 123123 13xcdc лаомтгцу sfjv oö aà" - train_message1 = Message(sentence1) - train_message2 = Message(sentence2) - - # this is needed for a valid training example - train_message1.set("intent", "bla") - train_message2.set("intent", "bla") - data = TrainingData([train_message1, train_message2]) - train_ftr.train(data) - # persist featurizer - file_dict = train_ftr.persist("ftr", tmpdir.strpath) - train_vect_params = { - attribute: vectorizer.get_params() - for attribute, vectorizer in train_ftr.vectorizers.items() - } - # add trained vocabulary to vectorizer params - for attribute, attribute_vect_params in train_vect_params.items(): - if hasattr(train_ftr.vectorizers[attribute], "vocabulary_"): - train_vect_params[attribute].update( - {"vocabulary": train_ftr.vectorizers[attribute].vocabulary_} - ) - - # load featurizer - meta = train_ftr.component_config.copy() - meta.update(file_dict) - test_ftr = CountVectorsFeaturizer.load(meta, tmpdir.strpath) - test_vect_params = { - attribute: vectorizer.get_params() - for attribute, vectorizer in test_ftr.vectorizers.items() - } - - assert train_vect_params == test_vect_params - - test_message1 = Message(sentence1) - test_ftr.process(test_message1) - test_message2 = Message(sentence2) - test_ftr.process(test_message2) - - # check that train features and test features after loading are the same - assert np.all( - [ - train_message1.get("text_features") == test_message1.get("text_features"), - train_message2.get("text_features") == test_message2.get("text_features"), - ] - ) diff --git a/tests/nlu/base/test_synonyms.py b/tests/nlu/base/test_synonyms.py deleted file mode 100644 index 5f67c993071d..000000000000 --- a/tests/nlu/base/test_synonyms.py +++ /dev/null @@ -1,26 +0,0 @@ -from rasa.nlu.extractors.entity_synonyms import EntitySynonymMapper 
-from rasa.nlu.model import Metadata -import pytest - - -def test_entity_synonyms(): - entities = [ - {"entity": "test", "value": "chines", "start": 0, "end": 6}, - {"entity": "test", "value": "chinese", "start": 0, "end": 6}, - {"entity": "test", "value": "china", "start": 0, "end": 6}, - ] - ent_synonyms = {"chines": "chinese", "NYC": "New York City"} - EntitySynonymMapper(synonyms=ent_synonyms).replace_synonyms(entities) - assert len(entities) == 3 - assert entities[0]["value"] == "chinese" - assert entities[1]["value"] == "chinese" - assert entities[2]["value"] == "china" - - -def test_loading_no_warning(): - syn = EntitySynonymMapper(synonyms=None) - syn.persist("test", "test") - meta = Metadata({"test": 1}, "test") - with pytest.warns(None) as warn: - syn.load(meta.for_component(0), "test", meta) - assert len(warn) == 0 diff --git a/tests/nlu/base/test_tokenizers.py b/tests/nlu/base/test_tokenizers.py deleted file mode 100644 index d6e0f78691e6..000000000000 --- a/tests/nlu/base/test_tokenizers.py +++ /dev/null @@ -1,258 +0,0 @@ -# -*- coding: utf-8 -*- - -from unittest.mock import patch -from rasa.nlu.training_data import TrainingData, Message -from tests.nlu import utilities -from rasa.nlu import training_data - - -def test_whitespace(): - from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer - - tk = WhitespaceTokenizer() - - assert [t.text for t in tk.tokenize("Forecast for lunch")] == [ - "Forecast", - "for", - "lunch", - ] - - assert [t.offset for t in tk.tokenize("Forecast for lunch")] == [0, 9, 13] - - # we ignore .,!? - assert [t.text for t in tk.tokenize("hey ńöñàśçií how're you?")] == [ - "hey", - "ńöñàśçií", - "how", - "re", - "you", - ] - - assert [t.offset for t in tk.tokenize("hey ńöñàśçií how're you?")] == [ - 0, - 4, - 13, - 17, - 20, - ] - - assert [t.text for t in tk.tokenize("привет! 10.000, ńöñàśçií. (how're you?)")] == [ - "привет", - "10.000", - "ńöñàśçií", - "how", - "re", - "you", - ] - - assert [ - t.offset for t in tk.tokenize("привет! 10.000, ńöñàśçií. (how're you?)") - ] == [0, 8, 16, 27, 31, 34] - - # urls are single token - assert [ - t.text - for t in tk.tokenize( - "https://www.google.com/search?client=" - "safari&rls=en&q=" - "i+like+rasa&ie=UTF-8&oe=UTF-8 " - "https://rasa.com/docs/nlu/" - "components/#tokenizer-whitespace" - ) - ] == [ - "https://www.google.com/search?" 
- "client=safari&rls=en&q=i+like+rasa&ie=UTF-8&oe=UTF-8", - "https://rasa.com/docs/nlu/components/#tokenizer-whitespace", - ] - - assert [ - t.offset - for t in tk.tokenize( - "https://www.google.com/search?client=" - "safari&rls=en&q=" - "i+like+rasa&ie=UTF-8&oe=UTF-8 " - "https://rasa.com/docs/nlu/" - "components/#tokenizer-whitespace" - ) - ] == [0, 83] - - -def test_whitespace_custom_intent_symbol(): - from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer - - component_config = {"intent_tokenization_flag": True, "intent_split_symbol": "+"} - - tk = WhitespaceTokenizer(component_config) - - assert [t.text for t in tk.tokenize("Forecast_for_LUNCH", attribute="intent")] == [ - "Forecast_for_LUNCH" - ] - - assert [t.text for t in tk.tokenize("Forecast+for+LUNCH", attribute="intent")] == [ - "Forecast", - "for", - "LUNCH", - ] - - -def test_whitespace_with_case(): - from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer - - component_config = {"case_sensitive": False} - tk = WhitespaceTokenizer(component_config) - assert [t.text for t in tk.tokenize("Forecast for LUNCH")] == [ - "forecast", - "for", - "lunch", - ] - - component_config = {"case_sensitive": True} - tk = WhitespaceTokenizer(component_config) - assert [t.text for t in tk.tokenize("Forecast for LUNCH")] == [ - "Forecast", - "for", - "LUNCH", - ] - - component_config = {} - tk = WhitespaceTokenizer(component_config) - assert [t.text for t in tk.tokenize("Forecast for LUNCH")] == [ - "Forecast", - "for", - "LUNCH", - ] - - component_config = {"case_sensitive": False} - tk = WhitespaceTokenizer(component_config) - message = Message("Forecast for LUNCH") - tk.process(message) - assert message.data.get("tokens")[0].text == "forecast" - assert message.data.get("tokens")[1].text == "for" - assert message.data.get("tokens")[2].text == "lunch" - - _config = utilities.base_test_conf("supervised_embeddings") - examples = [ - Message( - "Any Mexican restaurant will do", - { - "intent": "restaurant_search", - "entities": [ - {"start": 4, "end": 11, "value": "Mexican", "entity": "cuisine"} - ], - }, - ), - Message( - "I want Tacos!", - { - "intent": "restaurant_search", - "entities": [ - {"start": 7, "end": 12, "value": "Mexican", "entity": "cuisine"} - ], - }, - ), - ] - - component_config = {"case_sensitive": False} - tk = WhitespaceTokenizer(component_config) - tk.train(TrainingData(training_examples=examples), _config) - assert examples[0].data.get("tokens")[0].text == "any" - assert examples[0].data.get("tokens")[1].text == "mexican" - assert examples[0].data.get("tokens")[2].text == "restaurant" - assert examples[0].data.get("tokens")[3].text == "will" - assert examples[0].data.get("tokens")[4].text == "do" - assert examples[1].data.get("tokens")[0].text == "i" - assert examples[1].data.get("tokens")[1].text == "want" - assert examples[1].data.get("tokens")[2].text == "tacos" - - -def test_spacy(spacy_nlp): - from rasa.nlu.tokenizers.spacy_tokenizer import SpacyTokenizer - - tk = SpacyTokenizer() - - text = "Forecast for lunch" - assert [t.text for t in tk.tokenize(spacy_nlp(text))] == [ - "Forecast", - "for", - "lunch", - ] - assert [t.offset for t in tk.tokenize(spacy_nlp(text))] == [0, 9, 13] - - text = "hey ńöñàśçií how're you?" 
- assert [t.text for t in tk.tokenize(spacy_nlp(text))] == [ - "hey", - "ńöñàśçií", - "how", - "'re", - "you", - "?", - ] - assert [t.offset for t in tk.tokenize(spacy_nlp(text))] == [0, 4, 13, 16, 20, 23] - - -def test_spacy_intent_tokenizer(spacy_nlp_component): - from rasa.nlu.tokenizers.spacy_tokenizer import SpacyTokenizer - - td = training_data.load_data("data/examples/rasa/demo-rasa.json") - spacy_nlp_component.train(td, config=None) - spacy_tokenizer = SpacyTokenizer() - spacy_tokenizer.train(td, config=None) - - intent_tokens_exist = [ - True if example.get("intent_tokens") is not None else False - for example in td.intent_examples - ] - - # no intent tokens should have been set - assert not any(intent_tokens_exist) - - -def test_mitie(): - from rasa.nlu.tokenizers.mitie_tokenizer import MitieTokenizer - - tk = MitieTokenizer() - - text = "Forecast for lunch" - assert [t.text for t in tk.tokenize(text)] == ["Forecast", "for", "lunch"] - assert [t.offset for t in tk.tokenize(text)] == [0, 9, 13] - - text = "hey ńöñàśçií how're you?" - assert [t.text for t in tk.tokenize(text)] == [ - "hey", - "ńöñàśçií", - "how", - "'re", - "you", - "?", - ] - assert [t.offset for t in tk.tokenize(text)] == [0, 4, 13, 16, 20, 23] - - -def test_jieba(): - from rasa.nlu.tokenizers.jieba_tokenizer import JiebaTokenizer - - tk = JiebaTokenizer() - - assert [t.text for t in tk.tokenize("我想去吃兰州拉面")] == ["我", "想", "去", "吃", "兰州", "拉面"] - - assert [t.offset for t in tk.tokenize("我想去吃兰州拉面")] == [0, 1, 2, 3, 4, 6] - - assert [t.text for t in tk.tokenize("Micheal你好吗?")] == ["Micheal", "你好", "吗", "?"] - - assert [t.offset for t in tk.tokenize("Micheal你好吗?")] == [0, 7, 9, 10] - - -def test_jieba_load_dictionary(tmpdir_factory): - from rasa.nlu.tokenizers.jieba_tokenizer import JiebaTokenizer - - dictionary_path = tmpdir_factory.mktemp("jieba_custom_dictionary").strpath - - component_config = {"dictionary_path": dictionary_path} - - with patch.object( - JiebaTokenizer, "load_custom_dictionary", return_value=None - ) as mock_method: - tk = JiebaTokenizer(component_config) - tk.tokenize("") - - mock_method.assert_called_once_with(dictionary_path) diff --git a/tests/nlu/classifiers/__init__.py b/tests/nlu/classifiers/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/nlu/classifiers/test_diet_classifier.py b/tests/nlu/classifiers/test_diet_classifier.py new file mode 100644 index 000000000000..cd059da3095e --- /dev/null +++ b/tests/nlu/classifiers/test_diet_classifier.py @@ -0,0 +1,401 @@ +import numpy as np +import pytest +from unittest.mock import Mock + +from rasa.nlu.constants import FEATURE_TYPE_SEQUENCE, FEATURE_TYPE_SENTENCE +from rasa.nlu.featurizers.featurizer import Features +from rasa.nlu import train +from rasa.nlu.classifiers import LABEL_RANKING_LENGTH +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.constants import TEXT, INTENT +from rasa.utils.tensorflow.constants import ( + LOSS_TYPE, + RANDOM_SEED, + RANKING_LENGTH, + EPOCHS, + MASKED_LM, + TENSORBOARD_LOG_LEVEL, + TENSORBOARD_LOG_DIR, + EVAL_NUM_EPOCHS, + EVAL_NUM_EXAMPLES, + BILOU_FLAG, +) +from rasa.nlu.classifiers.diet_classifier import DIETClassifier +from rasa.nlu.model import Interpreter +from rasa.nlu.training_data import Message +from rasa.utils import train_utils +from tests.conftest import DEFAULT_NLU_DATA +from tests.nlu.conftest import DEFAULT_DATA_PATH + + +def test_compute_default_label_features(): + label_features = [ + Message("test a"), + Message("test b"), + Message("test c"), + 
Message("test d"), + ] + + output = DIETClassifier._compute_default_label_features(label_features) + + output = output[0] + + for i, o in enumerate(output): + assert isinstance(o, np.ndarray) + assert o[0][i] == 1 + assert o.shape == (1, len(label_features)) + + +@pytest.mark.parametrize( + "messages, expected", + [ + ( + [ + Message( + "test a", + features=[ + Features(np.zeros(1), FEATURE_TYPE_SEQUENCE, TEXT, "test"), + Features(np.zeros(1), FEATURE_TYPE_SENTENCE, TEXT, "test"), + ], + ), + Message( + "test b", + features=[ + Features(np.zeros(1), FEATURE_TYPE_SEQUENCE, TEXT, "test"), + Features(np.zeros(1), FEATURE_TYPE_SENTENCE, TEXT, "test"), + ], + ), + ], + True, + ), + ( + [ + Message( + "test a", + features=[ + Features(np.zeros(1), FEATURE_TYPE_SEQUENCE, INTENT, "test"), + Features(np.zeros(1), FEATURE_TYPE_SENTENCE, INTENT, "test"), + ], + ) + ], + False, + ), + ( + [ + Message( + "test a", + features=[ + Features(np.zeros(2), FEATURE_TYPE_SEQUENCE, INTENT, "test") + ], + ) + ], + False, + ), + ], +) +def test_check_labels_features_exist(messages, expected): + attribute = TEXT + classifier = DIETClassifier() + assert classifier._check_labels_features_exist(messages, attribute) == expected + + +async def _train_persist_load_with_different_settings( + pipeline, component_builder, tmpdir +): + _config = RasaNLUModelConfig({"pipeline": pipeline, "language": "en"}) + + (trainer, trained, persisted_path) = await train( + _config, + path=tmpdir.strpath, + data="data/examples/rasa/demo-rasa-multi-intent.md", + component_builder=component_builder, + ) + + assert trainer.pipeline + assert trained.pipeline + + loaded = Interpreter.load(persisted_path, component_builder) + + assert loaded.pipeline + assert loaded.parse("Rasa is great!") == trained.parse("Rasa is great!") + + +@pytest.mark.skip_on_windows +async def test_train_persist_load_with_different_settings_non_windows( + component_builder, tmpdir +): + pipeline = [ + { + "name": "ConveRTTokenizer", + "intent_tokenization_flag": True, + "intent_split_symbol": "+", + }, + {"name": "CountVectorsFeaturizer"}, + {"name": "ConveRTFeaturizer"}, + {"name": "DIETClassifier", MASKED_LM: True, EPOCHS: 1}, + ] + await _train_persist_load_with_different_settings( + pipeline, component_builder, tmpdir + ) + + +async def test_train_persist_load_with_different_settings(component_builder, tmpdir): + pipeline = [ + {"name": "WhitespaceTokenizer"}, + {"name": "CountVectorsFeaturizer"}, + {"name": "DIETClassifier", LOSS_TYPE: "margin", EPOCHS: 1}, + ] + await _train_persist_load_with_different_settings( + pipeline, component_builder, tmpdir + ) + + +async def test_raise_error_on_incorrect_pipeline(component_builder, tmpdir): + _config = RasaNLUModelConfig( + { + "pipeline": [ + {"name": "WhitespaceTokenizer"}, + {"name": "DIETClassifier", EPOCHS: 1}, + ], + "language": "en", + } + ) + + with pytest.raises(Exception) as e: + await train( + _config, + path=tmpdir.strpath, + data=DEFAULT_DATA_PATH, + component_builder=component_builder, + ) + + assert ( + "'DIETClassifier' requires ['Featurizer']. " + "Add required components to the pipeline." 
in str(e.value) + ) + + +def as_pipeline(*components): + return [{"name": c} for c in components] + + +@pytest.mark.parametrize( + "classifier_params, data_path, output_length, output_should_sum_to_1", + [ + ( + {RANDOM_SEED: 42, EPOCHS: 1}, + "data/test/many_intents.md", + 10, + True, + ), # default config + ( + {RANDOM_SEED: 42, RANKING_LENGTH: 0, EPOCHS: 1}, + "data/test/many_intents.md", + LABEL_RANKING_LENGTH, + False, + ), # no normalization + ( + {RANDOM_SEED: 42, RANKING_LENGTH: 3, EPOCHS: 1}, + "data/test/many_intents.md", + 3, + True, + ), # lower than default ranking_length + ( + {RANDOM_SEED: 42, RANKING_LENGTH: 12, EPOCHS: 1}, + "data/test/many_intents.md", + LABEL_RANKING_LENGTH, + False, + ), # higher than default ranking_length + ( + {RANDOM_SEED: 42, EPOCHS: 1}, + DEFAULT_NLU_DATA, + 7, + True, + ), # less intents than default ranking_length + ], +) +async def test_softmax_normalization( + component_builder, + tmpdir, + classifier_params, + data_path, + output_length, + output_should_sum_to_1, +): + pipeline = as_pipeline( + "WhitespaceTokenizer", "CountVectorsFeaturizer", "DIETClassifier" + ) + assert pipeline[2]["name"] == "DIETClassifier" + pipeline[2].update(classifier_params) + + _config = RasaNLUModelConfig({"pipeline": pipeline}) + (trained_model, _, persisted_path) = await train( + _config, + path=tmpdir.strpath, + data=data_path, + component_builder=component_builder, + ) + loaded = Interpreter.load(persisted_path, component_builder) + + parse_data = loaded.parse("hello") + intent_ranking = parse_data.get("intent_ranking") + # check that the output was correctly truncated after normalization + assert len(intent_ranking) == output_length + + # check whether normalization had the expected effect + output_sums_to_1 = sum( + [intent.get("confidence") for intent in intent_ranking] + ) == pytest.approx(1) + assert output_sums_to_1 == output_should_sum_to_1 + + # check whether the normalization of rankings is reflected in intent prediction + assert parse_data.get("intent") == intent_ranking[0] + + +@pytest.mark.parametrize( + "classifier_params, output_length", + [({LOSS_TYPE: "margin", RANDOM_SEED: 42, EPOCHS: 1}, LABEL_RANKING_LENGTH)], +) +async def test_margin_loss_is_not_normalized( + monkeypatch, component_builder, tmpdir, classifier_params, output_length +): + pipeline = as_pipeline( + "WhitespaceTokenizer", "CountVectorsFeaturizer", "DIETClassifier" + ) + assert pipeline[2]["name"] == "DIETClassifier" + pipeline[2].update(classifier_params) + + mock = Mock() + monkeypatch.setattr(train_utils, "normalize", mock.normalize) + + _config = RasaNLUModelConfig({"pipeline": pipeline}) + (trained_model, _, persisted_path) = await train( + _config, + path=tmpdir.strpath, + data="data/test/many_intents.md", + component_builder=component_builder, + ) + loaded = Interpreter.load(persisted_path, component_builder) + + parse_data = loaded.parse("hello") + intent_ranking = parse_data.get("intent_ranking") + + # check that the output was not normalized + mock.normalize.assert_not_called() + + # check that the output was correctly truncated + assert len(intent_ranking) == output_length + + # make sure top ranking is reflected in intent prediction + assert parse_data.get("intent") == intent_ranking[0] + + +async def test_set_random_seed(component_builder, tmpdir): + """test if train result is the same for two runs of tf embedding""" + + # set fixed random seed + _config = RasaNLUModelConfig( + { + "pipeline": [ + {"name": "WhitespaceTokenizer"}, + {"name": 
"CountVectorsFeaturizer"}, + {"name": "DIETClassifier", RANDOM_SEED: 1, EPOCHS: 1}, + ], + "language": "en", + } + ) + + # first run + (trained_a, _, persisted_path_a) = await train( + _config, + path=tmpdir.strpath + "_a", + data=DEFAULT_DATA_PATH, + component_builder=component_builder, + ) + # second run + (trained_b, _, persisted_path_b) = await train( + _config, + path=tmpdir.strpath + "_b", + data=DEFAULT_DATA_PATH, + component_builder=component_builder, + ) + + loaded_a = Interpreter.load(persisted_path_a, component_builder) + loaded_b = Interpreter.load(persisted_path_b, component_builder) + result_a = loaded_a.parse("hello")["intent"]["confidence"] + result_b = loaded_b.parse("hello")["intent"]["confidence"] + + assert result_a == result_b + + +async def test_train_tensorboard_logging(component_builder, tmpdir): + from pathlib import Path + + tensorboard_log_dir = Path(tmpdir.strpath) / "tensorboard" + + assert not tensorboard_log_dir.exists() + + _config = RasaNLUModelConfig( + { + "pipeline": [ + {"name": "WhitespaceTokenizer"}, + {"name": "CountVectorsFeaturizer"}, + { + "name": "DIETClassifier", + EPOCHS: 3, + TENSORBOARD_LOG_LEVEL: "epoch", + TENSORBOARD_LOG_DIR: str(tensorboard_log_dir), + EVAL_NUM_EXAMPLES: 15, + EVAL_NUM_EPOCHS: 1, + }, + ], + "language": "en", + } + ) + + await train( + _config, + path=tmpdir.strpath, + data="data/examples/rasa/demo-rasa-multi-intent.md", + component_builder=component_builder, + ) + + assert tensorboard_log_dir.exists() + + all_files = list(tensorboard_log_dir.rglob("*.*")) + assert len(all_files) == 3 + + +@pytest.mark.parametrize( + "classifier_params", + [ + {RANDOM_SEED: 1, EPOCHS: 1, BILOU_FLAG: False}, + {RANDOM_SEED: 1, EPOCHS: 1, BILOU_FLAG: True}, + ], +) +async def test_train_persist_load_with_composite_entities( + classifier_params, component_builder, tmpdir +): + pipeline = as_pipeline( + "WhitespaceTokenizer", "CountVectorsFeaturizer", "DIETClassifier" + ) + assert pipeline[2]["name"] == "DIETClassifier" + pipeline[2].update(classifier_params) + + _config = RasaNLUModelConfig({"pipeline": pipeline, "language": "en"}) + + (trainer, trained, persisted_path) = await train( + _config, + path=tmpdir.strpath, + data="data/test/demo-rasa-composite-entities.md", + component_builder=component_builder, + ) + + assert trainer.pipeline + assert trained.pipeline + + loaded = Interpreter.load(persisted_path, component_builder) + + assert loaded.pipeline + text = "I am looking for an italian restaurant" + assert loaded.parse(text) == trained.parse(text) diff --git a/tests/nlu/classifiers/test_fallback_classifier.py b/tests/nlu/classifiers/test_fallback_classifier.py new file mode 100644 index 000000000000..5e2a339c6f12 --- /dev/null +++ b/tests/nlu/classifiers/test_fallback_classifier.py @@ -0,0 +1,150 @@ +import copy +from typing import Dict + +import pytest + +from rasa.constants import DEFAULT_NLU_FALLBACK_INTENT_NAME +from rasa.core.constants import DEFAULT_NLU_FALLBACK_THRESHOLD +from rasa.nlu.classifiers.fallback_classifier import ( + FallbackClassifier, + THRESHOLD_KEY, + AMBIGUITY_THRESHOLD_KEY, +) +from rasa.nlu.training_data import Message +from rasa.nlu.constants import ( + INTENT_RANKING_KEY, + INTENT, + INTENT_CONFIDENCE_KEY, + INTENT_NAME_KEY, +) + + +@pytest.mark.parametrize( + "message, component_config", + [ + ( + Message( + "some message", + data={ + INTENT: { + INTENT_NAME_KEY: "greet", + INTENT_CONFIDENCE_KEY: 0.234891876578331, + }, + INTENT_RANKING_KEY: [ + { + INTENT_NAME_KEY: "greet", + INTENT_CONFIDENCE_KEY: 
0.234891876578331, + }, + {INTENT_NAME_KEY: "stop", INTENT_CONFIDENCE_KEY: 0.5 - 0.0001}, + {INTENT_NAME_KEY: "affirm", INTENT_CONFIDENCE_KEY: 0}, + {INTENT_NAME_KEY: "inform", INTENT_CONFIDENCE_KEY: -100}, + { + INTENT_NAME_KEY: "deny", + INTENT_CONFIDENCE_KEY: 0.0879683718085289, + }, + ], + }, + ), + {THRESHOLD_KEY: 0.5}, + ), + ( + Message( + "some message", + data={ + INTENT: {INTENT_NAME_KEY: "greet", INTENT_CONFIDENCE_KEY: 1}, + INTENT_RANKING_KEY: [ + {INTENT_NAME_KEY: "greet", INTENT_CONFIDENCE_KEY: 1}, + {INTENT_NAME_KEY: "stop", INTENT_CONFIDENCE_KEY: 0.9}, + ], + }, + ), + {THRESHOLD_KEY: 0.5, AMBIGUITY_THRESHOLD_KEY: 0.1}, + ), + ( + Message( + "some message", + data={ + INTENT: {INTENT_NAME_KEY: "greet", INTENT_CONFIDENCE_KEY: 1}, + INTENT_RANKING_KEY: [ + {INTENT_NAME_KEY: "greet", INTENT_CONFIDENCE_KEY: 1}, + {INTENT_NAME_KEY: "stop", INTENT_CONFIDENCE_KEY: 0.5}, + ], + }, + ), + {THRESHOLD_KEY: 0.5, AMBIGUITY_THRESHOLD_KEY: 0.51}, + ), + ], +) +def test_predict_fallback_intent(message: Message, component_config: Dict): + old_message_state = copy.deepcopy(message) + classifier = FallbackClassifier(component_config=component_config) + classifier.process(message) + + expected_intent = { + INTENT_NAME_KEY: DEFAULT_NLU_FALLBACK_INTENT_NAME, + INTENT_CONFIDENCE_KEY: 1.0, + } + assert message.data[INTENT] == expected_intent + + old_intent_ranking = old_message_state.data[INTENT_RANKING_KEY] + current_intent_ranking = message.data[INTENT_RANKING_KEY] + + assert len(current_intent_ranking) == len(old_intent_ranking) + 1 + assert all(item in current_intent_ranking for item in old_intent_ranking) + assert current_intent_ranking[0] == expected_intent + + +@pytest.mark.parametrize( + "message, component_config", + [ + ( + Message( + "some message", + data={ + INTENT: {INTENT_NAME_KEY: "greet", INTENT_CONFIDENCE_KEY: 0.5}, + INTENT_RANKING_KEY: [ + { + INTENT_NAME_KEY: "greet", + INTENT_CONFIDENCE_KEY: 0.234891876578331, + }, + {INTENT_NAME_KEY: "stop", INTENT_CONFIDENCE_KEY: 0.1}, + {INTENT_NAME_KEY: "affirm", INTENT_CONFIDENCE_KEY: 0}, + {INTENT_NAME_KEY: "inform", INTENT_CONFIDENCE_KEY: -100}, + { + INTENT_NAME_KEY: "deny", + INTENT_CONFIDENCE_KEY: 0.0879683718085289, + }, + ], + }, + ), + {THRESHOLD_KEY: 0.5}, + ), + ( + Message( + "some message", + data={ + INTENT: {INTENT_NAME_KEY: "greet", INTENT_CONFIDENCE_KEY: 1}, + INTENT_RANKING_KEY: [ + {INTENT_NAME_KEY: "greet", INTENT_CONFIDENCE_KEY: 1}, + {INTENT_NAME_KEY: "stop", INTENT_CONFIDENCE_KEY: 0.89}, + ], + }, + ), + {THRESHOLD_KEY: 0.5, AMBIGUITY_THRESHOLD_KEY: 0.1}, + ), + ], +) +def test_not_predict_fallback_intent(message: Message, component_config: Dict): + old_message_state = copy.deepcopy(message) + + classifier = FallbackClassifier(component_config=component_config) + classifier.process(message) + + assert message == old_message_state + + +def test_defaults(): + classifier = FallbackClassifier({}) + + assert classifier.component_config[THRESHOLD_KEY] == DEFAULT_NLU_FALLBACK_THRESHOLD + assert classifier.component_config[AMBIGUITY_THRESHOLD_KEY] == 0.1 diff --git a/tests/nlu/classifiers/test_keyword_classifier.py b/tests/nlu/classifiers/test_keyword_classifier.py new file mode 100644 index 000000000000..e5101c93f939 --- /dev/null +++ b/tests/nlu/classifiers/test_keyword_classifier.py @@ -0,0 +1,167 @@ +import pytest +import copy + +from rasa.nlu.classifiers.keyword_intent_classifier import KeywordIntentClassifier + +# TODO: add tests for other classifers +# from rasa.nlu.classifiers.mitie_intent_classifier import 
MitieIntentClassifier +from rasa.nlu.training_data.formats.rasa import RasaReader +from rasa.nlu.training_data import load_data +from rasa.nlu.training_data.message import Message +from tests.nlu.conftest import DEFAULT_DATA_PATH + + +@pytest.fixture(scope="module") +def training_data(): + return load_data(DEFAULT_DATA_PATH) + + +class ClassifierTestCollection: + """Tests every classifier needs to fulfill. + + Each classifier can have additional tests in its own class.""" + + @pytest.fixture(scope="module") + def classifier_class(self): + return NotImplementedError + + @pytest.fixture(scope="class") + def filename(self, classifier_class): + return "component_0_" + classifier_class.name + + @pytest.fixture(scope="module") + def trained_classifier( + self, classifier_class, training_data, component_config, **kwargs + ): + return self._train_classifier( + classifier_class, training_data, component_config, **kwargs + ) + + def _train_classifier( + self, classifier_class, training_data, component_config, **kwargs + ): + # this ugly line is here because the kwargs of this function contain kwargs + # for both the classifier init and the training, getting the names of the + # classifiers kwargs we can separate them from the training kwargs + classifier_params = classifier_class.__init__.__code__.co_varnames + train_params = {} + for p in classifier_params: + arg = kwargs.pop(p, None) + if arg is not None: + train_params.update(arg) + classifier = self._create_classifier( + classifier_class, component_config, **kwargs + ) + classifier.train(training_data, {}, **train_params) + return classifier + + @pytest.fixture(scope="module") + def component_config(self): + return {} + + @staticmethod + def _create_classifier(classifier_class, component_config, **kwargs): + classifier = classifier_class(component_config, **kwargs) + return classifier + + def test_persist_and_load( + self, training_data, trained_classifier, filename, tmpdir + ): + meta = trained_classifier.persist(filename, tmpdir.strpath) + loaded = trained_classifier.__class__.load(meta, tmpdir.strpath) + predicted = copy.copy(training_data) + actual = copy.copy(training_data) + for m1, m2 in zip(predicted.training_examples, actual.training_examples): + loaded.process(m1) + trained_classifier.process(m2) + assert m1.get("intent") == m2.get("intent") + + +class TestKeywordClassifier(ClassifierTestCollection): + @pytest.fixture(scope="module") + def classifier_class(self): + return KeywordIntentClassifier + + @pytest.mark.parametrize( + "message, intent", + [ + ("hey there joe", "greet"), + ("hello weiouaosdhalkh", "greet"), + ("show me chinese restaurants in the north of town", "restaurant_search"), + ("great", "affirm"), + ("bye bye birdie", "goodbye"), + ("show me a mexican place", None), + ("i", None), + ("in", None), + ("eet", None), + ], + ) + def test_classification(self, trained_classifier, message, intent): + text = Message(message) + trained_classifier.process(text) + assert text.get("intent").get("name", "NOT_CLASSIFIED") == intent + + def test_valid_data( + self, caplog, classifier_class, training_data, component_config, **kwargs + ): + json_data = { + "rasa_nlu_data": { + "common_examples": [ + {"text": "good", "intent": "affirm", "entities": []}, + {"text": "bye", "intent": "goodbye", "entities": []}, + {"text": "see ya", "intent": "goodbye", "entities": []}, + {"text": "yes", "intent": "affirm", "entities": []}, + {"text": "ciao", "intent": "goodbye", "entities": []}, + ] + } + } + rasa_reader = RasaReader() + data = 
rasa_reader.read_from_json(json_data) + + with pytest.warns(None) as record: + self._train_classifier(classifier_class, data, component_config, **kwargs) + assert len(record) == 0 + + @pytest.mark.filterwarnings("ignore:Keyword.* of keywords:UserWarning") + def test_identical_data( + self, caplog, classifier_class, training_data, component_config, **kwargs + ): + json_data = { + "rasa_nlu_data": { + "common_examples": [ + {"text": "good", "intent": "affirm", "entities": []}, + {"text": "good", "intent": "goodbye", "entities": []}, + ] + } + } + rasa_reader = RasaReader() + data = rasa_reader.read_from_json(json_data) + + with pytest.warns(UserWarning) as record: + self._train_classifier(classifier_class, data, component_config, **kwargs) + assert len(record) == 1 + assert ( + "Remove (one of) the duplicates from the training data." + in record[0].message.args[0] + ) + + @pytest.mark.filterwarnings("ignore:Keyword.* of keywords:UserWarning") + def test_ambiguous_data( + self, caplog, classifier_class, training_data, component_config, **kwargs + ): + json_data = { + "rasa_nlu_data": { + "common_examples": [ + {"text": "good", "intent": "affirm", "entities": []}, + {"text": "good morning", "intent": "greet", "entities": []}, + {"text": "see you", "intent": "goodbye", "entities": []}, + {"text": "nice to see you", "intent": "greet", "entities": []}, + ] + } + } + rasa_reader = RasaReader() + data = rasa_reader.read_from_json(json_data) + + with pytest.warns(UserWarning) as record: + self._train_classifier(classifier_class, data, component_config, **kwargs) + assert len(record) == 2 diff --git a/tests/nlu/conftest.py b/tests/nlu/conftest.py index 21588dac0d2b..cdd86c43c1ad 100644 --- a/tests/nlu/conftest.py +++ b/tests/nlu/conftest.py @@ -1,21 +1,14 @@ -import logging -import os +from typing import Text import pytest -from rasa.nlu import config, train +from rasa.nlu.config import RasaNLUModelConfig from rasa.nlu.components import ComponentBuilder - -CONFIG_DEFAULTS_PATH = "sample_configs/config_defaults.yml" - -NLU_DEFAULT_CONFIG_PATH = "sample_configs/config_pretrained_embeddings_mitie.yml" +from rasa.utils.tensorflow.constants import EPOCHS, RANDOM_SEED +from tests.nlu.utilities import write_file_config DEFAULT_DATA_PATH = "data/examples/rasa/demo-rasa.json" -NLU_MODEL_NAME = "nlu_model.tar.gz" - -MOODBOT_MODEL_PATH = "examples/moodbot/models/" - @pytest.fixture(scope="session") def component_builder(): @@ -23,47 +16,108 @@ def component_builder(): @pytest.fixture(scope="session") -def spacy_nlp(component_builder, default_config): +def spacy_nlp(component_builder, blank_config): spacy_nlp_config = {"name": "SpacyNLP"} - return component_builder.create_component(spacy_nlp_config, default_config).nlp + return component_builder.create_component(spacy_nlp_config, blank_config).nlp @pytest.fixture(scope="session") -def spacy_nlp_component(component_builder, default_config): +def spacy_nlp_component(component_builder, blank_config): spacy_nlp_config = {"name": "SpacyNLP"} - return component_builder.create_component(spacy_nlp_config, default_config) + return component_builder.create_component(spacy_nlp_config, blank_config) @pytest.fixture(scope="session") -def ner_crf_pos_feature_config(): - return { - "features": [ - ["low", "title", "upper", "pos", "pos2"], - [ - "bias", - "low", - "suffix3", - "suffix2", - "upper", - "title", - "digit", - "pos", - "pos2", - "pattern", - ], - ["low", "title", "upper", "pos", "pos2"], - ] - } +def mitie_feature_extractor(component_builder: ComponentBuilder, 
blank_config): + mitie_nlp_config = {"name": "MitieNLP"} + return component_builder.create_component(mitie_nlp_config, blank_config).extractor @pytest.fixture(scope="session") -def mitie_feature_extractor(component_builder, default_config): - mitie_nlp_config = {"name": "MitieNLP"} - return component_builder.create_component( - mitie_nlp_config, default_config - ).extractor +def blank_config() -> RasaNLUModelConfig: + return RasaNLUModelConfig({"language": "en", "pipeline": []}) @pytest.fixture(scope="session") -def default_config(): - return config.load(CONFIG_DEFAULTS_PATH) +def config_path() -> Text: + return write_file_config( + { + "language": "en", + "pipeline": [ + {"name": "WhitespaceTokenizer"}, + {"name": "CRFEntityExtractor", EPOCHS: 1, RANDOM_SEED: 42}, + {"name": "CountVectorsFeaturizer"}, + {"name": "DIETClassifier", EPOCHS: 1, RANDOM_SEED: 42}, + ], + } + ).name + + +@pytest.fixture(scope="session") +def config_path_duplicate() -> Text: + return write_file_config( + { + "language": "en", + "pipeline": [ + {"name": "WhitespaceTokenizer"}, + {"name": "CRFEntityExtractor", EPOCHS: 1, RANDOM_SEED: 42}, + {"name": "CountVectorsFeaturizer"}, + {"name": "DIETClassifier", EPOCHS: 1, RANDOM_SEED: 42}, + ], + } + ).name + + +@pytest.fixture() +def pretrained_embeddings_spacy_config() -> RasaNLUModelConfig: + return RasaNLUModelConfig( + { + "language": "en", + "pipeline": [ + {"name": "SpacyNLP"}, + {"name": "SpacyTokenizer"}, + {"name": "SpacyFeaturizer"}, + {"name": "RegexFeaturizer"}, + {"name": "CRFEntityExtractor", EPOCHS: 1, RANDOM_SEED: 42}, + {"name": "EntitySynonymMapper"}, + {"name": "SklearnIntentClassifier"}, + ], + } + ) + + +@pytest.fixture() +def supervised_embeddings_config() -> RasaNLUModelConfig: + return RasaNLUModelConfig( + { + "language": "en", + "pipeline": [ + {"name": "WhitespaceTokenizer"}, + {"name": "RegexFeaturizer"}, + {"name": "CRFEntityExtractor", EPOCHS: 1, RANDOM_SEED: 42}, + {"name": "EntitySynonymMapper"}, + {"name": "CountVectorsFeaturizer"}, + { + "name": "CountVectorsFeaturizer", + "analyzer": "char_wb", + "min_ngram": 1, + "max_ngram": 4, + }, + {"name": "DIETClassifier", EPOCHS: 1, RANDOM_SEED: 42}, + ], + } + ) + + +@pytest.fixture() +def pretrained_embeddings_convert_config() -> RasaNLUModelConfig: + return RasaNLUModelConfig( + { + "language": "en", + "pipeline": [ + {"name": "ConveRTTokenizer"}, + {"name": "ConveRTFeaturizer"}, + {"name": "DIETClassifier", EPOCHS: 1, RANDOM_SEED: 42}, + ], + } + ) diff --git a/tests/nlu/emulators/__init__.py b/tests/nlu/emulators/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/nlu/emulators/test_dialogflow.py b/tests/nlu/emulators/test_dialogflow.py new file mode 100644 index 000000000000..76abc0ee0080 --- /dev/null +++ b/tests/nlu/emulators/test_dialogflow.py @@ -0,0 +1,40 @@ +def test_dialogflow_request(): + from rasa.nlu.emulators.dialogflow import DialogflowEmulator + + em = DialogflowEmulator() + norm = em.normalise_request_json({"text": ["arb text"]}) + assert norm == {"text": "arb text", "time": None} + + +def test_dialogflow_response(): + from rasa.nlu.emulators.dialogflow import DialogflowEmulator + + em = DialogflowEmulator() + data = { + "text": "I want italian food", + "intent": {"name": "inform", "confidence": 0.4794813722432127}, + "entities": [{"entity": "cuisine", "value": "italian", "start": 7, "end": 14}], + } + norm = em.normalise_response_json(data) + + assert norm == { + "id": norm["id"], + "result": { + "action": data["intent"]["name"], + 
"actionIncomplete": False, + "contexts": [], + "fulfillment": {}, + "metadata": { + "intentId": norm["result"]["metadata"]["intentId"], + "intentName": data["intent"]["name"], + "webhookUsed": "false", + }, + "parameters": {"cuisine": ["italian"]}, + "resolvedQuery": data["text"], + "score": data["intent"]["confidence"], + "source": "agent", + }, + "sessionId": norm["sessionId"], + "status": {"code": 200, "errorType": "success"}, + "timestamp": norm["timestamp"], + } diff --git a/tests/nlu/emulators/test_luis.py b/tests/nlu/emulators/test_luis.py new file mode 100644 index 000000000000..5c2cad97e1ba --- /dev/null +++ b/tests/nlu/emulators/test_luis.py @@ -0,0 +1,44 @@ +def test_luis_request(): + from rasa.nlu.emulators.luis import LUISEmulator + + em = LUISEmulator() + norm = em.normalise_request_json({"text": ["arb text"]}) + assert norm == {"text": "arb text", "time": None} + + +def test_luis_response(): + from rasa.nlu.emulators.luis import LUISEmulator + + em = LUISEmulator() + data = { + "text": "I want italian food", + "intent": {"name": "restaurant_search", "confidence": 0.737014589341683}, + "intent_ranking": [ + {"confidence": 0.737014589341683, "name": "restaurant_search"}, + {"confidence": 0.11605464483122209, "name": "goodbye"}, + {"confidence": 0.08816417744097163, "name": "greet"}, + {"confidence": 0.058766588386123204, "name": "affirm"}, + ], + "entities": [{"entity": "cuisine", "value": "italian"}], + } + norm = em.normalise_response_json(data) + assert norm == { + "query": data["text"], + "topScoringIntent": {"intent": "restaurant_search", "score": 0.737014589341683}, + "intents": [ + {"intent": "restaurant_search", "score": 0.737014589341683}, + {"intent": "goodbye", "score": 0.11605464483122209}, + {"intent": "greet", "score": 0.08816417744097163}, + {"intent": "affirm", "score": 0.058766588386123204}, + ], + "entities": [ + { + "entity": e["value"], + "type": e["entity"], + "startIndex": None, + "endIndex": None, + "score": None, + } + for e in data["entities"] + ], + } diff --git a/tests/nlu/emulators/test_no_emulator.py b/tests/nlu/emulators/test_no_emulator.py new file mode 100644 index 000000000000..cc40b3ae8390 --- /dev/null +++ b/tests/nlu/emulators/test_no_emulator.py @@ -0,0 +1,28 @@ +def test_dummy_request(): + from rasa.nlu.emulators.no_emulator import NoEmulator + + em = NoEmulator() + norm = em.normalise_request_json({"text": ["arb text"]}) + assert norm == {"text": "arb text", "time": None} + + norm = em.normalise_request_json({"text": ["arb text"], "time": "1499279161658"}) + assert norm == {"text": "arb text", "time": "1499279161658"} + + +def test_dummy_response(): + from rasa.nlu.emulators.no_emulator import NoEmulator + + em = NoEmulator() + data = {"intent": "greet", "text": "hi", "entities": {}, "confidence": 1.0} + assert em.normalise_response_json(data) == data + + +def test_emulators_can_handle_missing_data(): + from rasa.nlu.emulators.luis import LUISEmulator + + em = LUISEmulator() + norm = em.normalise_response_json( + {"text": "this data doesn't contain an intent result"} + ) + assert norm["topScoringIntent"] is None + assert norm["intents"] == [] diff --git a/tests/nlu/emulators/test_wit.py b/tests/nlu/emulators/test_wit.py new file mode 100644 index 000000000000..069caa26f27f --- /dev/null +++ b/tests/nlu/emulators/test_wit.py @@ -0,0 +1,34 @@ +def test_wit_request(): + from rasa.nlu.emulators.wit import WitEmulator + + em = WitEmulator() + norm = em.normalise_request_json({"text": ["arb text"]}) + assert norm == {"text": "arb text", "time": 
None} + + +def test_wit_response(): + from rasa.nlu.emulators.wit import WitEmulator + + em = WitEmulator() + data = { + "text": "I want italian food", + "intent": {"name": "inform", "confidence": 0.4794813722432127}, + "entities": [{"entity": "cuisine", "value": "italian", "start": 7, "end": 14}], + } + norm = em.normalise_response_json(data) + assert norm == [ + { + "entities": { + "cuisine": { + "confidence": None, + "type": "value", + "value": "italian", + "start": 7, + "end": 14, + } + }, + "intent": "inform", + "_text": "I want italian food", + "confidence": 0.4794813722432127, + } + ] diff --git a/tests/nlu/example_component.py b/tests/nlu/example_component.py index 7243290e85e7..38c2c0693027 100644 --- a/tests/nlu/example_component.py +++ b/tests/nlu/example_component.py @@ -1,6 +1,9 @@ -from rasa.nlu.components import Component import typing -from typing import Any, Optional, Text, Dict +from typing import Any, Optional, Text, Dict, List, Type + +from rasa.nlu.components import Component +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.training_data import Message, TrainingData if typing.TYPE_CHECKING: from rasa.nlu.model import Metadata @@ -9,18 +12,13 @@ class MyComponent(Component): """A new component""" - # Defines what attributes the pipeline component will - # provide when called. The listed attributes - # should be set by the component on the message object - # during test and train, e.g. - # ```message.set("entities", [...])``` - provides = [] + # Which components are required by this component. + # Listed components should appear before the component itself in the pipeline. + @classmethod + def required_components(cls) -> List[Type[Component]]: + """Specify which components need to be present in the pipeline.""" - # Which attributes on a message are required by this - # component. e.g. if requires contains "tokens", than a - # previous component in the pipeline needs to have "tokens" - # within the above described `provides` property. - requires = [] + return [] # Defines the default configuration parameters of a component # these values can be overwritten in the pipeline configuration @@ -32,12 +30,23 @@ class MyComponent(Component): # This attribute is designed for instance method: `can_handle_language`. # Default value is None which means it can handle all languages. # This is an important feature for backwards compatibility of components. - language_list = None + supported_language_list = None + + # Defines what language(s) this component can NOT handle. + # This attribute is designed for instance method: `can_handle_language`. + # Default value is None which means it can handle all languages. + # This is an important feature for backwards compatibility of components. + not_supported_language_list = None - def __init__(self, component_config=None): - super(MyComponent, self).__init__(component_config) + def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None: + super().__init__(component_config) - def train(self, training_data, cfg, **kwargs): + def train( + self, + training_data: TrainingData, + config: Optional[RasaNLUModelConfig] = None, + **kwargs: Any, + ) -> None: """Train this component. This is the components chance to train itself provided @@ -50,7 +59,7 @@ def train(self, training_data, cfg, **kwargs): of components previous to this one.""" pass - def process(self, message, **kwargs): + def process(self, message: Message, **kwargs: Any) -> None: """Process an incoming message. 
This is the components chance to process an incoming @@ -75,7 +84,7 @@ def load( model_dir: Optional[Text] = None, model_metadata: Optional["Metadata"] = None, cached_component: Optional["Component"] = None, - **kwargs: Any + **kwargs: Any, ) -> "Component": """Load this component from file.""" diff --git a/tests/nlu/extractors/__init__.py b/tests/nlu/extractors/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/nlu/extractors/test_crf_entity_extractor.py b/tests/nlu/extractors/test_crf_entity_extractor.py new file mode 100644 index 000000000000..ba32ded95d80 --- /dev/null +++ b/tests/nlu/extractors/test_crf_entity_extractor.py @@ -0,0 +1,184 @@ +from pathlib import Path + +from typing import Dict, Text, List, Any + +import pytest + +from rasa.nlu.components import ComponentBuilder +from rasa.nlu import train +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.model import Interpreter +from rasa.nlu.featurizers.dense_featurizer.spacy_featurizer import SpacyFeaturizer +from rasa.nlu.tokenizers.spacy_tokenizer import SpacyTokenizer +from rasa.nlu.constants import TEXT, SPACY_DOCS, ENTITIES +from rasa.nlu.training_data import Message +from rasa.nlu.extractors.crf_entity_extractor import CRFEntityExtractor + + +def pipeline_from_components(*components: Text) -> List[Dict[Text, Text]]: + return [{"name": c} for c in components] + + +async def test_train_persist_load_with_composite_entities( + component_builder: ComponentBuilder, tmp_path: Path +): + pipeline = pipeline_from_components("WhitespaceTokenizer", "CRFEntityExtractor") + + _config = RasaNLUModelConfig({"pipeline": pipeline, "language": "en"}) + + (trainer, trained, persisted_path) = await train( + _config, + path=str(tmp_path), + data="data/test/demo-rasa-composite-entities.md", + component_builder=component_builder, + ) + + assert trainer.pipeline + assert trained.pipeline + + loaded = Interpreter.load(persisted_path, component_builder) + + assert loaded.pipeline + text = "I am looking for an italian restaurant" + assert loaded.parse(text) == trained.parse(text) + + +@pytest.mark.parametrize( + "config_params", + [ + ( + { + "features": [ + ["low", "title", "upper", "pos", "pos2"], + [ + "low", + "suffix3", + "suffix2", + "upper", + "title", + "digit", + "pos", + "pos2", + ], + ["low", "title", "upper", "pos", "pos2"], + ], + "BILOU_flag": False, + } + ), + ( + { + "features": [ + ["low", "title", "upper", "pos", "pos2"], + [ + "low", + "suffix3", + "suffix2", + "upper", + "title", + "digit", + "pos", + "pos2", + ], + ["low", "title", "upper", "pos", "pos2"], + ], + "BILOU_flag": True, + } + ), + ], +) +async def test_train_persist_with_different_configurations( + config_params: Dict[Text, Any], component_builder: ComponentBuilder, tmp_path: Path +): + pipeline = pipeline_from_components( + "SpacyNLP", "SpacyTokenizer", "CRFEntityExtractor" + ) + assert pipeline[2]["name"] == "CRFEntityExtractor" + pipeline[2].update(config_params) + + _config = RasaNLUModelConfig({"pipeline": pipeline, "language": "en"}) + + (trainer, trained, persisted_path) = await train( + _config, + path=str(tmp_path), + data="data/examples/rasa", + component_builder=component_builder, + ) + + assert trainer.pipeline + assert trained.pipeline + + loaded = Interpreter.load(persisted_path, component_builder) + + assert loaded.pipeline + text = "I am looking for an italian restaurant" + assert loaded.parse(text) == trained.parse(text) + + detected_entities = loaded.parse(text).get(ENTITIES) + + assert 
len(detected_entities) == 1 + assert detected_entities[0]["entity"] == "cuisine" + assert detected_entities[0]["value"] == "italian" + + +def test_crf_use_dense_features(spacy_nlp: Any): + crf_extractor = CRFEntityExtractor( + component_config={ + "features": [ + ["low", "title", "upper", "pos", "pos2"], + [ + "low", + "suffix3", + "suffix2", + "upper", + "title", + "digit", + "pos", + "pos2", + "text_dense_features", + ], + ["low", "title", "upper", "pos", "pos2"], + ] + } + ) + + spacy_featurizer = SpacyFeaturizer() + spacy_tokenizer = SpacyTokenizer() + + text = "Rasa is a company in Berlin" + message = Message(text) + message.set(SPACY_DOCS[TEXT], spacy_nlp(text)) + + spacy_tokenizer.process(message) + spacy_featurizer.process(message) + + text_data = crf_extractor._convert_to_crf_tokens(message) + features = crf_extractor._crf_tokens_to_features(text_data) + + assert "0:text_dense_features" in features[0] + dense_features, _ = message.get_dense_features(TEXT, []) + + for i in range(0, len(dense_features[0])): + assert ( + features[0]["0:text_dense_features"]["text_dense_features"][str(i)] + == dense_features[0][i] + ) + + +@pytest.mark.parametrize( + "entity_predictions, expected_label, expected_confidence", + [ + ([{"O": 0.34, "B-person": 0.03, "I-person": 0.85}], ["I-person"], [0.88]), + ([{"O": 0.99, "person": 0.03}], ["O"], [0.99]), + ], +) +def test_most_likely_entity( + entity_predictions: List[Dict[Text, float]], + expected_label: Text, + expected_confidence: float, +): + crf_extractor = CRFEntityExtractor({"BILOU_flag": True}) + + actual_label, actual_confidence = crf_extractor._most_likely_tag(entity_predictions) + + assert actual_label == expected_label + assert actual_confidence == expected_confidence diff --git a/tests/nlu/extractors/test_duckling_http_extractor.py b/tests/nlu/extractors/test_duckling_http_extractor.py new file mode 100644 index 000000000000..a665e2108d3e --- /dev/null +++ b/tests/nlu/extractors/test_duckling_http_extractor.py @@ -0,0 +1,227 @@ +import responses + +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.training_data import Message + + +def test_duckling_entity_extractor(component_builder): + with responses.RequestsMock() as rsps: + rsps.add( + responses.POST, + "http://localhost:8000/parse", + json=[ + { + "body": "Today", + "start": 0, + "value": { + "values": [ + { + "value": "2018-11-13T00:00:00.000-08:00", + "grain": "day", + "type": "value", + } + ], + "value": "2018-11-13T00:00:00.000-08:00", + "grain": "day", + "type": "value", + }, + "end": 5, + "dim": "time", + "latent": False, + }, + { + "body": "the 5th", + "start": 9, + "value": { + "values": [ + { + "value": "2018-12-05T00:00:00.000-08:00", + "grain": "day", + "type": "value", + }, + { + "value": "2019-01-05T00:00:00.000-08:00", + "grain": "day", + "type": "value", + }, + { + "value": "2019-02-05T00:00:00.000-08:00", + "grain": "day", + "type": "value", + }, + ], + "value": "2018-12-05T00:00:00.000-08:00", + "grain": "day", + "type": "value", + }, + "end": 16, + "dim": "time", + "latent": False, + }, + { + "body": "5th of May", + "start": 13, + "value": { + "values": [ + { + "value": "2019-05-05T00:00:00.000-07:00", + "grain": "day", + "type": "value", + }, + { + "value": "2020-05-05T00:00:00.000-07:00", + "grain": "day", + "type": "value", + }, + { + "value": "2021-05-05T00:00:00.000-07:00", + "grain": "day", + "type": "value", + }, + ], + "value": "2019-05-05T00:00:00.000-07:00", + "grain": "day", + "type": "value", + }, + "end": 23, + "dim": "time", + "latent": 
False, + }, + { + "body": "tomorrow", + "start": 37, + "value": { + "values": [ + { + "value": "2018-11-14T00:00:00.000-08:00", + "grain": "day", + "type": "value", + } + ], + "value": "2018-11-14T00:00:00.000-08:00", + "grain": "day", + "type": "value", + }, + "end": 45, + "dim": "time", + "latent": False, + }, + ], + ) + + _config = RasaNLUModelConfig({"pipeline": [{"name": "DucklingHTTPExtractor"}]}) + _config.set_component_attr( + 0, dimensions=["time"], timezone="UTC", url="http://localhost:8000" + ) + duckling = component_builder.create_component(_config.for_component(0), _config) + message = Message("Today is the 5th of May. Let us meet tomorrow.") + duckling.process(message) + entities = message.get("entities") + assert len(entities) == 4 + + # Test duckling with a defined date + + with responses.RequestsMock() as rsps: + rsps.add( + responses.POST, + "http://localhost:8000/parse", + json=[ + { + "body": "tomorrow", + "start": 12, + "value": { + "values": [ + { + "value": "2013-10-13T00:00:00.000Z", + "grain": "day", + "type": "value", + } + ], + "value": "2013-10-13T00:00:00.000Z", + "grain": "day", + "type": "value", + }, + "end": 20, + "dim": "time", + "latent": False, + } + ], + ) + + # 1381536182 == 2013/10/12 02:03:02 + message = Message("Let us meet tomorrow.", time="1381536182") + duckling.process(message) + entities = message.get("entities") + assert len(entities) == 1 + assert entities[0]["text"] == "tomorrow" + assert entities[0]["value"] == "2013-10-13T00:00:00.000Z" + + # Test dimension filtering includes only specified dimensions + _config = RasaNLUModelConfig({"pipeline": [{"name": "DucklingHTTPExtractor"}]}) + _config.set_component_attr( + 0, dimensions=["number"], url="http://localhost:8000" + ) + duckling_number = component_builder.create_component( + _config.for_component(0), _config + ) + + with responses.RequestsMock() as rsps: + rsps.add( + responses.POST, + "http://localhost:8000/parse", + json=[ + { + "body": "Yesterday", + "start": 0, + "value": { + "values": [ + { + "value": "2019-02-28T00:00:00.000+01:00", + "grain": "day", + "type": "value", + } + ], + "value": "2019-02-28T00:00:00.000+01:00", + "grain": "day", + "type": "value", + }, + "end": 9, + "dim": "time", + }, + { + "body": "5", + "start": 21, + "value": {"value": 5, "type": "value"}, + "end": 22, + "dim": "number", + }, + ], + ) + + message = Message("Yesterday there were 5 people in a room") + duckling_number.process(message) + entities = message.get("entities") + + assert len(entities) == 1 + assert entities[0]["text"] == "5" + assert entities[0]["value"] == 5 + + +def test_duckling_entity_extractor_and_synonyms(component_builder): + _config = RasaNLUModelConfig( + { + "pipeline": [ + {"name": "DucklingHTTPExtractor"}, + {"name": "EntitySynonymMapper"}, + ] + } + ) + _config.set_component_attr(0, dimensions=["number"]) + duckling = component_builder.create_component(_config.for_component(0), _config) + synonyms = component_builder.create_component(_config.for_component(1), _config) + message = Message("He was 6 feet away") + duckling.process(message) + # checks that the synonym processor + # can handle entities that have int values + synonyms.process(message) + assert message is not None diff --git a/tests/nlu/extractors/test_entity_synonyms.py b/tests/nlu/extractors/test_entity_synonyms.py new file mode 100644 index 000000000000..3d5230166b79 --- /dev/null +++ b/tests/nlu/extractors/test_entity_synonyms.py @@ -0,0 +1,56 @@ +from rasa.nlu.extractors.entity_synonyms import EntitySynonymMapper 
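+
+# The tests below cover both halves of EntitySynonymMapper:
+#   * replace_synonyms() rewrites already-extracted entity values using the
+#     synonym map (e.g. {"chines": "chinese"} maps the value "chines" to "chinese").
+#   * train() only learns a synonym when the annotated value differs from the
+#     text span, so a pure capitalization difference ("Mexican" vs. "mexican")
+#     must not create an entry, while "Tacos" annotated as "Mexican" must.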
+from rasa.nlu.training_data import TrainingData, Message + + +def test_entity_synonyms(): + entities = [ + {"entity": "test", "value": "chines", "start": 0, "end": 6}, + {"entity": "test", "value": "chinese", "start": 0, "end": 6}, + {"entity": "test", "value": "china", "start": 0, "end": 6}, + ] + ent_synonyms = {"chines": "chinese", "NYC": "New York City"} + EntitySynonymMapper(synonyms=ent_synonyms).replace_synonyms(entities) + assert len(entities) == 3 + assert entities[0]["value"] == "chinese" + assert entities[1]["value"] == "chinese" + assert entities[2]["value"] == "china" + + +def test_unintentional_synonyms_capitalized( + component_builder, pretrained_embeddings_spacy_config +): + idx = pretrained_embeddings_spacy_config.component_names.index( + "EntitySynonymMapper" + ) + ner_syn = component_builder.create_component( + pretrained_embeddings_spacy_config.for_component(idx), + pretrained_embeddings_spacy_config, + ) + + examples = [ + Message( + "Any Mexican restaurant will do", + { + "intent": "restaurant_search", + "entities": [ + {"start": 4, "end": 11, "value": "Mexican", "entity": "cuisine"} + ], + }, + ), + Message( + "I want Tacos!", + { + "intent": "restaurant_search", + "entities": [ + {"start": 7, "end": 12, "value": "Mexican", "entity": "cuisine"} + ], + }, + ), + ] + + ner_syn.train( + TrainingData(training_examples=examples), pretrained_embeddings_spacy_config + ) + + assert ner_syn.synonyms.get("mexican") is None + assert ner_syn.synonyms.get("tacos") == "Mexican" diff --git a/tests/nlu/extractors/test_extractor.py b/tests/nlu/extractors/test_extractor.py new file mode 100644 index 000000000000..0c12f4c8eb82 --- /dev/null +++ b/tests/nlu/extractors/test_extractor.py @@ -0,0 +1,220 @@ +from typing import Any, Text, Dict, List + +import pytest + +from rasa.nlu.constants import TEXT +from rasa.nlu.training_data import Message +from rasa.nlu.extractors.extractor import EntityExtractor +from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer +from rasa.nlu.training_data.formats import MarkdownReader + + +@pytest.mark.parametrize( + "text, tags, confidences, expected_entities", + [ + ( + "I am flying from San Fransisco to Amsterdam", + { + "entity": ["O", "O", "O", "O", "city", "city", "O", "city"], + "role": ["O", "O", "O", "O", "from", "from", "O", "to"], + }, + { + "entity": [1.0, 1.0, 1.0, 1.0, 0.98, 0.78, 1.0, 0.89], + "role": [1.0, 1.0, 1.0, 1.0, 0.98, 0.78, 1.0, 0.89], + }, + [ + { + "entity": "city", + "start": 17, + "end": 30, + "value": "San Fransisco", + "role": "from", + "confidence_entity": 0.78, + "confidence_role": 0.78, + }, + { + "entity": "city", + "start": 34, + "end": 43, + "value": "Amsterdam", + "role": "to", + "confidence_entity": 0.89, + "confidence_role": 0.89, + }, + ], + ), + ( + "I am flying from San Fransisco to Amsterdam", + { + "entity": ["O", "O", "O", "O", "city", "city", "O", "city"], + "group": ["O", "O", "O", "O", "1", "1", "O", "1"], + }, + None, + [ + { + "entity": "city", + "start": 17, + "end": 30, + "value": "San Fransisco", + "group": "1", + }, + { + "entity": "city", + "start": 34, + "end": 43, + "value": "Amsterdam", + "group": "1", + }, + ], + ), + ( + "Amsterdam", + {"entity": ["city"], "role": ["O"], "group": ["O"]}, + None, + [{"entity": "city", "start": 0, "end": 9, "value": "Amsterdam"}], + ), + ( + "New-York", + {"entity": ["city", "city"], "role": ["O", "O"], "group": ["O", "O"]}, + None, + [{"entity": "city", "start": 0, "end": 8, "value": "New-York"}], + ), + ( + "Amsterdam, Berlin, and London", + { + 
"entity": ["city", "city", "O", "city"], + "role": ["O", "O", "O", "O"], + "group": ["O", "O", "O", "O"], + }, + None, + [ + {"entity": "city", "start": 0, "end": 9, "value": "Amsterdam"}, + {"entity": "city", "start": 11, "end": 17, "value": "Berlin"}, + {"entity": "city", "start": 23, "end": 29, "value": "London"}, + ], + ), + ( + "Amsterdam Berlin and London", + { + "entity": ["U-city", "U-city", "O", "U-city"], + "role": ["O", "O", "O", "O"], + "group": ["O", "O", "O", "O"], + }, + None, + [ + {"entity": "city", "start": 0, "end": 9, "value": "Amsterdam"}, + {"entity": "city", "start": 10, "end": 16, "value": "Berlin"}, + {"entity": "city", "start": 21, "end": 27, "value": "London"}, + ], + ), + ( + "San Fransisco Amsterdam, London", + { + "entity": ["B-city", "L-city", "U-city", "U-city"], + "role": ["O", "O", "O", "O"], + "group": ["O", "O", "O", "O"], + }, + None, + [ + {"entity": "city", "start": 0, "end": 13, "value": "San Fransisco"}, + {"entity": "city", "start": 14, "end": 23, "value": "Amsterdam"}, + {"entity": "city", "start": 25, "end": 31, "value": "London"}, + ], + ), + ( + "New York City Los Angeles and San Diego", + { + "entity": [ + "B-city", + "I-city", + "L-city", + "B-city", + "L-city", + "O", + "B-city", + "L-city", + ], + "role": ["O", "O", "O", "O", "O", "O", "O", "O"], + "group": ["O", "O", "O", "O", "O", "O", "O", "O"], + }, + None, + [ + {"entity": "city", "start": 0, "end": 13, "value": "New York City"}, + {"entity": "city", "start": 14, "end": 25, "value": "Los Angeles"}, + {"entity": "city", "start": 30, "end": 39, "value": "San Diego"}, + ], + ), + ( + "Berlin weather", + {"entity": ["I-city", "O"], "role": ["O", "O"], "group": ["O", "O"],}, + None, + [{"entity": "city", "start": 0, "end": 6, "value": "Berlin"}], + ), + ], +) +def test_convert_tags_to_entities( + text: Text, + tags: Dict[Text, List[Text]], + confidences: Dict[Text, List[float]], + expected_entities: List[Dict[Text, Any]], +): + extractor = EntityExtractor() + tokenizer = WhitespaceTokenizer() + + message = Message(text) + tokens = tokenizer.tokenize(message, TEXT) + + actual_entities = extractor.convert_predictions_into_entities( + text, tokens, tags, confidences + ) + assert actual_entities == expected_entities + + +@pytest.mark.parametrize( + "text, warnings", + [ + ( + "## intent:test\n" + "- I want to fly from [Berlin](location) to [ San Fransisco](location)\n", + 1, + ), + ( + "## intent:test\n" + "- I want to fly from [Berlin ](location) to [San Fransisco](location)\n", + 1, + ), + ( + "## intent:test\n" + "- I want to fly from [Berlin](location) to [San Fransisco.](location)\n" + "- I have nothing to say.", + 1, + ), + ( + "## intent:test\n" + "- I have nothing to say.\n" + "- I want to fly from [Berlin](location) to[San Fransisco](location)\n", + 1, + ), + ( + "## intent:test\n" + "- I want to fly from [Berlin](location) to[San Fransisco](location)\n" + "- Book a flight from [London](location) to [Paris.](location)\n", + 2, + ), + ], +) +def test_check_check_correct_entity_annotations(text: Text, warnings: int): + reader = MarkdownReader() + tokenizer = WhitespaceTokenizer() + + training_data = reader.reads(text) + tokenizer.train(training_data) + + with pytest.warns(UserWarning) as record: + EntityExtractor.check_correct_entity_annotations(training_data) + + assert len(record) == warnings + assert all( + [excerpt in record[0].message.args[0]] + for excerpt in ["Misaligned entity annotation in sentence"] + ) diff --git a/tests/nlu/extractors/test_regex_entity_extractor.py 
b/tests/nlu/extractors/test_regex_entity_extractor.py new file mode 100644 index 000000000000..4ce1155e3679 --- /dev/null +++ b/tests/nlu/extractors/test_regex_entity_extractor.py @@ -0,0 +1,208 @@ +from typing import Any, Text, Dict, List + +import pytest + +from rasa.nlu.training_data import TrainingData +from rasa.nlu.constants import ENTITIES +from rasa.nlu.training_data import Message +from rasa.nlu.extractors.regex_entity_extractor import RegexEntityExtractor + + +@pytest.mark.parametrize( + "text, lookup, expected_entities", + [ + ( + "Berlin and London are cities.", + [ + { + "name": "city", + "elements": ["Berlin", "Amsterdam", "New York", "London"], + } + ], + [ + { + "entity": "city", + "value": "Berlin", + "start": 0, + "end": 6, + "extractor": "RegexEntityExtractor", + }, + { + "entity": "city", + "value": "London", + "start": 11, + "end": 17, + "extractor": "RegexEntityExtractor", + }, + ], + ), + ( + "Sophie is visiting Thomas in Berlin.", + [ + { + "name": "city", + "elements": ["Berlin", "Amsterdam", "New York", "London"], + }, + {"name": "person", "elements": ["Max", "John", "Sophie", "Lisa"]}, + ], + [ + { + "entity": "city", + "value": "Berlin", + "start": 29, + "end": 35, + "extractor": "RegexEntityExtractor", + }, + { + "entity": "person", + "value": "Sophie", + "start": 0, + "end": 6, + "extractor": "RegexEntityExtractor", + }, + ], + ), + ( + "Rasa is great.", + [ + { + "name": "city", + "elements": ["Berlin", "Amsterdam", "New York", "London"], + }, + {"name": "person", "elements": ["Max", "John", "Sophie", "Lisa"]}, + ], + [], + ), + ], +) +def test_process( + text: Text, + lookup: List[Dict[Text, List[Text]]], + expected_entities: List[Dict[Text, Any]], +): + message = Message(text) + + training_data = TrainingData() + training_data.lookup_tables = lookup + training_data.training_examples = [ + Message("Hi Max!", data={"entities": [{"entity": "person", "value": "Max"}]}), + Message( + "I live in Berlin", + data={"entities": [{"entity": "city", "value": "Berlin"}]}, + ), + ] + + entity_extractor = RegexEntityExtractor() + entity_extractor.train(training_data) + entity_extractor.process(message) + + entities = message.get(ENTITIES) + assert entities == expected_entities + + +@pytest.mark.parametrize( + "text, case_sensitive, lookup, expected_entities", + [ + ( + "berlin and London are cities.", + True, + [ + { + "name": "city", + "elements": ["Berlin", "Amsterdam", "New York", "London"], + } + ], + [ + { + "entity": "city", + "value": "London", + "start": 11, + "end": 17, + "extractor": "RegexEntityExtractor", + } + ], + ), + ( + "berlin and London are cities.", + False, + [ + { + "name": "city", + "elements": ["Berlin", "Amsterdam", "New York", "london"], + } + ], + [ + { + "entity": "city", + "value": "berlin", + "start": 0, + "end": 6, + "extractor": "RegexEntityExtractor", + }, + { + "entity": "city", + "value": "London", + "start": 11, + "end": 17, + "extractor": "RegexEntityExtractor", + }, + ], + ), + ], +) +def test_lowercase( + text: Text, + case_sensitive: bool, + lookup: List[Dict[Text, List[Text]]], + expected_entities: List[Dict[Text, Any]], +): + message = Message(text) + training_data = TrainingData() + training_data.lookup_tables = lookup + training_data.training_examples = [ + Message("Hi Max!", data={"entities": [{"entity": "person", "value": "Max"}]}), + Message( + "I live in Berlin", + data={"entities": [{"entity": "city", "value": "Berlin"}]}, + ), + ] + + entity_extractor = RegexEntityExtractor({"case_sensitive": case_sensitive}) + 
entity_extractor.train(training_data) + entity_extractor.process(message) + + entities = message.get(ENTITIES) + assert entities == expected_entities + + +def test_do_not_overwrite_any_entities(): + message = Message("Max lives in Berlin.") + message.set(ENTITIES, [{"entity": "person", "value": "Max", "start": 0, "end": 3}]) + + training_data = TrainingData() + training_data.training_examples = [ + Message("Hi Max!", data={"entities": [{"entity": "person", "value": "Max"}]}), + Message( + "I live in Berlin", + data={"entities": [{"entity": "city", "value": "Berlin"}]}, + ), + ] + training_data.lookup_tables = [ + {"name": "city", "elements": ["London", "Berlin", "Amsterdam"]} + ] + + entity_extractor = RegexEntityExtractor() + entity_extractor.train(training_data) + entity_extractor.process(message) + + entities = message.get(ENTITIES) + assert entities == [ + {"entity": "person", "value": "Max", "start": 0, "end": 3}, + { + "entity": "city", + "value": "Berlin", + "start": 13, + "end": 19, + "extractor": "RegexEntityExtractor", + }, + ] diff --git a/tests/nlu/extractors/test_spacy_entity_extractors.py b/tests/nlu/extractors/test_spacy_entity_extractors.py new file mode 100644 index 000000000000..f417ae0f422d --- /dev/null +++ b/tests/nlu/extractors/test_spacy_entity_extractors.py @@ -0,0 +1,53 @@ +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.training_data import Message + + +def test_spacy_ner_extractor(component_builder, spacy_nlp): + _config = RasaNLUModelConfig({"pipeline": [{"name": "SpacyEntityExtractor"}]}) + ext = component_builder.create_component(_config.for_component(0), _config) + example = Message( + "anywhere in the U.K.", + { + "intent": "restaurant_search", + "entities": [], + "text_spacy_doc": spacy_nlp("anywhere in the west"), + }, + ) + + ext.process(example, spacy_nlp=spacy_nlp) + + assert len(example.get("entities", [])) == 1 + assert example.get("entities")[0] == { + "start": 16, + "extractor": "SpacyEntityExtractor", + "end": 20, + "value": "U.K.", + "entity": "GPE", + "confidence": None, + } + + # Test dimension filtering includes only specified dimensions + + example = Message( + "anywhere in the West with Sebastian Thrun", + { + "intent": "example_intent", + "entities": [], + "text_spacy_doc": spacy_nlp("anywhere in the West with Sebastian Thrun"), + }, + ) + _config = RasaNLUModelConfig({"pipeline": [{"name": "SpacyEntityExtractor"}]}) + + _config.set_component_attr(0, dimensions=["PERSON"]) + ext = component_builder.create_component(_config.for_component(0), _config) + ext.process(example, spacy_nlp=spacy_nlp) + + assert len(example.get("entities", [])) == 1 + assert example.get("entities")[0] == { + "start": 26, + "extractor": "SpacyEntityExtractor", + "end": 41, + "value": "Sebastian Thrun", + "entity": "PERSON", + "confidence": None, + } diff --git a/tests/nlu/featurizers/__init__.py b/tests/nlu/featurizers/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/nlu/featurizers/test_convert_featurizer.py b/tests/nlu/featurizers/test_convert_featurizer.py new file mode 100644 index 000000000000..97b0844d8bc5 --- /dev/null +++ b/tests/nlu/featurizers/test_convert_featurizer.py @@ -0,0 +1,95 @@ +import numpy as np +import pytest + +from rasa.nlu.tokenizers.convert_tokenizer import ConveRTTokenizer +from rasa.nlu.tokenizers.tokenizer import Tokenizer +from rasa.nlu.training_data import TrainingData +from rasa.nlu.constants import TEXT, TOKENS_NAMES, RESPONSE, INTENT +from rasa.nlu.training_data import Message +from 
rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.featurizers.dense_featurizer.convert_featurizer import ConveRTFeaturizer + + +@pytest.mark.skip_on_windows +def test_convert_featurizer_process(component_builder): + tokenizer = component_builder.create_component_from_class(ConveRTTokenizer) + featurizer = component_builder.create_component_from_class(ConveRTFeaturizer) + + sentence = "Hey how are you today ?" + message = Message(sentence) + tokens = tokenizer.tokenize(message, attribute=TEXT) + message.set(TOKENS_NAMES[TEXT], tokens) + + featurizer.process(message, tf_hub_module=tokenizer.module) + + expected = np.array([2.2636216, -0.26475656, -1.1358104, -0.49751878, -1.3946456]) + expected_cls = np.array( + [1.0251294, -0.04053932, -0.7018805, -0.82054937, -0.75054353] + ) + + seq_vecs, sent_vecs = message.get_dense_features(TEXT, []) + + assert len(tokens) == len(seq_vecs) + assert np.allclose(seq_vecs[0][:5], expected, atol=1e-5) + assert np.allclose(sent_vecs[-1][:5], expected_cls, atol=1e-5) + + +@pytest.mark.skip_on_windows +def test_convert_featurizer_train(component_builder): + tokenizer = component_builder.create_component_from_class(ConveRTTokenizer) + featurizer = component_builder.create_component_from_class(ConveRTFeaturizer) + + sentence = "Hey how are you today ?" + message = Message(sentence) + message.set(RESPONSE, sentence) + + tokens = tokenizer.tokenize(message, attribute=TEXT) + + message.set(TOKENS_NAMES[TEXT], tokens) + message.set(TOKENS_NAMES[RESPONSE], tokens) + + featurizer.train( + TrainingData([message]), RasaNLUModelConfig(), tf_hub_module=tokenizer.module + ) + + expected = np.array([2.2636216, -0.26475656, -1.1358104, -0.49751878, -1.3946456]) + expected_cls = np.array( + [1.0251294, -0.04053932, -0.7018805, -0.82054937, -0.75054353] + ) + + seq_vecs, sent_vecs = message.get_dense_features(TEXT, []) + + assert len(tokens) == len(seq_vecs) + assert np.allclose(seq_vecs[0][:5], expected, atol=1e-5) + assert np.allclose(sent_vecs[-1][:5], expected_cls, atol=1e-5) + + seq_vecs, sent_vecs = message.get_dense_features(RESPONSE, []) + + assert len(tokens) == len(seq_vecs) + assert np.allclose(seq_vecs[0][:5], expected, atol=1e-5) + assert np.allclose(sent_vecs[-1][:5], expected_cls, atol=1e-5) + + seq_vecs, sent_vecs = message.get_dense_features(INTENT, []) + + assert seq_vecs is None + assert sent_vecs is None + + +@pytest.mark.parametrize( + "sentence, expected_text", + [ + ("hello", "hello"), + ("you're", "you re"), + ("r. n. 
b.", "r n b"), + ("rock & roll", "rock & roll"), + ("ńöñàśçií", "ńöñàśçií"), + ], +) +@pytest.mark.skip_on_windows +def test_convert_featurizer_tokens_to_text(component_builder, sentence, expected_text): + tokenizer = component_builder.create_component_from_class(ConveRTTokenizer) + tokens = tokenizer.tokenize(Message(sentence), attribute=TEXT) + + actual_text = ConveRTFeaturizer._tokens_to_text([tokens])[0] + + assert expected_text == actual_text diff --git a/tests/nlu/featurizers/test_count_vectors_featurizer.py b/tests/nlu/featurizers/test_count_vectors_featurizer.py new file mode 100644 index 000000000000..42effc649aeb --- /dev/null +++ b/tests/nlu/featurizers/test_count_vectors_featurizer.py @@ -0,0 +1,399 @@ +import numpy as np +import pytest +import scipy.sparse + +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer +from rasa.nlu.constants import TOKENS_NAMES, TEXT, INTENT, RESPONSE +from rasa.nlu.tokenizers.tokenizer import Token +from rasa.nlu.training_data import Message +from rasa.nlu.training_data import TrainingData +from rasa.nlu.featurizers.sparse_featurizer.count_vectors_featurizer import ( + CountVectorsFeaturizer, +) + + +@pytest.mark.parametrize( + "sentence, expected, expected_cls", + [ + ("hello hello hello hello hello", [[1]], [[5]]), + ("hello goodbye hello", [[0, 1]], [[1, 2]]), + ("a b c d e f", [[1, 0, 0, 0, 0, 0]], [[1, 1, 1, 1, 1, 1]]), + ("a 1 2", [[0, 1]], [[2, 1]]), + ], +) +def test_count_vector_featurizer(sentence, expected, expected_cls): + ftr = CountVectorsFeaturizer() + + train_message = Message(sentence) + test_message = Message(sentence) + + WhitespaceTokenizer().process(train_message) + WhitespaceTokenizer().process(test_message) + + ftr.train(TrainingData([train_message])) + + ftr.process(test_message) + + seq_vecs, sen_vecs = test_message.get_sparse_features(TEXT, []) + + assert isinstance(seq_vecs, scipy.sparse.coo_matrix) + assert isinstance(sen_vecs, scipy.sparse.coo_matrix) + + actual_seq_vecs = seq_vecs.toarray() + actual_sen_vecs = sen_vecs.toarray() + + assert np.all(actual_seq_vecs[0] == expected) + assert np.all(actual_sen_vecs[-1] == expected_cls) + + +@pytest.mark.parametrize( + "sentence, intent, response, intent_features, response_features", + [("hello", "greet", None, [[1]], None), ("hello", "greet", "hi", [[1]], [[1]])], +) +def test_count_vector_featurizer_response_attribute_featurization( + sentence, intent, response, intent_features, response_features +): + ftr = CountVectorsFeaturizer() + tk = WhitespaceTokenizer() + + train_message = Message(sentence) + # this is needed for a valid training example + train_message.set(INTENT, intent) + train_message.set(RESPONSE, response) + + # add a second example that has some response, so that the vocabulary for + # response exists + second_message = Message("hello") + second_message.set(RESPONSE, "hi") + second_message.set(INTENT, "greet") + + data = TrainingData([train_message, second_message]) + + tk.train(data) + ftr.train(data) + + intent_seq_vecs, intent_sen_vecs = train_message.get_sparse_features(INTENT, []) + response_seq_vecs, response_sen_vecs = train_message.get_sparse_features( + RESPONSE, [] + ) + + if intent_features: + assert intent_seq_vecs.toarray()[0] == intent_features + assert intent_sen_vecs is None + else: + assert intent_seq_vecs is None + assert intent_sen_vecs is None + + if response_features: + assert response_seq_vecs.toarray()[0] == response_features + assert response_sen_vecs is not None + 
else: + assert response_seq_vecs is None + assert response_sen_vecs is None + + +@pytest.mark.parametrize( + "sentence, intent, response, intent_features, response_features", + [ + ("hello hello hello hello hello ", "greet", None, [[1]], None), + ("hello goodbye hello", "greet", None, [[1]], None), + ("a 1 2", "char", "char char", [[1]], [[1]]), + ], +) +def test_count_vector_featurizer_attribute_featurization( + sentence, intent, response, intent_features, response_features +): + ftr = CountVectorsFeaturizer() + tk = WhitespaceTokenizer() + + train_message = Message(sentence) + # this is needed for a valid training example + train_message.set(INTENT, intent) + train_message.set(RESPONSE, response) + + data = TrainingData([train_message]) + + tk.train(data) + ftr.train(data) + + intent_seq_vecs, intent_sen_vecs = train_message.get_sparse_features(INTENT, []) + response_seq_vecs, response_sen_vecs = train_message.get_sparse_features( + RESPONSE, [] + ) + if intent_features: + assert intent_seq_vecs.toarray()[0] == intent_features + assert intent_sen_vecs is None + else: + assert intent_seq_vecs is None + assert intent_sen_vecs is None + + if response_features: + assert response_seq_vecs.toarray()[0] == response_features + assert response_sen_vecs is not None + else: + assert response_seq_vecs is None + assert response_sen_vecs is None + + +@pytest.mark.parametrize( + "sentence, intent, response, text_features, intent_features, response_features", + [ + ("hello hello greet ", "greet", "hello", [[0, 1]], [[1, 0]], [[0, 1]]), + ( + "I am fine", + "acknowledge", + "good", + [[0, 0, 0, 0, 1]], + [[1, 0, 0, 0, 0]], + [[0, 0, 0, 1, 0]], + ), + ], +) +def test_count_vector_featurizer_shared_vocab( + sentence, intent, response, text_features, intent_features, response_features +): + ftr = CountVectorsFeaturizer({"use_shared_vocab": True}) + tk = WhitespaceTokenizer() + + train_message = Message(sentence) + # this is needed for a valid training example + train_message.set(INTENT, intent) + train_message.set(RESPONSE, response) + + data = TrainingData([train_message]) + tk.train(data) + ftr.train(data) + + seq_vec, sen_vec = train_message.get_sparse_features(TEXT, []) + assert np.all(seq_vec.toarray()[0] == text_features) + assert sen_vec is not None + seq_vec, sen_vec = train_message.get_sparse_features(INTENT, []) + assert np.all(seq_vec.toarray()[0] == intent_features) + assert sen_vec is None + seq_vec, sen_vec = train_message.get_sparse_features(RESPONSE, []) + assert np.all(seq_vec.toarray()[0] == response_features) + assert sen_vec is not None + + +@pytest.mark.parametrize( + "sentence, expected", + [ + ("hello hello hello hello hello __OOV__", [[0, 1]]), + ("hello goodbye hello __oov__", [[0, 0, 1]]), + ("a b c d e f __oov__ __OOV__ __OOV__", [[0, 1, 0, 0, 0, 0, 0]]), + ("__OOV__ a 1 2 __oov__ __OOV__", [[0, 1, 0]]), + ], +) +def test_count_vector_featurizer_oov_token(sentence, expected): + ftr = CountVectorsFeaturizer({"OOV_token": "__oov__"}) + train_message = Message(sentence) + WhitespaceTokenizer().process(train_message) + + data = TrainingData([train_message]) + ftr.train(data) + + test_message = Message(sentence) + ftr.process(test_message) + + seq_vec, sen_vec = train_message.get_sparse_features(TEXT, []) + assert np.all(seq_vec.toarray()[0] == expected) + assert sen_vec is not None + + +@pytest.mark.parametrize( + "sentence, expected", + [ + ("hello hello hello hello hello oov_word0", [[0, 1]]), + ("hello goodbye hello oov_word0 OOV_word0", [[0, 0, 1]]), + ("a b c d e f __oov__ 
OOV_word0 oov_word1", [[0, 1, 0, 0, 0, 0, 0]]), + ("__OOV__ a 1 2 __oov__ OOV_word1", [[0, 1, 0]]), + ], +) +def test_count_vector_featurizer_oov_words(sentence, expected): + + ftr = CountVectorsFeaturizer( + {"OOV_token": "__oov__", "OOV_words": ["oov_word0", "OOV_word1"]} + ) + train_message = Message(sentence) + WhitespaceTokenizer().process(train_message) + + data = TrainingData([train_message]) + ftr.train(data) + + test_message = Message(sentence) + ftr.process(test_message) + + seq_vec, sen_vec = train_message.get_sparse_features(TEXT, []) + assert np.all(seq_vec.toarray()[0] == expected) + assert sen_vec is not None + + +@pytest.mark.parametrize( + "tokens, expected", + [ + (["hello", "hello", "hello", "hello", "hello"], [[1]]), + (["你好", "你好", "你好", "你好", "你好"], [[1]]), # test for unicode chars + (["hello", "goodbye", "hello"], [[0, 1]]), + # Note: order has changed in Chinese version of "hello" & "goodbye" + (["你好", "再见", "你好"], [[1, 0]]), # test for unicode chars + (["a", "b", "c", "d", "e", "f"], [[1, 0, 0, 0, 0, 0]]), + (["a", "1", "2"], [[0, 1]]), + ], +) +def test_count_vector_featurizer_using_tokens(tokens, expected): + + ftr = CountVectorsFeaturizer() + + # using empty string instead of real text string to make sure + # count vector only can come from `tokens` feature. + # using `message.text` can not get correct result + + tokens_feature = [Token(i, 0) for i in tokens] + + train_message = Message("") + train_message.set(TOKENS_NAMES[TEXT], tokens_feature) + + data = TrainingData([train_message]) + + ftr.train(data) + + test_message = Message("") + test_message.set(TOKENS_NAMES[TEXT], tokens_feature) + + ftr.process(test_message) + + seq_vec, sen_vec = train_message.get_sparse_features(TEXT, []) + assert np.all(seq_vec.toarray()[0] == expected) + assert sen_vec is not None + + +@pytest.mark.parametrize( + "sentence, expected", + [ + ("ababab", [[3, 3, 3, 2]]), + ("ab ab ab", [[0, 0, 1, 1, 1, 0]]), + ("abc", [[1, 1, 1, 1, 1]]), + ], +) +def test_count_vector_featurizer_char(sentence, expected): + ftr = CountVectorsFeaturizer({"min_ngram": 1, "max_ngram": 2, "analyzer": "char"}) + + train_message = Message(sentence) + WhitespaceTokenizer().process(train_message) + + data = TrainingData([train_message]) + ftr.train(data) + + test_message = Message(sentence) + WhitespaceTokenizer().process(test_message) + ftr.process(test_message) + + seq_vec, sen_vec = train_message.get_sparse_features(TEXT, []) + assert np.all(seq_vec.toarray()[0] == expected) + assert sen_vec is not None + + +def test_count_vector_featurizer_persist_load(tmp_path): + + # set non default values to config + config = { + "analyzer": "char", + "strip_accents": "ascii", + "stop_words": "stop", + "min_df": 2, + "max_df": 3, + "min_ngram": 2, + "max_ngram": 3, + "max_features": 10, + "lowercase": False, + } + train_ftr = CountVectorsFeaturizer(config) + + sentence1 = "ababab 123 13xc лаомтгцу sfjv oö aà" + sentence2 = "abababalidcn 123123 13xcdc лаомтгцу sfjv oö aà" + train_message1 = Message(sentence1) + train_message2 = Message(sentence2) + + data = TrainingData([train_message1, train_message2]) + train_ftr.train(data) + + # persist featurizer + file_dict = train_ftr.persist("ftr", str(tmp_path)) + train_vect_params = { + attribute: vectorizer.get_params() + for attribute, vectorizer in train_ftr.vectorizers.items() + } + + # add trained vocabulary to vectorizer params + for attribute, attribute_vect_params in train_vect_params.items(): + if hasattr(train_ftr.vectorizers[attribute], "vocabulary_"): + 
train_vect_params[attribute].update( + {"vocabulary": train_ftr.vectorizers[attribute].vocabulary_} + ) + + # load featurizer + meta = train_ftr.component_config.copy() + meta.update(file_dict) + test_ftr = CountVectorsFeaturizer.load(meta, str(tmp_path)) + test_vect_params = { + attribute: vectorizer.get_params() + for attribute, vectorizer in test_ftr.vectorizers.items() + } + + assert train_vect_params == test_vect_params + + # check if vocaculary was loaded correctly + assert hasattr(test_ftr.vectorizers[TEXT], "vocabulary_") + + test_message1 = Message(sentence1) + test_ftr.process(test_message1) + test_message2 = Message(sentence2) + test_ftr.process(test_message2) + + test_seq_vec_1, test_sen_vec_1 = test_message1.get_sparse_features(TEXT, []) + train_seq_vec_1, train_sen_vec_1 = train_message1.get_sparse_features(TEXT, []) + test_seq_vec_2, test_sen_vec_2 = test_message2.get_sparse_features(TEXT, []) + train_seq_vec_2, train_sen_vec_2 = train_message2.get_sparse_features(TEXT, []) + + # check that train features and test features after loading are the same + assert np.all(test_seq_vec_1.toarray() == train_seq_vec_1.toarray()) + assert np.all(test_sen_vec_1.toarray() == train_sen_vec_1.toarray()) + assert np.all(test_seq_vec_2.toarray() == train_seq_vec_2.toarray()) + assert np.all(test_sen_vec_2.toarray() == train_sen_vec_2.toarray()) + + +def test_count_vectors_featurizer_train(): + + featurizer = CountVectorsFeaturizer.create({}, RasaNLUModelConfig()) + + sentence = "Hey how are you today ?" + message = Message(sentence) + message.set(RESPONSE, sentence) + message.set(INTENT, "intent") + WhitespaceTokenizer().train(TrainingData([message])) + + featurizer.train(TrainingData([message]), RasaNLUModelConfig()) + + expected = np.array([0, 1, 0, 0, 0]) + expected_cls = np.array([1, 1, 1, 1, 1]) + + seq_vec, sen_vec = message.get_sparse_features(TEXT, []) + + assert (5, 5) == seq_vec.shape + assert (1, 5) == sen_vec.shape + assert np.all(seq_vec.toarray()[0] == expected) + assert np.all(sen_vec.toarray()[-1] == expected_cls) + + seq_vec, sen_vec = message.get_sparse_features(RESPONSE, []) + + assert (5, 5) == seq_vec.shape + assert (1, 5) == sen_vec.shape + assert np.all(seq_vec.toarray()[0] == expected) + assert np.all(sen_vec.toarray()[-1] == expected_cls) + + seq_vec, sen_vec = message.get_sparse_features(INTENT, []) + + assert sen_vec is None + assert (1, 1) == seq_vec.shape + assert np.all(seq_vec.toarray()[0] == np.array([1])) diff --git a/tests/nlu/featurizers/test_featurizer.py b/tests/nlu/featurizers/test_featurizer.py new file mode 100644 index 000000000000..c2034d47da77 --- /dev/null +++ b/tests/nlu/featurizers/test_featurizer.py @@ -0,0 +1,169 @@ +import numpy as np +import pytest +import scipy.sparse + +from rasa.nlu.classifiers.diet_classifier import ( + DIETClassifier, + TEXT_SENTENCE_FEATURES, + TEXT_SEQUENCE_FEATURES, + LABEL_SEQUENCE_FEATURES, + LABEL_SENTENCE_FEATURES, +) +from rasa.nlu.featurizers.sparse_featurizer.count_vectors_featurizer import ( + CountVectorsFeaturizer, +) +from rasa.nlu.featurizers.sparse_featurizer.lexical_syntactic_featurizer import ( + LexicalSyntacticFeaturizer, +) +from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer +from rasa.nlu.training_data import Message, TrainingData +from rasa.nlu.featurizers.featurizer import DenseFeaturizer, Features +from rasa.nlu.constants import ( + TEXT, + FEATURIZER_CLASS_ALIAS, + FEATURE_TYPE_SEQUENCE, + FEATURE_TYPE_SENTENCE, +) +from rasa.utils.tensorflow.constants import FEATURIZERS + 
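+# These tests cover the Features container and featurizer plumbing:
+#   * combine_with_features() appends new dense/sparse feature matrices and
+#     raises ValueError when the shapes do not line up,
+#   * DenseFeaturizer._calculate_sentence_features() pools token vectors into
+#     a single sentence vector ("mean"/"max"),
+#   * DIETClassifier only consumes features whose FEATURIZER_CLASS_ALIAS is
+#     listed in its `featurizers` option, as exercised in
+#     test_flexible_nlu_pipeline below. In a pipeline config that option
+#     would look roughly like this (illustrative sketch, not part of the tests):
+#       - name: DIETClassifier
+#         featurizers: ["cvf_word", "LexicalSyntacticFeaturizer"]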
+ +def test_combine_with_existing_dense_features(): + existing_features = Features( + np.array([[1, 0, 2, 3], [2, 0, 0, 1]]), FEATURE_TYPE_SEQUENCE, TEXT, "test" + ) + new_features = np.array([[1, 0], [0, 1]]) + expected_features = np.array([[1, 0, 2, 3, 1, 0], [2, 0, 0, 1, 0, 1]]) + + actual_features = existing_features.combine_with_features(new_features) + + assert np.all(expected_features == actual_features) + + +def test_combine_with_existing_dense_features_shape_mismatch(): + existing_features = Features( + np.array([[1, 0, 2, 3], [2, 0, 0, 1]]), FEATURE_TYPE_SEQUENCE, TEXT, "test" + ) + new_features = np.array([[0, 1]]) + + with pytest.raises(ValueError): + existing_features.combine_with_features(new_features) + + +def test_combine_with_existing_sparse_features(): + existing_features = Features( + scipy.sparse.csr_matrix([[1, 0, 2, 3], [2, 0, 0, 1]]), + FEATURE_TYPE_SEQUENCE, + TEXT, + "test", + ) + new_features = scipy.sparse.csr_matrix([[1, 0], [0, 1]]) + expected_features = [[1, 0, 2, 3, 1, 0], [2, 0, 0, 1, 0, 1]] + + actual_features = existing_features.combine_with_features(new_features) + actual_features = actual_features.toarray() + + assert np.all(expected_features == actual_features) + + +def test_combine_with_existing_sparse_features_shape_mismatch(): + existing_features = Features( + scipy.sparse.csr_matrix([[1, 0, 2, 3], [2, 0, 0, 1]]), + FEATURE_TYPE_SEQUENCE, + TEXT, + "test", + ) + new_features = scipy.sparse.csr_matrix([[0, 1]]) + + with pytest.raises(ValueError): + existing_features.combine_with_features(new_features) + + +@pytest.mark.parametrize( + "pooling, features, expected", + [ + ( + "mean", + np.array([[0.5, 3, 0.4, 0.1], [0, 0, 0, 0], [0.5, 3, 0.4, 0.1]]), + np.array([[0.5, 3, 0.4, 0.1]]), + ), + ( + "max", + np.array([[1.0, 3.0, 0.0, 2.0], [4.0, 3.0, 1.0, 0.0]]), + np.array([[4.0, 3.0, 1.0, 2.0]]), + ), + ( + "max", + np.array([[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]), + np.array([[0.0, 0.0, 0.0, 0.0]]), + ), + ], +) +def test_calculate_cls_vector(pooling, features, expected): + actual = DenseFeaturizer._calculate_sentence_features(features, pooling) + + assert np.all(actual == expected) + + +def test_flexible_nlu_pipeline(): + message = Message("This is a test message.", data={"intent": "test"}) + training_data = TrainingData([message, message, message, message, message]) + + tokenizer = WhitespaceTokenizer() + tokenizer.train(training_data) + + featurizer = CountVectorsFeaturizer( + component_config={FEATURIZER_CLASS_ALIAS: "cvf_word"} + ) + featurizer.train(training_data) + + featurizer = CountVectorsFeaturizer( + component_config={ + FEATURIZER_CLASS_ALIAS: "cvf_char", + "min_ngram": 1, + "max_ngram": 3, + "analyzer": "char_wb", + } + ) + featurizer.train(training_data) + + featurizer = LexicalSyntacticFeaturizer({}) + featurizer.train(training_data) + + assert len(message.features) == 6 + assert message.features[0].origin == "cvf_word" + assert message.features[0].type == FEATURE_TYPE_SEQUENCE + assert message.features[1].origin == "cvf_word" + assert message.features[1].type == FEATURE_TYPE_SENTENCE + # cvf word is also extracted for the intent + assert message.features[2].origin == "cvf_word" + assert message.features[2].type == FEATURE_TYPE_SEQUENCE + assert message.features[3].origin == "cvf_char" + assert message.features[3].type == FEATURE_TYPE_SEQUENCE + assert message.features[4].origin == "cvf_char" + assert message.features[4].type == FEATURE_TYPE_SENTENCE + assert message.features[5].origin == "LexicalSyntacticFeaturizer" + assert 
message.features[5].type == FEATURE_TYPE_SEQUENCE + + sequence_feature_dim = ( + message.features[0].features.shape[1] + message.features[5].features.shape[1] + ) + sentence_feature_dim = message.features[0].features.shape[1] + + classifier = DIETClassifier( + component_config={FEATURIZERS: ["cvf_word", "LexicalSyntacticFeaturizer"]} + ) + model_data = classifier.preprocess_train_data(training_data) + + assert len(model_data.get(TEXT_SENTENCE_FEATURES)) == 1 + assert len(model_data.get(TEXT_SEQUENCE_FEATURES)) == 1 + assert len(model_data.get(LABEL_SEQUENCE_FEATURES)) == 1 + assert len(model_data.get(LABEL_SENTENCE_FEATURES)) == 0 + assert model_data.get(TEXT_SEQUENCE_FEATURES)[0][0].shape == ( + 5, + sequence_feature_dim, + ) + assert model_data.get(TEXT_SENTENCE_FEATURES)[0][0].shape == ( + 1, + sentence_feature_dim, + ) + assert model_data.get(LABEL_SEQUENCE_FEATURES)[0][0].shape == (1, 1) diff --git a/tests/nlu/featurizers/test_lexical_syntactic_featurizer.py b/tests/nlu/featurizers/test_lexical_syntactic_featurizer.py new file mode 100644 index 000000000000..b8a5375636f6 --- /dev/null +++ b/tests/nlu/featurizers/test_lexical_syntactic_featurizer.py @@ -0,0 +1,129 @@ +import numpy as np +import pytest + +import scipy.sparse + +from rasa.nlu.tokenizers.spacy_tokenizer import SpacyTokenizer +from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer +from rasa.nlu.featurizers.sparse_featurizer.lexical_syntactic_featurizer import ( + LexicalSyntacticFeaturizer, +) +from rasa.nlu.training_data import TrainingData +from rasa.nlu.constants import TEXT, SPACY_DOCS +from rasa.nlu.training_data import Message + + +@pytest.mark.parametrize( + "sentence, expected_features", + [ + ( + "hello goodbye hello", + [ + [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0], + [0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0], + [1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 1.0, 3.0, 1.0, 2.0, 1.0, 1.0, 2.0], + ], + ), + ( + "a 1", + [ + [0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0], + [1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + ], + ), + ], +) +def test_text_featurizer(sentence, expected_features): + featurizer = LexicalSyntacticFeaturizer( + { + "features": [ + ["BOS", "upper"], + ["BOS", "EOS", "prefix2", "digit"], + ["EOS", "low"], + ] + } + ) + + train_message = Message(sentence) + test_message = Message(sentence) + + WhitespaceTokenizer().process(train_message) + WhitespaceTokenizer().process(test_message) + + featurizer.train(TrainingData([train_message])) + + featurizer.process(test_message) + + seq_vec, sen_vec = test_message.get_sparse_features(TEXT, []) + + assert isinstance(seq_vec, scipy.sparse.coo_matrix) + assert sen_vec is None + + assert np.all(seq_vec.toarray() == expected_features[:-1]) + + +@pytest.mark.parametrize( + "sentence, expected", + [("hello 123 hello 123 hello", [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0])], +) +def test_text_featurizer_window_size(sentence, expected): + featurizer = LexicalSyntacticFeaturizer( + {"features": [["upper"], ["digit"], ["low"], ["digit"]]} + ) + + train_message = Message(sentence) + test_message = Message(sentence) + + WhitespaceTokenizer().process(train_message) + WhitespaceTokenizer().process(test_message) + + featurizer.train(TrainingData([train_message])) + + featurizer.process(test_message) + + seq_vec, sen_vec = test_message.get_sparse_features(TEXT, []) + 
+ assert isinstance(seq_vec, scipy.sparse.coo_matrix) + assert sen_vec is None + + assert np.all(seq_vec.toarray()[0] == expected) + + +@pytest.mark.parametrize( + "sentence, expected", + [ + ( + "The sun is shining", + [ + [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0], + ], + ) + ], +) +def test_text_featurizer_using_pos(sentence, expected, spacy_nlp): + featurizer = LexicalSyntacticFeaturizer({"features": [["pos", "pos2"]]}) + + train_message = Message(sentence) + test_message = Message(sentence) + + train_message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence)) + test_message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence)) + + SpacyTokenizer().process(train_message) + SpacyTokenizer().process(test_message) + + featurizer.train(TrainingData([train_message])) + + featurizer.process(test_message) + + seq_vec, sen_vec = test_message.get_sparse_features(TEXT, []) + + assert isinstance(seq_vec, scipy.sparse.coo_matrix) + assert sen_vec is None + + assert np.all(seq_vec.toarray() == expected) diff --git a/tests/nlu/featurizers/test_lm_featurizer.py b/tests/nlu/featurizers/test_lm_featurizer.py new file mode 100644 index 000000000000..a93e0e5e384f --- /dev/null +++ b/tests/nlu/featurizers/test_lm_featurizer.py @@ -0,0 +1,217 @@ +import numpy as np +import pytest + +from rasa.nlu.training_data import TrainingData +from rasa.nlu.featurizers.dense_featurizer.lm_featurizer import LanguageModelFeaturizer +from rasa.nlu.utils.hugging_face.hf_transformers import HFTransformersNLP +from rasa.nlu.constants import TEXT, INTENT +from rasa.nlu.training_data import Message + + +@pytest.mark.skip(reason="Results in random crashing of github action workers") +@pytest.mark.parametrize( + "model_name, texts, expected_shape, expected_sequence_vec, expected_cls_vec", + [ + ( + "bert", + ["Good evening.", "here is the sentence I want embeddings for."], + [(3, 768), (9, 768)], + [ + [0.5727445, -0.16078179], + [-0.5485125, 0.09632876, -0.4278888, 0.11438395, 0.18316492], + ], + [ + [0.068804, 0.32802248, -0.11250398, -0.11338018, -0.37116352], + [0.05909364, 0.06433402, 0.08569086, -0.16530034, -0.11396906], + ], + ), + ( + "gpt", + ["Good evening.", "here is the sentence I want embeddings for."], + [(3, 768), (9, 768)], + [ + [-0.0630323737859726, 0.4029877185821533], + [ + 0.8072432279586792, + -0.08990508317947388, + 0.9985930919647217, + -0.38779014348983765, + 0.08921952545642853, + ], + ], + [ + [ + 0.16997766494750977, + 0.1493849903345108, + 0.39421725273132324, + -0.5753618478775024, + 0.05096133053302765, + ], + [ + 0.41056010127067566, + -0.1169343888759613, + -0.3019704818725586, + -0.40207183361053467, + 0.6289798021316528, + ], + ], + ), + ( + "gpt2", + ["Good evening.", "here is the sentence I want embeddings for."], + [(3, 768), (9, 768)], + [ + [-0.03382749, -0.05373593], + [-0.18434484, -0.5386464, -0.11122551, -0.95434338, 0.28311089], + ], + [ + [ + -0.04710008203983307, + -0.2793063223361969, + -0.23804056644439697, + -0.3212292492389679, + 0.11430201679468155, + ], + [ + -0.1809544414281845, + -0.017152192071080208, + -0.3176477551460266, + -0.008387327194213867, + 0.3365338146686554, + ], + ], + ), + ( + "xlnet", + ["Good evening.", "here is the sentence I want embeddings for."], + [(3, 768), (9, 768)], + [ + [1.7612367868423462, 2.5819129943847656], + [ + 0.784195065498352, + 0.7068007588386536, + 1.5883606672286987, + 1.891886591911316, + 2.5209126472473145, + ], + ], + [ + [ + 2.171574831008911, 
+ -1.5377449989318848, + -3.2671749591827393, + 0.22520869970321655, + -1.598855972290039, + ], + [ + 1.6516317129135132, + 0.021670114248991013, + -2.5114030838012695, + 1.447351098060608, + -2.5866634845733643, + ], + ], + ), + ( + "distilbert", + ["Good evening.", "here is the sentence I want embeddings for."], + [(3, 768), (9, 768)], + [ + [0.22866562008857727, -0.0575055330991745], + [ + -0.6448041796684265, + -0.5105321407318115, + -0.4892978072166443, + 0.17531153559684753, + 0.22717803716659546, + ], + ], + [ + [ + -0.09814466536045074, + -0.07325993478298187, + 0.22358475625514984, + -0.20274735987186432, + -0.07363069802522659, + ], + [ + -0.146609365940094, + -0.07373693585395813, + 0.016850866377353668, + -0.2407529354095459, + -0.0979844480752945, + ], + ], + ), + ( + "roberta", + ["Good evening.", "here is the sentence I want embeddings for."], + [(3, 768), (9, 768)], + [ + [-0.3092685, 0.09567838], + [0.02152853, -0.08026707, -0.1080862, 0.12423468, -0.05378958], + ], + [ + [ + -0.03930358216166496, + 0.034788478165864944, + 0.12246038764715195, + 0.08401528000831604, + 0.7026961445808411, + ], + [ + -0.018586941063404083, + -0.09835464507341385, + 0.03242188319563866, + 0.09366855770349503, + 0.4458026587963104, + ], + ], + ), + ], +) +def test_lm_featurizer_shape_values( + model_name, texts, expected_shape, expected_sequence_vec, expected_cls_vec +): + transformers_config = {"model_name": model_name} + + transformers_nlp = HFTransformersNLP(transformers_config) + lm_featurizer = LanguageModelFeaturizer() + + messages = [] + for text in texts: + messages.append(Message.build(text=text)) + td = TrainingData(messages) + + transformers_nlp.train(td) + lm_featurizer.train(td) + + for index in range(len(texts)): + + computed_sequence_vec, computed_sentence_vec = messages[ + index + ].get_dense_features(TEXT, []) + + assert computed_sequence_vec.shape[0] == expected_shape[index][0] - 1 + assert computed_sequence_vec.shape[1] == expected_shape[index][1] + assert computed_sentence_vec.shape[0] == 1 + assert computed_sentence_vec.shape[1] == expected_shape[index][1] + + # Look at the value of first dimension for a few starting timesteps + assert np.allclose( + computed_sequence_vec[: len(expected_sequence_vec[index]), 0], + expected_sequence_vec[index], + atol=1e-5, + ) + + # Look at the first value of first five dimensions + assert np.allclose( + computed_sentence_vec[0][:5], expected_cls_vec[index], atol=1e-5 + ) + + intent_sequence_vec, intent_sentence_vec = messages[index].get_dense_features( + INTENT, [] + ) + + assert intent_sequence_vec is None + assert intent_sentence_vec is None diff --git a/tests/nlu/featurizers/test_mitie_featurizer.py b/tests/nlu/featurizers/test_mitie_featurizer.py new file mode 100644 index 000000000000..15951cc56a8f --- /dev/null +++ b/tests/nlu/featurizers/test_mitie_featurizer.py @@ -0,0 +1,67 @@ +import numpy as np + +from rasa.nlu.constants import TEXT, RESPONSE, INTENT, TOKENS_NAMES +from rasa.nlu.training_data import Message, TrainingData +from rasa.nlu.tokenizers.mitie_tokenizer import MitieTokenizer +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.featurizers.dense_featurizer.mitie_featurizer import MitieFeaturizer + + +def test_mitie_featurizer(mitie_feature_extractor): + + featurizer = MitieFeaturizer.create({}, RasaNLUModelConfig()) + + sentence = "Hey how are you today" + message = Message(sentence) + MitieTokenizer().process(message) + tokens = message.get(TOKENS_NAMES[TEXT]) + + seq_vec, sen_vec = 
featurizer.features_for_tokens(tokens, mitie_feature_extractor) + + expected = np.array( + [0.00000000e00, -5.12735510e00, 4.39929873e-01, -5.60760403e00, -8.26445103e00] + ) + expected_cls = np.array([0.0, -4.4551446, 0.26073121, -1.46632245, -1.84205751]) + + assert 6 == len(seq_vec) + len(sen_vec) + assert np.allclose(seq_vec[0][:5], expected, atol=1e-5) + assert np.allclose(sen_vec[-1][:5], expected_cls, atol=1e-5) + + +def test_mitie_featurizer_train(mitie_feature_extractor): + + featurizer = MitieFeaturizer.create({}, RasaNLUModelConfig()) + + sentence = "Hey how are you today" + message = Message(sentence) + message.set(RESPONSE, sentence) + message.set(INTENT, "intent") + MitieTokenizer().train(TrainingData([message])) + + featurizer.train( + TrainingData([message]), + RasaNLUModelConfig(), + **{"mitie_feature_extractor": mitie_feature_extractor}, + ) + + expected = np.array( + [0.00000000e00, -5.12735510e00, 4.39929873e-01, -5.60760403e00, -8.26445103e00] + ) + expected_cls = np.array([0.0, -4.4551446, 0.26073121, -1.46632245, -1.84205751]) + + seq_vec, sen_vec = message.get_dense_features(TEXT, []) + + assert len(message.get(TOKENS_NAMES[TEXT])) == len(seq_vec) + assert np.allclose(seq_vec[0][:5], expected, atol=1e-5) + assert np.allclose(sen_vec[-1][:5], expected_cls, atol=1e-5) + + seq_vec, sen_vec = message.get_dense_features(RESPONSE, []) + + assert len(message.get(TOKENS_NAMES[RESPONSE])) == len(seq_vec) + assert np.allclose(seq_vec[0][:5], expected, atol=1e-5) + assert np.allclose(sen_vec[-1][:5], expected_cls, atol=1e-5) + + seq_vec, sen_vec = message.get_dense_features(INTENT, []) + + assert seq_vec is None + assert sen_vec is None diff --git a/tests/nlu/featurizers/test_regex_featurizer.py b/tests/nlu/featurizers/test_regex_featurizer.py new file mode 100644 index 000000000000..b64717a11a51 --- /dev/null +++ b/tests/nlu/featurizers/test_regex_featurizer.py @@ -0,0 +1,263 @@ +from typing import Text, List, Any + +import numpy as np +import pytest + +from rasa.nlu.training_data import TrainingData +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer +from rasa.nlu.featurizers.sparse_featurizer.regex_featurizer import RegexFeaturizer +from rasa.nlu.constants import TEXT, RESPONSE, SPACY_DOCS, TOKENS_NAMES, INTENT +from rasa.nlu.tokenizers.spacy_tokenizer import SpacyTokenizer +from rasa.nlu.training_data import Message + + +@pytest.mark.parametrize( + "sentence, expected, labeled_tokens", + [ + ( + "hey how are you today", + [ + [0.0, 1.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + ], + [0], + ), + ( + "hey 456 how are you", + [ + [0.0, 1.0, 0.0], + [1.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [1.0, 1.0, 0.0], + ], + [1, 0], + ), + ( + "blah balh random eh", + [ + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + ], + [], + ), + ( + "a 1 digit number", + [ + [0.0, 0.0, 0.0], + [1.0, 0.0, 1.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [1.0, 0.0, 1.0], + ], + [1, 1], + ), + ], +) +def test_regex_featurizer(sentence, expected, labeled_tokens, spacy_nlp): + from rasa.nlu.featurizers.sparse_featurizer.regex_featurizer import RegexFeaturizer + + patterns = [ + {"pattern": "[0-9]+", "name": "number", "usage": "intent"}, + {"pattern": "\\bhey*", "name": "hello", "usage": "intent"}, + {"pattern": "[0-1]+", "name": "binary", "usage": "intent"}, + ] + ftr = RegexFeaturizer({}, 
known_patterns=patterns) + + # adds tokens to the message + tokenizer = SpacyTokenizer({}) + message = Message(sentence, data={RESPONSE: sentence}) + message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence)) + tokenizer.process(message) + + sequence_features, sentence_features = ftr._features_for_patterns(message, TEXT) + assert np.allclose(sequence_features.toarray(), expected[:-1], atol=1e-10) + assert np.allclose(sentence_features.toarray(), expected[-1], atol=1e-10) + + # the tokenizer should have added tokens + assert len(message.get(TOKENS_NAMES[TEXT], [])) > 0 + # the number of regex matches on each token should match + for i, token in enumerate(message.get(TOKENS_NAMES[TEXT])): + token_matches = token.get("pattern").values() + num_matches = sum(token_matches) + assert num_matches == labeled_tokens.count(i) + + +@pytest.mark.parametrize( + "sentence, expected, labeled_tokens", + [ + ( + "lemonade and mapo tofu", + [[1.0, 0.0], [0.0, 0.0], [0.0, 1.0], [0.0, 1.0], [1.0, 1.0]], + [0.0, 2.0, 3.0], + ), + ( + "a cup of tea", + [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [1.0, 0.0], [1.0, 0.0]], + [3.0], + ), + ( + "Is burrito my favorite food?", + [ + [0.0, 0.0], + [0.0, 1.0], + [0.0, 0.0], + [0.0, 0.0], + [0.0, 0.0], + [0.0, 0.0], + [0.0, 1.0], + ], + [1.0], + ), + ("I want club?mate", [[0.0, 0.0], [0.0, 0.0], [1.0, 0.0], [1.0, 0.0]], [2.0]), + ], +) +def test_lookup_tables(sentence, expected, labeled_tokens, spacy_nlp): + from rasa.nlu.featurizers.sparse_featurizer.regex_featurizer import RegexFeaturizer + + lookups = [ + { + "name": "drinks", + "elements": ["mojito", "lemonade", "sweet berry wine", "tea", "club?mate"], + }, + {"name": "plates", "elements": "data/test/lookup_tables/plates.txt"}, + ] + ftr = RegexFeaturizer() + training_data = TrainingData() + training_data.lookup_tables = lookups + ftr.train(training_data) + + # adds tokens to the message + component_config = {"name": "SpacyTokenizer"} + tokenizer = SpacyTokenizer(component_config) + message = Message(sentence) + message.set("text_spacy_doc", spacy_nlp(sentence)) + tokenizer.process(message) + + sequence_features, sentence_features = ftr._features_for_patterns(message, TEXT) + assert np.allclose(sequence_features.toarray(), expected[:-1], atol=1e-10) + assert np.allclose(sentence_features.toarray(), expected[-1], atol=1e-10) + + # the tokenizer should have added tokens + assert len(message.get(TOKENS_NAMES[TEXT], [])) > 0 + # the number of regex matches on each token should match + for i, token in enumerate(message.get(TOKENS_NAMES[TEXT])): + token_matches = token.get("pattern").values() + num_matches = sum(token_matches) + assert num_matches == labeled_tokens.count(i) + + +@pytest.mark.parametrize( + "sentence, expected, expected_cls", + [ + ("hey how are you today", [0.0, 1.0, 0.0], [0.0, 1.0, 0.0]), + ("hey 456 how are you", [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]), + ("blah balh random eh", [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]), + ("a 1 digit number", [0.0, 0.0, 0.0], [1.0, 0.0, 1.0]), + ], +) +def test_regex_featurizer_no_sequence(sentence, expected, expected_cls, spacy_nlp): + + patterns = [ + {"pattern": "[0-9]+", "name": "number", "usage": "intent"}, + {"pattern": "\\bhey*", "name": "hello", "usage": "intent"}, + {"pattern": "[0-1]+", "name": "binary", "usage": "intent"}, + ] + ftr = RegexFeaturizer({}, known_patterns=patterns) + + # adds tokens to the message + tokenizer = SpacyTokenizer() + message = Message(sentence) + message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence)) + tokenizer.process(message) + + sequence_featrures, 
sentence_features = ftr._features_for_patterns(message, TEXT) + assert np.allclose(sequence_featrures.toarray()[0], expected, atol=1e-10) + assert np.allclose(sentence_features.toarray()[-1], expected_cls, atol=1e-10) + + +def test_regex_featurizer_train(): + + patterns = [ + {"pattern": "[0-9]+", "name": "number", "usage": "intent"}, + {"pattern": "\\bhey*", "name": "hello", "usage": "intent"}, + {"pattern": "[0-1]+", "name": "binary", "usage": "intent"}, + ] + + featurizer = RegexFeaturizer.create({}, RasaNLUModelConfig()) + + sentence = "hey how are you today 19.12.2019 ?" + message = Message(sentence) + message.set(RESPONSE, sentence) + message.set(INTENT, "intent") + WhitespaceTokenizer().train(TrainingData([message])) + + featurizer.train( + TrainingData([message], regex_features=patterns), RasaNLUModelConfig() + ) + + expected = np.array([0, 1, 0]) + expected_cls = np.array([1, 1, 1]) + + seq_vecs, sen_vec = message.get_sparse_features(TEXT, []) + + assert (6, 3) == seq_vecs.shape + assert (1, 3) == sen_vec.shape + assert np.all(seq_vecs.toarray()[0] == expected) + assert np.all(sen_vec.toarray()[-1] == expected_cls) + + seq_vecs, sen_vec = message.get_sparse_features(RESPONSE, []) + + assert (6, 3) == seq_vecs.shape + assert (1, 3) == sen_vec.shape + assert np.all(seq_vecs.toarray()[0] == expected) + assert np.all(sen_vec.toarray()[-1] == expected_cls) + + seq_vecs, sen_vec = message.get_sparse_features(INTENT, []) + + assert seq_vecs is None + assert sen_vec is None + + +@pytest.mark.parametrize( + "sentence, sequence_vector, sentence_vector, case_sensitive", + [ + ("Hey How are you today", [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], True), + ("Hey How are you today", [0.0, 1.0, 0.0], [0.0, 1.0, 0.0], False), + ("Hey 456 How are you", [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], True), + ("Hey 456 How are you", [0.0, 1.0, 0.0], [1.0, 1.0, 0.0], False), + ], +) +def test_regex_featurizer_case_sensitive( + sentence: Text, + sequence_vector: List[float], + sentence_vector: List[float], + case_sensitive: bool, + spacy_nlp: Any, +): + + patterns = [ + {"pattern": "[0-9]+", "name": "number", "usage": "intent"}, + {"pattern": "\\bhey*", "name": "hello", "usage": "intent"}, + {"pattern": "[0-1]+", "name": "binary", "usage": "intent"}, + ] + ftr = RegexFeaturizer({"case_sensitive": case_sensitive}, known_patterns=patterns) + + # adds tokens to the message + tokenizer = SpacyTokenizer() + message = Message(sentence) + message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence)) + tokenizer.process(message) + + sequence_featrures, sentence_features = ftr._features_for_patterns(message, TEXT) + assert np.allclose(sequence_featrures.toarray()[0], sequence_vector, atol=1e-10) + assert np.allclose(sentence_features.toarray()[-1], sentence_vector, atol=1e-10) diff --git a/tests/nlu/featurizers/test_spacy_featurizer.py b/tests/nlu/featurizers/test_spacy_featurizer.py new file mode 100644 index 000000000000..ac605214a0d1 --- /dev/null +++ b/tests/nlu/featurizers/test_spacy_featurizer.py @@ -0,0 +1,195 @@ +import numpy as np +import pytest + +from rasa.nlu import training_data +from rasa.nlu.training_data import Message +from rasa.nlu.training_data import TrainingData +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.featurizers.dense_featurizer.spacy_featurizer import SpacyFeaturizer +from rasa.nlu.constants import SPACY_DOCS, TEXT, RESPONSE, INTENT + + +def test_spacy_featurizer_cls_vector(spacy_nlp): + featurizer = SpacyFeaturizer.create({}, RasaNLUModelConfig()) + + sentence = "Hey how are you today" + message = 
Message(sentence) + message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence)) + + featurizer._set_spacy_features(message) + + seq_vecs, sen_vecs = message.get_dense_features(TEXT, []) + + expected = np.array([-0.28451, 0.31007, -0.57039, -0.073056, -0.17322]) + expected_cls = np.array([-0.196496, 0.3249364, -0.37408298, -0.10622784, 0.062756]) + + assert 5 == len(seq_vecs) + assert 1 == len(sen_vecs) + assert np.allclose(seq_vecs[0][:5], expected, atol=1e-5) + assert np.allclose(sen_vecs[-1][:5], expected_cls, atol=1e-5) + + +@pytest.mark.parametrize("sentence", ["hey how are you today"]) +def test_spacy_featurizer(sentence, spacy_nlp): + + ftr = SpacyFeaturizer.create({}, RasaNLUModelConfig()) + + doc = spacy_nlp(sentence) + vecs = ftr._features_for_doc(doc) + expected = [t.vector for t in doc] + + assert np.allclose(vecs, expected, atol=1e-5) + + +def test_spacy_training_sample_alignment(spacy_nlp_component): + from spacy.tokens import Doc + + m1 = Message.build(text="I have a feeling", intent="feeling") + m2 = Message.build(text="", intent="feeling") + m3 = Message.build(text="I am the last message", intent="feeling") + td = TrainingData(training_examples=[m1, m2, m3]) + + attribute_docs = spacy_nlp_component.docs_for_training_data(td) + + assert isinstance(attribute_docs["text"][0], Doc) + assert isinstance(attribute_docs["text"][1], Doc) + assert isinstance(attribute_docs["text"][2], Doc) + + assert [t.text for t in attribute_docs["text"][0]] == ["i", "have", "a", "feeling"] + assert [t.text for t in attribute_docs["text"][1]] == [] + assert [t.text for t in attribute_docs["text"][2]] == [ + "i", + "am", + "the", + "last", + "message", + ] + + +def test_spacy_intent_featurizer(spacy_nlp_component): + from rasa.nlu.featurizers.dense_featurizer.spacy_featurizer import SpacyFeaturizer + + td = training_data.load_data("data/examples/rasa/demo-rasa.json") + spacy_nlp_component.train(td, config=None) + spacy_featurizer = SpacyFeaturizer() + spacy_featurizer.train(td, config=None) + + intent_features_exist = np.array( + [ + True if example.get("intent_features") is not None else False + for example in td.intent_examples + ] + ) + + # no intent features should have been set + assert not any(intent_features_exist) + + +@pytest.mark.parametrize( + "sentence, expected", + [("hey how are you today", [-0.28451, 0.31007, -0.57039, -0.073056, -0.17322])], +) +def test_spacy_featurizer_sequence(sentence, expected, spacy_nlp): + from rasa.nlu.featurizers.dense_featurizer.spacy_featurizer import SpacyFeaturizer + + doc = spacy_nlp(sentence) + token_vectors = [t.vector for t in doc] + + ftr = SpacyFeaturizer.create({}, RasaNLUModelConfig()) + + greet = {"intent": "greet", "text_features": [0.5]} + + message = Message(sentence, greet) + message.set(SPACY_DOCS[TEXT], doc) + + ftr._set_spacy_features(message) + + seq_vecs, sen_vecs = message.get_dense_features(TEXT, []) + vecs = seq_vecs[0][:5] + + assert np.allclose(token_vectors[0][:5], vecs, atol=1e-4) + assert np.allclose(vecs, expected, atol=1e-4) + assert sen_vecs is not None + + +def test_spacy_featurizer_casing(spacy_nlp): + from rasa.nlu.featurizers.dense_featurizer.spacy_featurizer import SpacyFeaturizer + + # if this starts failing for the default model, we should think about + # removing the lower casing the spacy nlp component does when it + # retrieves vectors. For compressed spacy models (e.g. models + # ending in _sm) this test will most likely fail. 
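+    # the loop below featurizes each training example and its capitalized variant and expects near-identical vectors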
+ + ftr = SpacyFeaturizer.create({}, RasaNLUModelConfig()) + + td = training_data.load_data("data/examples/rasa/demo-rasa.json") + for e in td.intent_examples: + doc = spacy_nlp(e.text) + doc_capitalized = spacy_nlp(e.text.capitalize()) + + vecs = ftr._features_for_doc(doc) + vecs_capitalized = ftr._features_for_doc(doc_capitalized) + + assert np.allclose( + vecs, vecs_capitalized, atol=1e-5 + ), "Vectors are unequal for texts '{}' and '{}'".format( + e.text, e.text.capitalize() + ) + + +def test_spacy_featurizer_train(spacy_nlp): + + featurizer = SpacyFeaturizer.create({}, RasaNLUModelConfig()) + + sentence = "Hey how are you today" + message = Message(sentence) + message.set(RESPONSE, sentence) + message.set(INTENT, "intent") + message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence)) + message.set(SPACY_DOCS[RESPONSE], spacy_nlp(sentence)) + + featurizer.train(TrainingData([message]), RasaNLUModelConfig()) + + expected = np.array([-0.28451, 0.31007, -0.57039, -0.073056, -0.17322]) + expected_cls = np.array([-0.196496, 0.3249364, -0.37408298, -0.10622784, 0.062756]) + + seq_vecs, sen_vecs = message.get_dense_features(TEXT, []) + + assert 5 == len(seq_vecs) + assert 1 == len(sen_vecs) + assert np.allclose(seq_vecs[0][:5], expected, atol=1e-5) + assert np.allclose(sen_vecs[-1][:5], expected_cls, atol=1e-5) + + seq_vecs, sen_vecs = message.get_dense_features(RESPONSE, []) + + assert 5 == len(seq_vecs) + assert 1 == len(sen_vecs) + assert np.allclose(seq_vecs[0][:5], expected, atol=1e-5) + assert np.allclose(sen_vecs[-1][:5], expected_cls, atol=1e-5) + + seq_vecs, sen_vecs = message.get_dense_features(INTENT, []) + + assert seq_vecs is None + assert sen_vecs is None + + +def test_spacy_featurizer_using_empty_model(): + from rasa.nlu.featurizers.dense_featurizer.spacy_featurizer import SpacyFeaturizer + import spacy + + sentence = "This test is using an empty spaCy model" + + model = spacy.blank("en") + doc = model(sentence) + + ftr = SpacyFeaturizer.create({}, RasaNLUModelConfig()) + + message = Message(sentence) + message.set(SPACY_DOCS[TEXT], doc) + + ftr._set_spacy_features(message) + + seq_vecs, sen_vecs = message.get_dense_features(TEXT, []) + + assert seq_vecs is None + assert sen_vecs is None diff --git a/tests/nlu/selectors/__init__.py b/tests/nlu/selectors/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/nlu/selectors/test_selectors.py b/tests/nlu/selectors/test_selectors.py new file mode 100644 index 000000000000..1e02d7fafaf7 --- /dev/null +++ b/tests/nlu/selectors/test_selectors.py @@ -0,0 +1,68 @@ +import pytest + +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.training_data import load_data +from rasa.nlu.train import Trainer, Interpreter +from rasa.utils.tensorflow.constants import ( + EPOCHS, + MASKED_LM, + NUM_TRANSFORMER_LAYERS, + TRANSFORMER_SIZE, +) +from rasa.nlu.constants import RESPONSE_SELECTOR_PROPERTY_NAME + + +@pytest.mark.parametrize( + "pipeline", + [ + [ + {"name": "WhitespaceTokenizer"}, + {"name": "CountVectorsFeaturizer"}, + {"name": "ResponseSelector", EPOCHS: 1}, + ], + [ + {"name": "WhitespaceTokenizer"}, + {"name": "CountVectorsFeaturizer"}, + { + "name": "ResponseSelector", + EPOCHS: 1, + MASKED_LM: True, + TRANSFORMER_SIZE: 256, + NUM_TRANSFORMER_LAYERS: 1, + }, + ], + ], +) +def test_train_selector(pipeline, component_builder, tmpdir): + # use data that include some responses + td = load_data("data/examples/rasa/demo-rasa.md") + td_responses = load_data("data/examples/rasa/demo-rasa-responses.md") + td = 
td.merge(td_responses) + + nlu_config = RasaNLUModelConfig({"language": "en", "pipeline": pipeline}) + + trainer = Trainer(nlu_config) + trainer.train(td) + + persisted_path = trainer.persist(tmpdir) + + assert trainer.pipeline + + loaded = Interpreter.load(persisted_path, component_builder) + parsed = loaded.parse("hello") + + assert loaded.pipeline + assert parsed is not None + assert ( + parsed.get(RESPONSE_SELECTOR_PROPERTY_NAME) + .get("default") + .get("full_retrieval_intent") + ) is not None + + ranking = parsed.get(RESPONSE_SELECTOR_PROPERTY_NAME).get("default").get("ranking") + assert ranking is not None + + for rank in ranking: + assert rank.get("name") is not None + assert rank.get("confidence") is not None + assert rank.get("full_retrieval_intent") is not None diff --git a/tests/nlu/test_components.py b/tests/nlu/test_components.py new file mode 100644 index 000000000000..ec90bcfee2d8 --- /dev/null +++ b/tests/nlu/test_components.py @@ -0,0 +1,101 @@ +import pytest + +from rasa.nlu import registry, train +from rasa.nlu.components import find_unavailable_packages +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.model import Interpreter, Metadata + + +@pytest.mark.parametrize("component_class", registry.component_classes) +def test_no_components_with_same_name(component_class): + """The name of the components need to be unique as they will + be referenced by name when defining processing pipelines.""" + + names = [cls.name for cls in registry.component_classes] + assert ( + names.count(component_class.name) == 1 + ), f"There is more than one component named {component_class.name}" + + +@pytest.mark.parametrize("component_class", registry.component_classes) +def test_all_required_components_can_be_satisfied(component_class): + """Checks that all required_components are present in the registry.""" + + def _required_component_in_registry(component): + for previous_component in registry.component_classes: + if issubclass(previous_component, component): + return True + return False + + missing_components = [] + for required_component in component_class.required_components(): + if not _required_component_in_registry(required_component): + missing_components.append(required_component.name) + + assert missing_components == [], ( + f"There is no required components {missing_components} " + f"for '{component_class.name}'." 
+ ) + + +def test_find_unavailable_packages(): + unavailable = find_unavailable_packages( + ["my_made_up_package_name", "io", "foo_bar", "foo_bar"] + ) + assert unavailable == {"my_made_up_package_name", "foo_bar"} + + +def test_builder_create_by_module_path(component_builder, blank_config): + from rasa.nlu.featurizers.sparse_featurizer.regex_featurizer import RegexFeaturizer + + path = "rasa.nlu.featurizers.sparse_featurizer.regex_featurizer.RegexFeaturizer" + component_config = {"name": path} + component = component_builder.create_component(component_config, blank_config) + assert type(component) == RegexFeaturizer + + +@pytest.mark.parametrize( + "test_input, expected_output, error", + [ + ("my_made_up_component", "Cannot find class", Exception), + ( + "rasa.nlu.featurizers.regex_featurizer.MadeUpClass", + "Failed to find class", + Exception, + ), + ("made.up.path.RegexFeaturizer", "No module named", ModuleNotFoundError), + ], +) +def test_create_component_exception_messages( + component_builder, blank_config, test_input, expected_output, error +): + + with pytest.raises(error): + component_config = {"name": test_input} + component_builder.create_component(component_config, blank_config) + + +def test_builder_load_unknown(component_builder): + with pytest.raises(Exception) as excinfo: + component_meta = {"name": "my_made_up_componment"} + component_builder.load_component(component_meta, "", Metadata({}, None)) + assert "Cannot find class" in str(excinfo.value) + + +async def test_example_component(component_builder, tmp_path): + _config = RasaNLUModelConfig( + {"pipeline": [{"name": "tests.nlu.example_component.MyComponent"}]} + ) + + (trainer, trained, persisted_path) = await train( + _config, + data="./data/examples/rasa/demo-rasa.json", + path=str(tmp_path), + component_builder=component_builder, + ) + + assert trainer.pipeline + + loaded = Interpreter.load(persisted_path, component_builder) + + assert loaded.parse("test") is not None diff --git a/tests/nlu/test_config.py b/tests/nlu/test_config.py new file mode 100644 index 000000000000..ef5dcc2a3f22 --- /dev/null +++ b/tests/nlu/test_config.py @@ -0,0 +1,236 @@ +import os +from typing import Text, List +from unittest.mock import Mock + +import pytest +from _pytest.monkeypatch import MonkeyPatch + +from rasa.importers import autoconfig +from rasa.importers.rasa import RasaFileImporter +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu import config, load_data +from rasa.nlu import components +from rasa.nlu.components import ComponentBuilder +from rasa.nlu.constants import TRAINABLE_EXTRACTORS +from rasa.nlu.model import Trainer +from tests.nlu.utilities import write_file_config + + +def test_blank_config(blank_config): + file_config = {} + f = write_file_config(file_config) + final_config = config.load(f.name) + + assert final_config.as_dict() == blank_config.as_dict() + + +def test_invalid_config_json(tmp_path): + file_config = """pipeline: [pretrained_embeddings_spacy""" # invalid yaml + + f = tmp_path / "tmp_config_file.json" + f.write_text(file_config) + + with pytest.raises(config.InvalidConfigError): + config.load(str(f)) + + +def test_invalid_many_tokenizers_in_config(): + nlu_config = { + "pipeline": [{"name": "WhitespaceTokenizer"}, {"name": "SpacyTokenizer"}] + } + + with pytest.raises(config.InvalidConfigError) as execinfo: + Trainer(config.RasaNLUModelConfig(nlu_config)) + assert "More than one tokenizer is used" in str(execinfo.value) + + +@pytest.mark.parametrize( + "_config", + [ + {"pipeline": [{"name": 
"WhitespaceTokenizer"}, {"name": "SpacyFeaturizer"}]}, + pytest.param( + { + "pipeline": [ + {"name": "WhitespaceTokenizer"}, + {"name": "ConveRTFeaturizer"}, + ] + }, + ), + pytest.param( + { + "pipeline": [ + {"name": "ConveRTTokenizer"}, + {"name": "LanguageModelFeaturizer"}, + ] + }, + ), + ], +) +@pytest.mark.skip_on_windows +def test_missing_required_component(_config): + with pytest.raises(config.InvalidConfigError) as execinfo: + Trainer(config.RasaNLUModelConfig(_config)) + assert "Add required components to the pipeline" in str(execinfo.value) + + +@pytest.mark.parametrize( + "pipeline_config", [{"pipeline": [{"name": "CountVectorsFeaturizer"}]}] +) +def test_missing_property(pipeline_config): + with pytest.raises(config.InvalidConfigError) as execinfo: + Trainer(config.RasaNLUModelConfig(pipeline_config)) + assert "Add required components to the pipeline" in str(execinfo.value) + + +def test_default_config_file(): + final_config = config.RasaNLUModelConfig() + assert len(final_config) > 1 + + +def test_set_attr_on_component(): + _config = RasaNLUModelConfig( + { + "language": "en", + "pipeline": [ + {"name": "SpacyNLP"}, + {"name": "SpacyTokenizer"}, + {"name": "SpacyFeaturizer"}, + {"name": "DIETClassifier"}, + ], + } + ) + idx_classifier = _config.component_names.index("DIETClassifier") + idx_tokenizer = _config.component_names.index("SpacyTokenizer") + + _config.set_component_attr(idx_classifier, epochs=10) + + assert _config.for_component(idx_tokenizer) == {"name": "SpacyTokenizer"} + assert _config.for_component(idx_classifier) == { + "name": "DIETClassifier", + "epochs": 10, + } + + +def test_override_defaults_supervised_embeddings_pipeline(): + builder = ComponentBuilder() + + _config = RasaNLUModelConfig( + { + "language": "en", + "pipeline": [ + {"name": "SpacyNLP"}, + {"name": "SpacyTokenizer"}, + {"name": "SpacyFeaturizer", "pooling": "max"}, + { + "name": "DIETClassifier", + "epochs": 10, + "hidden_layers_sizes": {"text": [256, 128]}, + }, + ], + } + ) + + idx_featurizer = _config.component_names.index("SpacyFeaturizer") + idx_classifier = _config.component_names.index("DIETClassifier") + + component1 = builder.create_component( + _config.for_component(idx_featurizer), _config + ) + assert component1.component_config["pooling"] == "max" + + component2 = builder.create_component( + _config.for_component(idx_classifier), _config + ) + assert component2.component_config["epochs"] == 10 + assert ( + component2.defaults["hidden_layers_sizes"].keys() + == component2.component_config["hidden_layers_sizes"].keys() + ) + + +def config_files_in(config_directory: Text): + return [ + os.path.join(config_directory, f) + for f in os.listdir(config_directory) + if os.path.isfile(os.path.join(config_directory, f)) + ] + + +@pytest.mark.parametrize( + "config_file", + config_files_in("data/configs_for_docs") + config_files_in("docker/configs"), +) +async def test_train_docker_and_docs_configs( + config_file: Text, monkeypatch: MonkeyPatch +): + monkeypatch.setattr(autoconfig, "_dump_config", Mock()) + importer = RasaFileImporter(config_file=config_file) + imported_config = await importer.get_config() + + loaded_config = config.load(imported_config) + + assert len(loaded_config.component_names) > 1 + assert loaded_config.language == imported_config["language"] + + +@pytest.mark.parametrize( + "config_path, data_path, expected_warning_excerpts", + [ + ( + "data/test_config/config_supervised_embeddings.yml", + "data/examples/rasa", + ["add a 'ResponseSelector'"], + ), + ( + 
"data/test_config/config_spacy_entity_extractor.yml", + "data/test/md_converted_to_json.json", + [f"add one of {TRAINABLE_EXTRACTORS}"], + ), + ( + "data/test_config/config_crf_no_regex.yml", + "data/test/duplicate_intents_markdown/demo-rasa-intents-2.md", + ["training data with regexes", "include a 'RegexFeaturizer'"], + ), + ( + "data/test_config/config_crf_no_regex.yml", + "data/test/lookup_tables/lookup_table.json", + ["training data consisting of lookup tables", "add a 'RegexFeaturizer'"], + ), + ( + "data/test_config/config_spacy_entity_extractor.yml", + "data/test/lookup_tables/lookup_table.json", + [ + "add a 'DIETClassifier' or a 'CRFEntityExtractor' with the 'pattern' feature" + ], + ), + ( + "data/test_config/config_crf_no_pattern_feature.yml", + "data/test/lookup_tables/lookup_table.md", + "your NLU pipeline's 'CRFEntityExtractor' does not include the 'pattern' feature", + ), + ( + "data/test_config/config_crf_no_synonyms.yml", + "data/test/markdown_single_sections/synonyms_only.md", + ["add an 'EntitySynonymMapper'"], + ), + ( + "data/test_config/config_embedding_intent_response_selector.yml", + "data/test/demo-rasa-composite-entities.md", + ["include either 'DIETClassifier' or 'CRFEntityExtractor'"], + ), + ], +) +def test_validate_required_components_from_data( + config_path: Text, data_path: Text, expected_warning_excerpts: List[Text] +): + loaded_config = config.load(config_path) + trainer = Trainer(loaded_config) + training_data = load_data(data_path) + with pytest.warns(UserWarning) as record: + components.validate_required_components_from_data( + trainer.pipeline, training_data + ) + assert len(record) == 1 + assert all( + [excerpt in record[0].message.args[0]] for excerpt in expected_warning_excerpts + ) diff --git a/tests/nlu/base/test_evaluation.py b/tests/nlu/test_evaluation.py similarity index 61% rename from tests/nlu/base/test_evaluation.py rename to tests/nlu/test_evaluation.py index e51567cc5c17..8d0d607e2ece 100644 --- a/tests/nlu/base/test_evaluation.py +++ b/tests/nlu/test_evaluation.py @@ -1,15 +1,20 @@ -# coding=utf-8 +from sanic.request import Request +from typing import Text, Iterator, List, Dict, Any + import asyncio -import logging import pytest +from _pytest.tmpdir import TempdirFactory import rasa.utils.io +from rasa.nlu.constants import NO_ENTITY_TAG +from rasa.nlu.classifiers.diet_classifier import DIETClassifier +from rasa.nlu.extractors.crf_entity_extractor import CRFEntityExtractor from rasa.test import compare_nlu_models -from rasa.nlu.extractors import EntityExtractor +from rasa.nlu.extractors.extractor import EntityExtractor from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor from rasa.nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor -from rasa.nlu.model import Interpreter +from rasa.nlu.model import Interpreter, Trainer from rasa.nlu.test import ( is_token_within_entity, do_entities_overlap, @@ -28,24 +33,37 @@ evaluate_intents, evaluate_entities, evaluate_response_selections, - get_unique_labels, - get_evaluation_metrics, NO_ENTITY, collect_successful_entity_predictions, collect_incorrect_entity_predictions, + merge_confidences, + _get_entity_confidences, ) from rasa.nlu.test import does_token_cross_borders from rasa.nlu.test import align_entity_predictions from rasa.nlu.test import determine_intersection from rasa.nlu.test import determine_token_labels from rasa.nlu.config import RasaNLUModelConfig -from rasa.nlu.tokenizers import Token -from rasa.nlu import utils +from rasa.nlu.tokenizers.tokenizer 
import Token import json import os -from rasa.nlu import training_data, config -from tests.nlu import utilities -from tests.nlu.conftest import DEFAULT_DATA_PATH, NLU_DEFAULT_CONFIG_PATH +from rasa.nlu import training_data +from tests.nlu.conftest import DEFAULT_DATA_PATH +from rasa.nlu.selectors.response_selector import ResponseSelector +from rasa.nlu.test import is_response_selector_present +from rasa.utils.tensorflow.constants import EPOCHS, ENTITY_RECOGNITION + + +# https://github.com/pytest-dev/pytest-asyncio/issues/68 +# this event_loop is used by pytest-asyncio, and redefining it +# is currently the only way of changing the scope of this fixture + + +@pytest.yield_fixture(scope="session") +def event_loop(request: Request) -> Iterator[asyncio.AbstractEventLoop]: + loop = asyncio.get_event_loop_policy().new_event_loop() + yield loop + loop.close() @pytest.fixture(scope="session") @@ -57,25 +75,6 @@ def loop(): loop.close() -@pytest.fixture(scope="session") -async def pretrained_interpreter(component_builder, tmpdir_factory): - conf = RasaNLUModelConfig( - { - "pipeline": [ - {"name": "SpacyNLP"}, - {"name": "SpacyEntityExtractor"}, - {"name": "DucklingHTTPExtractor"}, - ] - } - ) - return await utilities.interpreter_for( - component_builder, - data="./data/examples/rasa/demo-rasa.json", - path=tmpdir_factory.mktemp("projects").strpath, - config=conf, - ) - - # Chinese Example # "对面食过敏" -> To be allergic to wheat-based food CH_wrong_segmentation = [ @@ -196,7 +195,7 @@ def test_determine_token_labels_throws_error(): determine_token_labels( CH_correct_segmentation[0], [CH_correct_entity, CH_wrong_entity], - ["CRFEntityExtractor"], + [CRFEntityExtractor.name], ) @@ -208,18 +207,83 @@ def test_determine_token_labels_no_extractors(): def test_determine_token_labels_no_extractors_no_overlap(): - determine_token_labels(CH_correct_segmentation[0], EN_targets, None) + label = determine_token_labels(CH_correct_segmentation[0], EN_targets, None) + assert label == NO_ENTITY_TAG def test_determine_token_labels_with_extractors(): - determine_token_labels( + label = determine_token_labels( CH_correct_segmentation[0], [CH_correct_entity, CH_wrong_entity], [SpacyEntityExtractor.name, MitieEntityExtractor.name], ) + assert label == "direction" + + +@pytest.mark.parametrize( + "token, entities, extractors, expected_confidence", + [ + ( + Token("pizza", 4), + [ + { + "start": 4, + "end": 9, + "value": "pizza", + "entity": "food", + "extractor": "EntityExtractorA", + } + ], + ["EntityExtractorA"], + 0.0, + ), + (Token("pizza", 4), [], ["EntityExtractorA"], 0.0), + ( + Token("pizza", 4), + [ + { + "start": 4, + "end": 9, + "value": "pizza", + "entity": "food", + "confidence_entity": 0.87, + "extractor": "CRFEntityExtractor", + } + ], + ["CRFEntityExtractor"], + 0.87, + ), + ( + Token("pizza", 4), + [ + { + "start": 4, + "end": 9, + "value": "pizza", + "entity": "food", + "confidence_entity": 0.87, + "extractor": "DIETClassifier", + } + ], + ["DIETClassifier"], + 0.87, + ), + ], +) +def test_get_entity_confidences( + token: Token, + entities: List[Dict[Text, Any]], + extractors: List[Text], + expected_confidence: float, +): + confidence = _get_entity_confidences(token, entities, extractors) + + assert confidence == expected_confidence def test_label_merging(): + import numpy as np + aligned_predictions = [ { "target_labels": ["O", "O"], @@ -231,13 +295,35 @@ def test_label_merging(): }, ] - assert all(merge_labels(aligned_predictions) == ["O", "O", "LOC", "O", "O"]) - assert all( + assert 
np.all(merge_labels(aligned_predictions) == ["O", "O", "LOC", "O", "O"]) + assert np.all( merge_labels(aligned_predictions, "EntityExtractorA") == ["O", "O", "O", "O", "O"] ) +def test_confidence_merging(): + import numpy as np + + aligned_predictions = [ + { + "target_labels": ["O", "O"], + "extractor_labels": {"EntityExtractorA": ["O", "O"]}, + "confidences": {"EntityExtractorA": [0.0, 0.0]}, + }, + { + "target_labels": ["LOC", "O", "O"], + "extractor_labels": {"EntityExtractorA": ["O", "O", "O"]}, + "confidences": {"EntityExtractorA": [0.98, 0.0, 0.0]}, + }, + ] + + assert np.all( + merge_confidences(aligned_predictions, "EntityExtractorA") + == [0.0, 0.0, 0.98, 0.0, 0.0] + ) + + def test_drop_intents_below_freq(): td = training_data.load_data("data/examples/rasa/demo-rasa.json") clean_td = drop_intents_below_freq(td, 0) @@ -254,21 +340,29 @@ def test_drop_intents_below_freq(): def test_run_evaluation(unpacked_trained_moodbot_path): - data = DEFAULT_DATA_PATH - result = run_evaluation( - data, os.path.join(unpacked_trained_moodbot_path, "nlu"), errors=None + DEFAULT_DATA_PATH, + os.path.join(unpacked_trained_moodbot_path, "nlu"), + errors=False, + successes=False, + disable_plotting=True, ) + assert result.get("intent_evaluation") - assert result.get("entity_evaluation").get("CRFEntityExtractor") -def test_run_cv_evaluation(): +def test_run_cv_evaluation(pretrained_embeddings_spacy_config): td = training_data.load_data("data/examples/rasa/demo-rasa.json") - nlu_config = config.load("sample_configs/config_pretrained_embeddings_spacy.yml") n_folds = 2 - intent_results, entity_results = cross_validate(td, n_folds, nlu_config) + intent_results, entity_results, response_selection_results = cross_validate( + td, + n_folds, + pretrained_embeddings_spacy_config, + successes=False, + errors=False, + disable_plotting=True, + ) assert len(intent_results.train["Accuracy"]) == n_folds assert len(intent_results.train["Precision"]) == n_folds @@ -284,6 +378,67 @@ def test_run_cv_evaluation(): assert len(entity_results.test["CRFEntityExtractor"]["F1-score"]) == n_folds +def test_run_cv_evaluation_with_response_selector(): + training_data_obj = training_data.load_data("data/examples/rasa/demo-rasa.md") + training_data_responses_obj = training_data.load_data( + "data/examples/rasa/demo-rasa-responses.md" + ) + training_data_obj = training_data_obj.merge(training_data_responses_obj) + + nlu_config = RasaNLUModelConfig( + { + "language": "en", + "pipeline": [ + {"name": "WhitespaceTokenizer"}, + {"name": "CountVectorsFeaturizer"}, + {"name": "DIETClassifier", EPOCHS: 2}, + {"name": "ResponseSelector", EPOCHS: 2}, + ], + } + ) + + n_folds = 2 + intent_results, entity_results, response_selection_results = cross_validate( + training_data_obj, + n_folds, + nlu_config, + successes=False, + errors=False, + disable_plotting=True, + ) + + assert len(intent_results.train["Accuracy"]) == n_folds + assert len(intent_results.train["Precision"]) == n_folds + assert len(intent_results.train["F1-score"]) == n_folds + assert len(intent_results.test["Accuracy"]) == n_folds + assert len(intent_results.test["Precision"]) == n_folds + assert len(intent_results.test["F1-score"]) == n_folds + assert len(response_selection_results.train["Accuracy"]) == n_folds + assert len(response_selection_results.train["Precision"]) == n_folds + assert len(response_selection_results.train["F1-score"]) == n_folds + assert len(response_selection_results.test["Accuracy"]) == n_folds + assert len(response_selection_results.test["Precision"]) == 
n_folds + assert len(response_selection_results.test["F1-score"]) == n_folds + assert len(entity_results.train["DIETClassifier"]["Accuracy"]) == n_folds + assert len(entity_results.train["DIETClassifier"]["Precision"]) == n_folds + assert len(entity_results.train["DIETClassifier"]["F1-score"]) == n_folds + assert len(entity_results.test["DIETClassifier"]["Accuracy"]) == n_folds + assert len(entity_results.test["DIETClassifier"]["Precision"]) == n_folds + assert len(entity_results.test["DIETClassifier"]["F1-score"]) == n_folds + + +def test_response_selector_present(): + response_selector_component = ResponseSelector() + + interpreter_with_response_selector = Interpreter( + [response_selector_component], context=None + ) + interpreter_without_response_selector = Interpreter([], context=None) + + assert is_response_selector_present(interpreter_with_response_selector) + assert not is_response_selector_present(interpreter_without_response_selector) + + def test_intent_evaluation_report(tmpdir_factory): path = tmpdir_factory.mktemp("evaluation").strpath report_folder = os.path.join(path, "reports") @@ -299,15 +454,20 @@ def test_intent_evaluation_report(tmpdir_factory): result = evaluate_intents( intent_results, report_folder, - successes=False, - errors=False, - confmat_filename=None, - intent_hist_filename=None, + successes=True, + errors=True, + disable_plotting=False, ) report = json.loads(rasa.utils.io.read_file(report_filename)) - greet_results = {"precision": 1.0, "recall": 1.0, "f1-score": 1.0, "support": 1} + greet_results = { + "precision": 1.0, + "recall": 1.0, + "f1-score": 1.0, + "support": 1, + "confused_with": {}, + } prediction = { "text": "hello", @@ -320,6 +480,66 @@ def test_intent_evaluation_report(tmpdir_factory): assert report["greet"] == greet_results assert result["predictions"][0] == prediction + assert os.path.exists(os.path.join(report_folder, "intent_confusion_matrix.png")) + assert os.path.exists(os.path.join(report_folder, "intent_histogram.png")) + assert not os.path.exists(os.path.join(report_folder, "intent_errors.json")) + assert os.path.exists(os.path.join(report_folder, "intent_successes.json")) + + +def test_intent_evaluation_report_large(tmpdir_factory: TempdirFactory): + path = tmpdir_factory.mktemp("evaluation") + report_folder = path / "reports" + report_filename = report_folder / "intent_report.json" + + rasa.utils.io.create_directory(str(report_folder)) + + def correct(label: Text) -> IntentEvaluationResult: + return IntentEvaluationResult(label, label, "", 1.0) + + def incorrect(label: Text, _label: Text) -> IntentEvaluationResult: + return IntentEvaluationResult(label, _label, "", 1.0) + + a_results = [correct("A")] * 10 + b_results = [correct("B")] * 7 + [incorrect("B", "C")] * 3 + c_results = [correct("C")] * 3 + [incorrect("C", "D")] + [incorrect("C", "E")] + d_results = [correct("D")] * 29 + [incorrect("D", "B")] * 3 + e_results = [incorrect("E", "C")] * 5 + [incorrect("E", "")] * 5 + + intent_results = a_results + b_results + c_results + d_results + e_results + + evaluate_intents( + intent_results, + report_folder, + successes=False, + errors=False, + disable_plotting=True, + ) + + report = json.loads(rasa.utils.io.read_file(str(report_filename))) + + a_results = { + "precision": 1.0, + "recall": 1.0, + "f1-score": 1.0, + "support": 10, + "confused_with": {}, + } + + e_results = { + "precision": 0.0, + "recall": 0.0, + "f1-score": 0.0, + "support": 10, + "confused_with": {"C": 5, "": 5}, + } + + c_confused_with = {"D": 1, "E": 1} + + assert 
len(report.keys()) == 8 + assert report["A"] == a_results + assert report["E"] == e_results + assert report["C"]["confused_with"] == c_confused_with + def test_response_evaluation_report(tmpdir_factory): path = tmpdir_factory.mktemp("evaluation").strpath @@ -345,7 +565,13 @@ def test_response_evaluation_report(tmpdir_factory): ), ] - result = evaluate_response_selections(response_results, report_folder) + result = evaluate_response_selections( + response_results, + report_folder, + successes=True, + errors=True, + disable_plotting=False, + ) report = json.loads(rasa.utils.io.read_file(report_filename)) @@ -354,6 +580,7 @@ def test_response_evaluation_report(tmpdir_factory): "recall": 1.0, "f1-score": 1.0, "support": 1, + "confused_with": {}, } prediction = { @@ -368,6 +595,39 @@ def test_response_evaluation_report(tmpdir_factory): assert report["My name is Mr.bot"] == name_query_results assert result["predictions"][1] == prediction + assert os.path.exists( + os.path.join(report_folder, "response_selection_confusion_matrix.png") + ) + assert os.path.exists( + os.path.join(report_folder, "response_selection_histogram.png") + ) + assert not os.path.exists( + os.path.join(report_folder, "response_selection_errors.json") + ) + assert os.path.exists( + os.path.join(report_folder, "response_selection_successes.json") + ) + + +@pytest.mark.parametrize( + "components, expected_extractors", + [ + ([DIETClassifier({ENTITY_RECOGNITION: False})], set()), + ([DIETClassifier({ENTITY_RECOGNITION: True})], {"DIETClassifier"}), + ([CRFEntityExtractor()], {"CRFEntityExtractor"}), + ( + [SpacyEntityExtractor(), CRFEntityExtractor()], + {"SpacyEntityExtractor", "CRFEntityExtractor"}, + ), + ([ResponseSelector()], set()), + ], +) +def test_get_entity_extractors(components, expected_extractors): + mock_interpreter = Interpreter(components, None) + extractors = get_entity_extractors(mock_interpreter) + + assert extractors == expected_extractors + def test_entity_evaluation_report(tmpdir_factory): class EntityExtractorA(EntityExtractor): @@ -376,7 +636,7 @@ class EntityExtractorA(EntityExtractor): def __init__(self, component_config=None) -> None: - super(EntityExtractorA, self).__init__(component_config) + super().__init__(component_config) class EntityExtractorB(EntityExtractor): @@ -384,7 +644,7 @@ class EntityExtractorB(EntityExtractor): def __init__(self, component_config=None) -> None: - super(EntityExtractorB, self).__init__(component_config) + super().__init__(component_config) path = tmpdir_factory.mktemp("evaluation").strpath report_folder = os.path.join(path, "reports") @@ -401,7 +661,14 @@ def __init__(self, component_config=None) -> None: None, ) extractors = get_entity_extractors(mock_interpreter) - result = evaluate_entities([EN_entity_result], extractors, report_folder) + result = evaluate_entities( + [EN_entity_result], + extractors, + report_folder, + errors=True, + successes=True, + disable_plotting=False, + ) report_a = json.loads(rasa.utils.io.read_file(report_filename_a)) report_b = json.loads(rasa.utils.io.read_file(report_filename_b)) @@ -412,6 +679,17 @@ def __init__(self, component_config=None) -> None: assert report_a["macro avg"]["recall"] == 0.5 assert result["EntityExtractorA"]["accuracy"] == 0.75 + assert os.path.exists( + os.path.join(report_folder, "EntityExtractorA_confusion_matrix.png") + ) + assert os.path.exists(os.path.join(report_folder, "EntityExtractorA_errors.json")) + assert os.path.exists( + os.path.join(report_folder, "EntityExtractorA_successes.json") + ) + assert not 
os.path.exists( + os.path.join(report_folder, "EntityExtractorA_histogram.png") + ) + def test_empty_intent_removal(): intent_results = [ @@ -457,6 +735,7 @@ def test_evaluate_entities_cv_empty_tokens(): assert result == { "target_labels": [], "extractor_labels": {"EntityExtractorA": [], "EntityExtractorB": []}, + "confidences": {"EntityExtractorA": [], "EntityExtractorB": []}, }, "Wrong entity prediction alignment" @@ -509,19 +788,53 @@ def test_evaluate_entities_cv(): "movie", ], }, + "confidences": { + "EntityExtractorA": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + ], + "EntityExtractorB": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + ], + }, }, "Wrong entity prediction alignment" -def test_get_entity_extractors(pretrained_interpreter): - assert get_entity_extractors(pretrained_interpreter) == { - "SpacyEntityExtractor", - "DucklingHTTPExtractor", - } - +def test_remove_pretrained_extractors(component_builder): + _config = RasaNLUModelConfig( + { + "pipeline": [ + {"name": "SpacyNLP"}, + {"name": "SpacyEntityExtractor"}, + {"name": "DucklingHTTPExtractor"}, + ] + } + ) + trainer = Trainer(_config, component_builder) -def test_remove_pretrained_extractors(pretrained_interpreter): target_components_names = ["SpacyNLP"] - filtered_pipeline = remove_pretrained_extractors(pretrained_interpreter.pipeline) + filtered_pipeline = remove_pretrained_extractors(trainer.pipeline) filtered_components_names = [c.name for c in filtered_pipeline] assert filtered_components_names == target_components_names @@ -532,68 +845,12 @@ def test_label_replacement(): assert substitute_labels(original_labels, "O", "no_entity") == target_labels -@pytest.mark.parametrize( - "targets,exclude_label,expected", - [ - ( - ["no_entity", "location", "location", "location", "person"], - NO_ENTITY, - ["location", "person"], - ), - ( - ["no_entity", "location", "location", "location", "person"], - None, - ["no_entity", "location", "person"], - ), - (["no_entity"], NO_ENTITY, []), - (["location", "location", "location"], NO_ENTITY, ["location"]), - ([], None, []), - ], -) -def test_get_label_set(targets, exclude_label, expected): - actual = get_unique_labels(targets, exclude_label) - assert set(expected) == set(actual) - - -@pytest.mark.parametrize( - "targets,predictions,expected_precision,expected_fscore,expected_accuracy", - [ - ( - ["no_entity", "location", "no_entity", "location", "no_entity"], - ["no_entity", "location", "no_entity", "no_entity", "person"], - 1.0, - 0.6666666666666666, - 3 / 5, - ), - ( - ["no_entity", "no_entity", "no_entity", "no_entity", "person"], - ["no_entity", "no_entity", "no_entity", "no_entity", "no_entity"], - 0.0, - 0.0, - 4 / 5, - ), - ], -) -def test_get_evaluation_metrics( - targets, predictions, expected_precision, expected_fscore, expected_accuracy -): - report, precision, f1, accuracy = get_evaluation_metrics( - targets, predictions, True, exclude_label=NO_ENTITY - ) - - assert f1 == expected_fscore - assert precision == expected_precision - assert accuracy == expected_accuracy - assert NO_ENTITY not in report - +def test_nlu_comparison(tmpdir, config_path, config_path_duplicate): + # the configs need to be at a different path, otherwise the results are + # combined on the same dictionary key and cannot be plotted properly + configs = [config_path, config_path_duplicate] -def test_nlu_comparison(tmpdir): - configs = [ - NLU_DEFAULT_CONFIG_PATH, - "sample_configs/config_supervised_embeddings.yml", - ] 
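+    # compare both configs on the same data over two runs, excluding 50% and 80% of the training examples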
output = tmpdir.strpath - compare_nlu_models( configs, DEFAULT_DATA_PATH, output, runs=2, exclusion_percentages=[50, 80] ) @@ -608,6 +865,16 @@ def test_nlu_comparison(tmpdir): run_1_path = os.path.join(output, "run_1") assert set(os.listdir(run_1_path)) == {"50%_exclusion", "80%_exclusion", "test.md"} + exclude_50_path = os.path.join(run_1_path, "50%_exclusion") + modelnames = [os.path.splitext(os.path.basename(config))[0] for config in configs] + + modeloutputs = set( + ["train"] + + [f"{m}_report" for m in modelnames] + + [f"{m}.tar.gz" for m in modelnames] + ) + assert set(os.listdir(exclude_50_path)) == modeloutputs + @pytest.mark.parametrize( "entity_results,targets,predictions,successes,errors", diff --git a/tests/nlu/base/test_interpreter.py b/tests/nlu/test_interpreter.py similarity index 62% rename from tests/nlu/base/test_interpreter.py rename to tests/nlu/test_interpreter.py index 72187f43a063..4211dceb4333 100644 --- a/tests/nlu/base/test_interpreter.py +++ b/tests/nlu/test_interpreter.py @@ -15,33 +15,6 @@ from tests.nlu import utilities -@utilities.slowtest -@pytest.mark.parametrize( - "pipeline_template", list(registry.registered_pipeline_templates.keys()) -) -async def test_interpreter(pipeline_template, component_builder, tmpdir): - test_data = "data/examples/rasa/demo-rasa.json" - _conf = utilities.base_test_conf(pipeline_template) - _conf["data"] = test_data - td = training_data.load_data(test_data) - interpreter = await utilities.interpreter_for( - component_builder, "data/examples/rasa/demo-rasa.json", tmpdir.strpath, _conf - ) - - texts = ["good bye", "i am looking for an indian spot"] - - for text in texts: - result = interpreter.parse(text, time=None) - assert result["text"] == text - assert not result["intent"]["name"] or result["intent"]["name"] in td.intents - assert result["intent"]["confidence"] >= 0 - # Ensure the model doesn't detect entity types that are not present - # Models on our test data set are not stable enough to - # require the exact entities to be found - for entity in result["entities"]: - assert entity["entity"] in td.entities - - @pytest.mark.parametrize( "metadata", [ @@ -61,9 +34,10 @@ async def test_interpreter(pipeline_template, component_builder, tmpdir): {"rasa_version": "0.14.4"}, {"rasa_version": "0.15.0a1"}, {"rasa_version": "1.0.0a1"}, + {"rasa_version": "1.5.0"}, ], ) -def test_model_not_compatible(metadata): +def test_model_is_not_compatible(metadata): with pytest.raises(rasa.nlu.model.UnsupportedModelError): Interpreter.ensure_model_compatibility(metadata) @@ -89,7 +63,6 @@ def test_model_is_compatible(metadata): }, {"obj": "trained_nlu_model", "endpoint": None, "type": RasaNLUInterpreter}, {"obj": "not-existing", "endpoint": None, "type": RegexInterpreter}, - {"obj": ["list-object"], "endpoint": None, "type": RegexInterpreter}, ], ) def test_create_interpreter(parameters, trained_nlu_model): @@ -97,6 +70,6 @@ def test_create_interpreter(parameters, trained_nlu_model): if obj == "trained_nlu_model": _, obj = get_model_subdirectories(get_model(trained_nlu_model)) - interpreter = NaturalLanguageInterpreter.create(obj, parameters["endpoint"]) + interpreter = NaturalLanguageInterpreter.create(parameters["endpoint"] or obj) assert isinstance(interpreter, parameters["type"]) diff --git a/tests/nlu/base/test_persistor.py b/tests/nlu/test_persistor.py similarity index 50% rename from tests/nlu/base/test_persistor.py rename to tests/nlu/test_persistor.py index ede7452d1b93..25ce5cc76161 100644 --- a/tests/nlu/base/test_persistor.py +++ 
b/tests/nlu/test_persistor.py @@ -5,50 +5,82 @@ from moto import mock_s3 from rasa.nlu import persistor, train -from tests.nlu import utilities +from rasa.nlu.config import RasaNLUModelConfig -class Object(object): +class Object: pass # noinspection PyPep8Naming -@mock_s3 -async def test_list_method_method_in_AWSPersistor(component_builder, tmpdir): - # artificially create a persisted model - _config = utilities.base_test_conf("keyword") - os.environ["BUCKET_NAME"] = "rasa-test" - os.environ["AWS_DEFAULT_REGION"] = "us-west-1" +async def test_list_method_method_in_AWS_persistor(component_builder, tmpdir): + with mock_s3(): + # artificially create a persisted model + _config = RasaNLUModelConfig( + {"pipeline": [{"name": "KeywordIntentClassifier"}]} + ) - (trained, _, persisted_path) = await train( - _config, - data="data/test/demo-rasa-small.json", - path=tmpdir.strpath, - storage="aws", - component_builder=component_builder, - ) + os.environ["BUCKET_NAME"] = "rasa-test" + os.environ["AWS_DEFAULT_REGION"] = "us-west-1" - # We need to create the bucket since this is all in Moto's 'virtual' AWS - # account - awspersistor = persistor.AWSPersistor(os.environ["BUCKET_NAME"]) - result = awspersistor.list_models() + (trained, _, persisted_path) = await train( + _config, + data="data/test/demo-rasa-small.json", + path=tmpdir.strpath, + storage="aws", + component_builder=component_builder, + ) - assert len(result) == 1 + # We need to create the bucket since this is all in Moto's 'virtual' AWS + # account + awspersistor = persistor.AWSPersistor(os.environ["BUCKET_NAME"]) + result = awspersistor.list_models() + + assert len(result) == 1 # noinspection PyPep8Naming -@mock_s3 -def test_list_models_method_raise_exeception_in_AWSPersistor(): - os.environ["AWS_DEFAULT_REGION"] = "us-east-1" +def test_list_models_method_raise_exeception_in_AWS_persistor(): + with mock_s3(): + os.environ["AWS_DEFAULT_REGION"] = "us-east-1" - awspersistor = persistor.AWSPersistor("rasa-test") - result = awspersistor.list_models() + awspersistor = persistor.AWSPersistor("rasa-test", region_name="foo") + result = awspersistor.list_models() + + assert result == [] + + +# noinspection PyPep8Naming +def test_retrieve_tar_archive_with_s3_namespace(): + with mock_s3(): + model = "/my/s3/project/model.tar.gz" + destination = "dst" + with patch.object(persistor.AWSPersistor, "_decompress") as decompress: + with patch.object(persistor.AWSPersistor, "_retrieve_tar") as retrieve: + persistor.AWSPersistor("rasa-test", region_name="foo").retrieve( + model, destination + ) + decompress.assert_called_once_with("model.tar.gz", destination) + retrieve.assert_called_once_with(model) - assert result == [] + +# noinspection PyPep8Naming +def test_s3_private_retrieve_tar(): + with mock_s3(): + # Ensure the S3 persistor writes to a filename `model.tar.gz`, whilst + # passing the fully namespaced path to boto3 + model = "/my/s3/project/model.tar.gz" + awsPersistor = persistor.AWSPersistor("rasa-test", region_name="foo") + with patch.object(awsPersistor.bucket, "download_fileobj") as download_fileobj: + # noinspection PyProtectedMember + awsPersistor._retrieve_tar(model) + retrieveArgs = download_fileobj.call_args[0] + assert retrieveArgs[0] == model + assert retrieveArgs[1].name == "model.tar.gz" # noinspection PyPep8Naming -def test_list_models_method_in_GCSPersistor(): +def test_list_models_method_in_GCS_persistor(): # noinspection PyUnusedLocal def mocked_init(self, *args, **kwargs): self._model_dir_and_model_from_filename = lambda x: { @@ -70,7 
+102,7 @@ def mocked_list_blobs(): # noinspection PyPep8Naming -def test_list_models_method_raise_exeception_in_GCSPersistor(): +def test_list_models_method_raise_exeception_in_GCS_persistor(): # noinspection PyUnusedLocal def mocked_init(self, *args, **kwargs): self._model_dir_and_model_from_filename = lambda x: { @@ -90,7 +122,7 @@ def mocked_list_blobs(): # noinspection PyPep8Naming -def test_list_models_method_in_AzurePersistor(): +def test_list_models_method_in_Azure_persistor(): # noinspection PyUnusedLocal def mocked_init(self, *args, **kwargs): self._model_dir_and_model_from_filename = lambda x: { @@ -114,7 +146,7 @@ def mocked_list_blobs(container_name, prefix=None): # noinspection PyPep8Naming -def test_list_models_method_raise_exeception_in_AzurePersistor(): +def test_list_models_method_raise_exeception_in_Azure_persistor(): def mocked_init(self, *args, **kwargs): self._model_dir_and_model_from_filename = lambda x: {"blob_name": ("project",)}[ x @@ -133,6 +165,16 @@ def mocked_list_blobs(container_name, prefix=None): assert result == [] +def test_get_external_persistor(): + p = persistor.get_persistor("rasa.nlu.persistor.Persistor") + assert isinstance(p, persistor.Persistor) + + +def test_raise_exception_in_get_external_persistor(): + with pytest.raises(ImportError): + _ = persistor.get_persistor("unknown.persistor") + + # noinspection PyPep8Naming @pytest.mark.parametrize( "model, archive", [("model.tar.gz", "model.tar.gz"), ("model", "model.tar.gz")] diff --git a/tests/nlu/training/test_train.py b/tests/nlu/test_train.py similarity index 52% rename from tests/nlu/training/test_train.py rename to tests/nlu/test_train.py index 5bad045cb2bb..4365069d58ab 100644 --- a/tests/nlu/training/test_train.py +++ b/tests/nlu/test_train.py @@ -1,62 +1,92 @@ -# -*- coding: utf-8 -*- - import os import pytest from rasa.nlu import registry, train from rasa.nlu.config import RasaNLUModelConfig from rasa.nlu.model import Interpreter, Trainer -from rasa.nlu.train import create_persistor from rasa.nlu.training_data import TrainingData -from tests.nlu import utilities +from rasa.utils.tensorflow.constants import EPOCHS from tests.nlu.conftest import DEFAULT_DATA_PATH +from typing import Any, Dict, List, Tuple, Text def as_pipeline(*components): - return [{"name": c} for c in components] + return [{"name": c, EPOCHS: 1} for c in components] -def pipelines_for_tests(): +def pipelines_for_tests() -> List[Tuple[Text, List[Dict[Text, Any]]]]: # these templates really are just for testing # every component should be in here so train-persist-load-use cycle can be # tested they still need to be in a useful order - hence we can not simply # generate this automatically. + # Create separate test pipelines for dense featurizers + # because they can't co-exist in the same pipeline together, + # as their tokenizers break the incoming message into different number of tokens. 
+ # first is language followed by list of components return [ + ("en", as_pipeline("KeywordIntentClassifier")), ( "en", as_pipeline( - "SpacyNLP", - "MitieNLP", "WhitespaceTokenizer", - "MitieTokenizer", - "SpacyTokenizer", - "MitieFeaturizer", - "SpacyFeaturizer", - "NGramFeaturizer", "RegexFeaturizer", + "LexicalSyntacticFeaturizer", "CountVectorsFeaturizer", - "MitieEntityExtractor", "CRFEntityExtractor", - "SpacyEntityExtractor", "DucklingHTTPExtractor", + "DIETClassifier", + "ResponseSelector", "EntitySynonymMapper", - "KeywordIntentClassifier", + ), + ), + ( + "en", + as_pipeline( + "SpacyNLP", + "SpacyTokenizer", + "SpacyFeaturizer", + "SpacyEntityExtractor", "SklearnIntentClassifier", - "MitieIntentClassifier", - "EmbeddingIntentClassifier", - "ResponseSelector", + ), + ), + ( + "en", + as_pipeline( + "HFTransformersNLP", + "LanguageModelTokenizer", + "LanguageModelFeaturizer", + "DIETClassifier", ), ), ( "zh", + as_pipeline( + "MitieNLP", "JiebaTokenizer", "MitieFeaturizer", "MitieEntityExtractor" + ), + ), + ("fallback", as_pipeline("KeywordIntentClassifier", "FallbackClassifier")), + ] + + +def pipelines_for_non_windows_tests() -> List[Tuple[Text, List[Dict[Text, Any]]]]: + # these templates really are just for testing + + # because some of the components are not available on Windows, we specify pipelines + # containing them separately + + # first is language followed by list of components + return [ + ("en", as_pipeline("ConveRTTokenizer", "ConveRTFeaturizer", "DIETClassifier")), + ( + "en", as_pipeline( "MitieNLP", - "JiebaTokenizer", + "MitieTokenizer", "MitieFeaturizer", - "MitieEntityExtractor", - "SklearnIntentClassifier", + "MitieIntentClassifier", + "RegexEntityExtractor", ), ), ] @@ -65,101 +95,90 @@ def pipelines_for_tests(): def test_all_components_are_in_at_least_one_test_pipeline(): """There is a template that includes all components to test the train-persist-load-use cycle. Ensures that - really all Components are in there.""" + really all components are in there.""" + + all_pipelines = pipelines_for_tests() + pipelines_for_non_windows_tests() + all_components = [c["name"] for _, p in all_pipelines for c in p] - all_components = [c["name"] for _, p in pipelines_for_tests() for c in p] for cls in registry.component_classes: assert ( cls.name in all_components ), "`all_components` template is missing component." 
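For orientation, here is a minimal sketch of the train-persist-load-parse cycle that the pipeline tuples above feed into. It reuses only names already imported in this diff (train, Interpreter, RasaNLUModelConfig); the temporary directory, the standalone asyncio.run call, the hard-coded demo data path, and the omission of the shared component_builder fixture are illustrative assumptions, not part of the test suite.

import asyncio
import tempfile

from rasa.nlu import train
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.model import Interpreter


async def train_and_parse() -> None:
    # Smallest pipeline exercised in these tests; the larger tuples returned by
    # pipelines_for_tests() plug into RasaNLUModelConfig the same way.
    config = RasaNLUModelConfig(
        {"language": "en", "pipeline": [{"name": "KeywordIntentClassifier"}]}
    )
    with tempfile.TemporaryDirectory() as model_dir:
        # train() returns a 3-tuple whose last element is the persisted model
        # path, mirroring the (trained, _, persisted_path) unpacking used in
        # the tests in this diff.
        _, _, persisted_path = await train(
            config, path=model_dir, data="data/examples/rasa/demo-rasa.json"
        )
        loaded = Interpreter.load(persisted_path)
        print(loaded.parse("Rasa is great!"))


# Example invocation (outside pytest):
# asyncio.run(train_and_parse())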
-@utilities.slowtest -@pytest.mark.parametrize( - "pipeline_template", list(registry.registered_pipeline_templates.keys()) -) -async def test_train_model(pipeline_template, component_builder, tmpdir): - _config = utilities.base_test_conf(pipeline_template) +@pytest.mark.parametrize("language, pipeline", pipelines_for_tests()) +async def test_train_persist_load_parse(language, pipeline, component_builder, tmpdir): + _config = RasaNLUModelConfig({"pipeline": pipeline, "language": language}) + (trained, _, persisted_path) = await train( _config, path=tmpdir.strpath, data=DEFAULT_DATA_PATH, component_builder=component_builder, ) + assert trained.pipeline + loaded = Interpreter.load(persisted_path, component_builder) - assert loaded.pipeline - assert loaded.parse("hello") is not None - assert loaded.parse("Hello today is Monday, again!") is not None + assert loaded.pipeline + assert loaded.parse("Rasa is great!") is not None -@utilities.slowtest -async def test_random_seed(component_builder, tmpdir): - """test if train result is the same for two runs of tf embedding""" - _config = utilities.base_test_conf("supervised_embeddings") - # set fixed random seed of the embedding intent classifier to 1 - _config.set_component_attr(6, random_seed=1) - # first run - (trained_a, _, persisted_path_a) = await train( - _config, - path=tmpdir.strpath + "_a", - data=DEFAULT_DATA_PATH, - component_builder=component_builder, - ) - # second run - (trained_b, _, persisted_path_b) = await train( - _config, - path=tmpdir.strpath + "_b", - data=DEFAULT_DATA_PATH, - component_builder=component_builder, - ) - loaded_a = Interpreter.load(persisted_path_a, component_builder) - loaded_b = Interpreter.load(persisted_path_b, component_builder) - result_a = loaded_a.parse("hello")["intent"]["confidence"] - result_b = loaded_b.parse("hello")["intent"]["confidence"] - assert result_a == result_b +@pytest.mark.parametrize("language, pipeline", pipelines_for_non_windows_tests()) +@pytest.mark.skip_on_windows +async def test_train_persist_load_parse_non_windows( + language, pipeline, component_builder, tmpdir +): + await test_train_persist_load_parse(language, pipeline, component_builder, tmpdir) -@utilities.slowtest @pytest.mark.parametrize("language, pipeline", pipelines_for_tests()) -async def test_train_model_on_test_pipelines( - language, pipeline, component_builder, tmpdir -): +def test_train_model_without_data(language, pipeline, component_builder, tmpdir): _config = RasaNLUModelConfig({"pipeline": pipeline, "language": language}) - (trained, _, persisted_path) = await train( - _config, - path=tmpdir.strpath, - data=DEFAULT_DATA_PATH, - component_builder=component_builder, - ) - assert trained.pipeline + + trainer = Trainer(_config, component_builder) + trainer.train(TrainingData()) + persisted_path = trainer.persist(tmpdir.strpath) + loaded = Interpreter.load(persisted_path, component_builder) + assert loaded.pipeline - assert loaded.parse("hello") is not None - assert loaded.parse("Hello today is Monday, again!") is not None + assert loaded.parse("Rasa is great!") is not None + + +@pytest.mark.parametrize("language, pipeline", pipelines_for_non_windows_tests()) +@pytest.mark.skip_on_windows +def test_train_model_without_data_non_windows( + language, pipeline, component_builder, tmpdir +): + test_train_model_without_data(language, pipeline, component_builder, tmpdir) -@utilities.slowtest @pytest.mark.parametrize("language, pipeline", pipelines_for_tests()) -async def test_train_model_no_events(language, pipeline, 
component_builder, tmpdir): +def test_load_and_persist_without_train(language, pipeline, component_builder, tmpdir): _config = RasaNLUModelConfig({"pipeline": pipeline, "language": language}) - (trained, _, persisted_path) = await train( - _config, - path=tmpdir.strpath, - data="./data/test/demo-rasa-noents.json", - component_builder=component_builder, - ) - assert trained.pipeline + + trainer = Trainer(_config, component_builder) + persisted_path = trainer.persist(tmpdir.strpath) + loaded = Interpreter.load(persisted_path, component_builder) + assert loaded.pipeline - assert loaded.parse("hello") is not None - assert loaded.parse("Hello today is Monday, again!") is not None + assert loaded.parse("Rasa is great!") is not None + + +@pytest.mark.parametrize("language, pipeline", pipelines_for_non_windows_tests()) +@pytest.mark.skip_on_windows +def test_load_and_persist_without_train_non_windows( + language, pipeline, component_builder, tmpdir +): + test_load_and_persist_without_train(language, pipeline, component_builder, tmpdir) async def test_train_model_empty_pipeline(component_builder): - # Should return an empty pipeline - _config = utilities.base_test_conf(pipeline_template=None) + _config = RasaNLUModelConfig({"pipeline": None, "language": "en"}) + with pytest.raises(ValueError): await train( _config, data=DEFAULT_DATA_PATH, component_builder=component_builder @@ -167,79 +186,75 @@ async def test_train_model_empty_pipeline(component_builder): async def test_train_named_model(component_builder, tmpdir): - _config = utilities.base_test_conf("keyword") + _config = RasaNLUModelConfig( + {"pipeline": [{"name": "KeywordIntentClassifier"}], "language": "en"} + ) + (trained, _, persisted_path) = await train( _config, path=tmpdir.strpath, data=DEFAULT_DATA_PATH, component_builder=component_builder, ) + assert trained.pipeline + normalized_path = os.path.dirname(os.path.normpath(persisted_path)) # should be saved in a dir named after a project assert normalized_path == tmpdir.strpath -async def test_handles_pipeline_with_non_existing_component(component_builder): - _config = utilities.base_test_conf("pretrained_embeddings_spacy") - _config.pipeline.append({"name": "my_made_up_component"}) +async def test_handles_pipeline_with_non_existing_component( + component_builder, pretrained_embeddings_spacy_config +): + pretrained_embeddings_spacy_config.pipeline.append({"name": "my_made_up_component"}) + with pytest.raises(Exception) as execinfo: await train( - _config, data=DEFAULT_DATA_PATH, component_builder=component_builder + pretrained_embeddings_spacy_config, + data=DEFAULT_DATA_PATH, + component_builder=component_builder, ) assert "Cannot find class" in str(execinfo.value) -@pytest.mark.parametrize("language, pipeline", pipelines_for_tests()) -def test_load_and_persist_without_train(language, pipeline, component_builder, tmpdir): - _config = RasaNLUModelConfig({"pipeline": pipeline, "language": language}) - trainer = Trainer(_config, component_builder) - persistor = create_persistor(_config) - persisted_path = trainer.persist(tmpdir.strpath, persistor) - loaded = Interpreter.load(persisted_path, component_builder) - assert loaded.pipeline - assert loaded.parse("hello") is not None - assert loaded.parse("Hello today is Monday, again!") is not None - - -@pytest.mark.parametrize("language, pipeline", pipelines_for_tests()) -def test_train_with_empty_data(language, pipeline, component_builder, tmpdir): - _config = RasaNLUModelConfig({"pipeline": pipeline, "language": language}) - trainer = 
Trainer(_config, component_builder) - trainer.train(TrainingData()) - persistor = create_persistor(_config) - persisted_path = trainer.persist(tmpdir.strpath, persistor) - loaded = Interpreter.load(persisted_path, component_builder) - assert loaded.pipeline - assert loaded.parse("hello") is not None - assert loaded.parse("Hello today is Monday, again!") is not None - +async def test_train_model_training_data_persisted(component_builder, tmpdir): + _config = RasaNLUModelConfig( + {"pipeline": [{"name": "KeywordIntentClassifier"}], "language": "en"} + ) -async def test_train_model_no_training_data_persisted(component_builder, tmpdir): - _config = utilities.base_test_conf("keyword") (trained, _, persisted_path) = await train( _config, path=tmpdir.strpath, data=DEFAULT_DATA_PATH, component_builder=component_builder, - persist_nlu_training_data=False, + persist_nlu_training_data=True, ) + assert trained.pipeline + loaded = Interpreter.load(persisted_path, component_builder) + assert loaded.pipeline - assert loaded.model_metadata.get("training_data") is None + assert loaded.model_metadata.get("training_data") is not None -async def test_train_model_training_data_persisted(component_builder, tmpdir): - _config = utilities.base_test_conf("keyword") +async def test_train_model_no_training_data_persisted(component_builder, tmpdir): + _config = RasaNLUModelConfig( + {"pipeline": [{"name": "KeywordIntentClassifier"}], "language": "en"} + ) + (trained, _, persisted_path) = await train( _config, path=tmpdir.strpath, data=DEFAULT_DATA_PATH, component_builder=component_builder, - persist_nlu_training_data=True, + persist_nlu_training_data=False, ) + assert trained.pipeline + loaded = Interpreter.load(persisted_path, component_builder) + assert loaded.pipeline - assert loaded.model_metadata.get("training_data") is not None + assert loaded.model_metadata.get("training_data") is None diff --git a/tests/nlu/base/test_utils.py b/tests/nlu/test_utils.py similarity index 95% rename from tests/nlu/base/test_utils.py rename to tests/nlu/test_utils.py index 6b18ee4845ba..def6599464ca 100644 --- a/tests/nlu/base/test_utils.py +++ b/tests/nlu/test_utils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- import io import os import pickle @@ -7,6 +6,7 @@ import shutil import rasa.utils.io as io_utils from rasa.nlu import utils +from pathlib import Path @pytest.fixture(scope="function") @@ -26,14 +26,14 @@ def fake_model_dir(empty_model_dir): fake_obj = {"Fake", "model"} fake_obj_path = os.path.join(empty_model_dir, "component.pkl") - with io.open(fake_obj_path, "wb") as f: + with open(fake_obj_path, "wb") as f: pickle.dump(fake_obj, f) return empty_model_dir # not empty anymore ;) def test_relative_normpath(): test_file = "/my/test/path/file.txt" - assert utils.relative_normpath(test_file, "/my/test") == "path/file.txt" + assert utils.relative_normpath(test_file, "/my/test") == Path("path/file.txt") assert utils.relative_normpath(None, "/my/test") is None diff --git a/tests/nlu/tokenizers/__init__.py b/tests/nlu/tokenizers/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/nlu/tokenizers/test_convert_tokenizer.py b/tests/nlu/tokenizers/test_convert_tokenizer.py new file mode 100644 index 000000000000..6bfc0ab19b68 --- /dev/null +++ b/tests/nlu/tokenizers/test_convert_tokenizer.py @@ -0,0 +1,74 @@ +import pytest + +from rasa.nlu.training_data import Message, TrainingData +from rasa.nlu.constants import TEXT, INTENT, TOKENS_NAMES, NUMBER_OF_SUB_TOKENS +from rasa.nlu.tokenizers.convert_tokenizer 
import ConveRTTokenizer + + +@pytest.mark.parametrize( + "text, expected_tokens, expected_indices", + [ + ( + "forecast for lunch", + ["forecast", "for", "lunch"], + [(0, 8), (9, 12), (13, 18)], + ), + ("hello", ["hello"], [(0, 5)]), + ("you're", ["you", "re"], [(0, 3), (4, 6)]), + ("r. n. b.", ["r", "n", "b"], [(0, 1), (3, 4), (6, 7)]), + ("rock & roll", ["rock", "&", "roll"], [(0, 4), (5, 6), (7, 11)]), + ("ńöñàśçií", ["ńöñàśçií"], [(0, 8)]), + ], +) +@pytest.mark.skip_on_windows +def test_convert_tokenizer_edge_cases( + component_builder, text, expected_tokens, expected_indices +): + tk = component_builder.create_component_from_class(ConveRTTokenizer) + + tokens = tk.tokenize(Message(text), attribute=TEXT) + + assert [t.text for t in tokens] == expected_tokens + assert [t.start for t in tokens] == [i[0] for i in expected_indices] + assert [t.end for t in tokens] == [i[1] for i in expected_indices] + + +@pytest.mark.parametrize( + "text, expected_tokens", + [ + ("Forecast_for_LUNCH", ["Forecast_for_LUNCH"]), + ("Forecast for LUNCH", ["Forecast for LUNCH"]), + ], +) +@pytest.mark.skip_on_windows +def test_custom_intent_symbol(component_builder, text, expected_tokens): + tk = component_builder.create_component_from_class( + ConveRTTokenizer, intent_tokenization_flag=True, intent_split_symbol="+" + ) + + message = Message(text) + message.set(INTENT, text) + + tk.train(TrainingData([message])) + + assert [t.text for t in message.get(TOKENS_NAMES[INTENT])] == expected_tokens + + +@pytest.mark.parametrize( + "text, expected_number_of_sub_tokens", + [("Aarhus is a city", [2, 1, 1, 1]), ("sentence embeddings", [1, 3])], +) +@pytest.mark.skip_on_windows +def test_convert_tokenizer_number_of_sub_tokens( + component_builder, text, expected_number_of_sub_tokens +): + tk = component_builder.create_component_from_class(ConveRTTokenizer) + + message = Message(text) + message.set(INTENT, text) + + tk.train(TrainingData([message])) + + assert [ + t.get(NUMBER_OF_SUB_TOKENS) for t in message.get(TOKENS_NAMES[TEXT]) + ] == expected_number_of_sub_tokens diff --git a/tests/nlu/tokenizers/test_jieba_tokenizer.py b/tests/nlu/tokenizers/test_jieba_tokenizer.py new file mode 100644 index 000000000000..426215541587 --- /dev/null +++ b/tests/nlu/tokenizers/test_jieba_tokenizer.py @@ -0,0 +1,67 @@ +from unittest.mock import patch + +from rasa.nlu.tokenizers.jieba_tokenizer import JiebaTokenizer + +import pytest + +from rasa.nlu.training_data import Message, TrainingData +from rasa.nlu.constants import TEXT, INTENT, TOKENS_NAMES + + +@pytest.mark.parametrize( + "text, expected_tokens, expected_indices", + [ + ( + "我想去吃兰州拉面", + ["我", "想", "去", "吃", "兰州", "拉面"], + [(0, 1), (1, 2), (2, 3), (3, 4), (4, 6), (6, 8)], + ), + ( + "Micheal你好吗?", + ["Micheal", "你好", "吗", "?"], + [(0, 7), (7, 9), (9, 10), (10, 11)], + ), + ], +) +def test_jieba(text, expected_tokens, expected_indices): + tk = JiebaTokenizer() + + tokens = tk.tokenize(Message(text), attribute=TEXT) + + assert [t.text for t in tokens] == expected_tokens + assert [t.start for t in tokens] == [i[0] for i in expected_indices] + assert [t.end for t in tokens] == [i[1] for i in expected_indices] + + +def test_jieba_load_dictionary(tmpdir_factory): + dictionary_path = tmpdir_factory.mktemp("jieba_custom_dictionary").strpath + + component_config = {"dictionary_path": dictionary_path} + + with patch.object( + JiebaTokenizer, "load_custom_dictionary", return_value=None + ) as mock_method: + tk = JiebaTokenizer(component_config) + tk.tokenize(Message(""), attribute=TEXT) 
+ + mock_method.assert_called_once_with(dictionary_path) + + +@pytest.mark.parametrize( + "text, expected_tokens", + [ + ("Forecast_for_LUNCH", ["Forecast_for_LUNCH"]), + ("Forecast for LUNCH", ["Forecast for LUNCH"]), + ], +) +def test_custom_intent_symbol(text, expected_tokens): + component_config = {"intent_tokenization_flag": True, "intent_split_symbol": "+"} + + tk = JiebaTokenizer(component_config) + + message = Message(text) + message.set(INTENT, text) + + tk.train(TrainingData([message])) + + assert [t.text for t in message.get(TOKENS_NAMES[INTENT])] == expected_tokens diff --git a/tests/nlu/tokenizers/test_lm_tokenizer.py b/tests/nlu/tokenizers/test_lm_tokenizer.py new file mode 100644 index 000000000000..493bc7ef35df --- /dev/null +++ b/tests/nlu/tokenizers/test_lm_tokenizer.py @@ -0,0 +1,386 @@ +import pytest + +from rasa.nlu.training_data import Message, TrainingData +from rasa.nlu.constants import ( + TEXT, + INTENT, + TOKENS_NAMES, + LANGUAGE_MODEL_DOCS, + TOKEN_IDS, + NUMBER_OF_SUB_TOKENS, +) +from rasa.nlu.tokenizers.lm_tokenizer import LanguageModelTokenizer +from rasa.nlu.utils.hugging_face.hf_transformers import HFTransformersNLP + + +# TODO: need to fix this failing test +@pytest.mark.skip(reason="Results in random crashing of github action workers") +@pytest.mark.parametrize( + "model_name, texts, expected_tokens, expected_indices, expected_num_token_ids", + [ + ( + "bert", + [ + "Good evening.", + "you're", + "r. n. b.", + "rock & roll", + "here is the sentence I want embeddings for.", + ], + [ + ["good", "evening"], + ["you", "re"], + ["r", "n", "b"], + ["rock", "&", "roll"], + [ + "here", + "is", + "the", + "sentence", + "i", + "want", + "em", + "bed", + "ding", + "s", + "for", + ], + ], + [ + [(0, 4), (5, 12)], + [(0, 3), (4, 6)], + [(0, 1), (3, 4), (6, 7)], + [(0, 4), (5, 6), (7, 11)], + [ + (0, 4), + (5, 7), + (8, 11), + (12, 20), + (21, 22), + (23, 27), + (28, 30), + (30, 33), + (33, 37), + (37, 38), + (39, 42), + ], + ], + [4, 4, 5, 5, 13], + ), + ( + "gpt", + [ + "Good evening.", + "hello", + "you're", + "r. n. b.", + "rock & roll", + "here is the sentence I want embeddings for.", + ], + [ + ["good", "evening"], + ["hello"], + ["you", "re"], + ["r", "n", "b"], + ["rock", "&", "roll"], + ["here", "is", "the", "sentence", "i", "want", "embe", "ddings", "for"], + ], + [ + [(0, 4), (5, 12)], + [(0, 5)], + [(0, 3), (4, 6)], + [(0, 1), (3, 4), (6, 7)], + [(0, 4), (5, 6), (7, 11)], + [ + (0, 4), + (5, 7), + (8, 11), + (12, 20), + (21, 22), + (23, 27), + (28, 32), + (32, 38), + (39, 42), + ], + ], + [2, 1, 2, 3, 3, 9], + ), + ( + "gpt2", + [ + "Good evening.", + "hello", + "you're", + "r. n. b.", + "rock & roll", + "here is the sentence I want embeddings for.", + ], + [ + ["Good", "even", "ing"], + ["hello"], + ["you", "re"], + ["r", "n", "b"], + ["rock", "&", "roll"], + [ + "here", + "is", + "the", + "sent", + "ence", + "I", + "want", + "embed", + "d", + "ings", + "for", + ], + ], + [ + [(0, 4), (5, 9), (9, 12)], + [(0, 5)], + [(0, 3), (4, 6)], + [(0, 1), (3, 4), (6, 7)], + [(0, 4), (5, 6), (7, 11)], + [ + (0, 4), + (5, 7), + (8, 11), + (12, 16), + (16, 20), + (21, 22), + (23, 27), + (28, 33), + (33, 34), + (34, 38), + (39, 42), + ], + ], + [3, 1, 2, 3, 3, 11], + ), + ( + "xlnet", + [ + "Good evening.", + "hello", + "you're", + "r. n. 
b.", + "rock & roll", + "here is the sentence I want embeddings for.", + ], + [ + ["Good", "evening"], + ["hello"], + ["you", "re"], + ["r", "n", "b"], + ["rock", "&", "roll"], + [ + "here", + "is", + "the", + "sentence", + "I", + "want", + "embed", + "ding", + "s", + "for", + ], + ], + [ + [(0, 4), (5, 12)], + [(0, 5)], + [(0, 3), (4, 6)], + [(0, 1), (3, 4), (6, 7)], + [(0, 4), (5, 6), (7, 11)], + [ + (0, 4), + (5, 7), + (8, 11), + (12, 20), + (21, 22), + (23, 27), + (28, 33), + (33, 37), + (37, 38), + (39, 42), + ], + ], + [4, 3, 4, 5, 5, 12], + ), + ( + "distilbert", + [ + "Good evening.", + "you're", + "r. n. b.", + "rock & roll", + "here is the sentence I want embeddings for.", + ], + [ + ["good", "evening"], + ["you", "re"], + ["r", "n", "b"], + ["rock", "&", "roll"], + [ + "here", + "is", + "the", + "sentence", + "i", + "want", + "em", + "bed", + "ding", + "s", + "for", + ], + ], + [ + [(0, 4), (5, 12)], + [(0, 3), (4, 6)], + [(0, 1), (3, 4), (6, 7)], + [(0, 4), (5, 6), (7, 11)], + [ + (0, 4), + (5, 7), + (8, 11), + (12, 20), + (21, 22), + (23, 27), + (28, 30), + (30, 33), + (33, 37), + (37, 38), + (39, 42), + ], + ], + [4, 4, 5, 5, 13], + ), + ( + "roberta", + [ + "Good evening.", + "hello", + "you're", + "r. n. b.", + "rock & roll", + "here is the sentence I want embeddings for.", + ], + [ + ["Good", "even", "ing"], + ["hello"], + ["you", "re"], + ["r", "n", "b"], + ["rock", "&", "roll"], + [ + "here", + "is", + "the", + "sent", + "ence", + "I", + "want", + "embed", + "d", + "ings", + "for", + ], + ], + [ + [(0, 4), (5, 9), (9, 12)], + [(0, 5)], + [(0, 3), (4, 6)], + [(0, 1), (3, 4), (6, 7)], + [(0, 4), (5, 6), (7, 11)], + [ + (0, 4), + (5, 7), + (8, 11), + (12, 16), + (16, 20), + (21, 22), + (23, 27), + (28, 33), + (33, 34), + (34, 38), + (39, 42), + ], + ], + [5, 3, 4, 5, 5, 13], + ), + ], +) +@pytest.mark.skip_on_windows +def test_lm_tokenizer_edge_cases( + model_name, texts, expected_tokens, expected_indices, expected_num_token_ids +): + + transformers_config = {"model_name": model_name} + + transformers_nlp = HFTransformersNLP(transformers_config) + lm_tokenizer = LanguageModelTokenizer() + + for text, gt_tokens, gt_indices, gt_num_indices in zip( + texts, expected_tokens, expected_indices, expected_num_token_ids + ): + + message = Message.build(text=text) + transformers_nlp.process(message) + tokens = lm_tokenizer.tokenize(message, TEXT) + token_ids = message.get(LANGUAGE_MODEL_DOCS[TEXT])[TOKEN_IDS] + + assert [t.text for t in tokens] == gt_tokens + assert [t.start for t in tokens] == [i[0] for i in gt_indices] + assert [t.end for t in tokens] == [i[1] for i in gt_indices] + assert len(token_ids) == gt_num_indices + + +@pytest.mark.parametrize( + "text, expected_tokens", + [ + ("Forecast_for_LUNCH", ["Forecast_for_LUNCH"]), + ("Forecast for LUNCH", ["Forecast for LUNCH"]), + ("Forecast+for+LUNCH", ["Forecast", "for", "LUNCH"]), + ], +) +@pytest.mark.skip_on_windows +def test_lm_tokenizer_custom_intent_symbol(text, expected_tokens): + component_config = {"intent_tokenization_flag": True, "intent_split_symbol": "+"} + + transformers_config = {"model_name": "bert"} # Test for one should be enough + + transformers_nlp = HFTransformersNLP(transformers_config) + lm_tokenizer = LanguageModelTokenizer(component_config) + + message = Message(text) + message.set(INTENT, text) + + td = TrainingData([message]) + + transformers_nlp.train(td) + lm_tokenizer.train(td) + + assert [t.text for t in message.get(TOKENS_NAMES[INTENT])] == expected_tokens + + +@pytest.mark.parametrize( + "text, 
expected_number_of_sub_tokens", + [("sentence embeddings", [1, 4]), ("this is a test", [1, 1, 1, 1])], +) +@pytest.mark.skip_on_windows +def test_lm_tokenizer_number_of_sub_tokens(text, expected_number_of_sub_tokens): + transformers_config = {"model_name": "bert"} # Test for one should be enough + + transformers_nlp = HFTransformersNLP(transformers_config) + lm_tokenizer = LanguageModelTokenizer() + + message = Message(text) + + td = TrainingData([message]) + + transformers_nlp.train(td) + lm_tokenizer.train(td) + + assert [ + t.get(NUMBER_OF_SUB_TOKENS) for t in message.get(TOKENS_NAMES[TEXT]) + ] == expected_number_of_sub_tokens diff --git a/tests/nlu/tokenizers/test_mitie_tokenizer.py b/tests/nlu/tokenizers/test_mitie_tokenizer.py new file mode 100644 index 000000000000..ebf40b0f9415 --- /dev/null +++ b/tests/nlu/tokenizers/test_mitie_tokenizer.py @@ -0,0 +1,50 @@ +import pytest + +from rasa.nlu.training_data import Message, TrainingData +from rasa.nlu.constants import TEXT, INTENT, TOKENS_NAMES +from rasa.nlu.tokenizers.mitie_tokenizer import MitieTokenizer + + +@pytest.mark.parametrize( + "text, expected_tokens, expected_indices", + [ + ( + "Forecast for lunch", + ["Forecast", "for", "lunch"], + [(0, 8), (9, 12), (13, 18)], + ), + ( + "hey ńöñàśçií how're you?", + ["hey", "ńöñàśçií", "how", "'re", "you", "?"], + [(0, 3), (4, 12), (13, 16), (16, 19), (20, 23), (23, 24)], + ), + ], +) +def test_mitie(text, expected_tokens, expected_indices): + tk = MitieTokenizer() + + tokens = tk.tokenize(Message(text), attribute=TEXT) + + assert [t.text for t in tokens] == expected_tokens + assert [t.start for t in tokens] == [i[0] for i in expected_indices] + assert [t.end for t in tokens] == [i[1] for i in expected_indices] + + +@pytest.mark.parametrize( + "text, expected_tokens", + [ + ("Forecast_for_LUNCH", ["Forecast_for_LUNCH"]), + ("Forecast for LUNCH", ["Forecast for LUNCH"]), + ], +) +def test_custom_intent_symbol(text, expected_tokens): + component_config = {"intent_tokenization_flag": True, "intent_split_symbol": "+"} + + tk = MitieTokenizer(component_config) + + message = Message(text) + message.set(INTENT, text) + + tk.train(TrainingData([message])) + + assert [t.text for t in message.get(TOKENS_NAMES[INTENT])] == expected_tokens diff --git a/tests/nlu/tokenizers/test_spacy_tokenizer.py b/tests/nlu/tokenizers/test_spacy_tokenizer.py new file mode 100644 index 000000000000..171aab84dc9b --- /dev/null +++ b/tests/nlu/tokenizers/test_spacy_tokenizer.py @@ -0,0 +1,98 @@ +import pytest + +from rasa.nlu.training_data import TrainingData +from rasa.nlu.training_data import Message +from rasa.nlu.constants import TEXT, SPACY_DOCS, INTENT, RESPONSE, TOKENS_NAMES +from rasa.nlu.tokenizers.spacy_tokenizer import SpacyTokenizer + + +@pytest.mark.parametrize( + "text, expected_tokens, expected_indices", + [ + ( + "Forecast for lunch", + ["Forecast", "for", "lunch"], + [(0, 8), (9, 12), (13, 18)], + ), + ( + "hey ńöñàśçií how're you?", + ["hey", "ńöñàśçií", "how", "'re", "you", "?"], + [(0, 3), (4, 12), (13, 16), (16, 19), (20, 23), (23, 24)], + ), + ], +) +def test_spacy(text, expected_tokens, expected_indices, spacy_nlp): + tk = SpacyTokenizer() + + message = Message(text) + message.set(SPACY_DOCS[TEXT], spacy_nlp(text)) + + tokens = tk.tokenize(message, attribute=TEXT) + + assert [t.text for t in tokens] == expected_tokens + assert [t.start for t in tokens] == [i[0] for i in expected_indices] + assert [t.end for t in tokens] == [i[1] for i in expected_indices] + + +@pytest.mark.parametrize( + "text, 
expected_pos_tags", + [ + ("Forecast for lunch", ["NN", "IN", "NN"]), + ("Hello, how are you?", ["UH", ",", "WRB", "VBP", "PRP", "."]), + ], +) +def test_spacy_pos_tags(text, expected_pos_tags, spacy_nlp): + tk = SpacyTokenizer() + + message = Message(text) + message.set(SPACY_DOCS[TEXT], spacy_nlp(text)) + + tokens = tk.tokenize(message, attribute=TEXT) + + assert [t.data.get("pos") for t in tokens] == expected_pos_tags + + +@pytest.mark.parametrize( + "text, expected_tokens, expected_indices", + [("Forecast for lunch", ["Forecast", "for", "lunch"], [(0, 8), (9, 12), (13, 18)])], +) +def test_train_tokenizer(text, expected_tokens, expected_indices, spacy_nlp): + tk = SpacyTokenizer() + + message = Message(text) + message.set(SPACY_DOCS[TEXT], spacy_nlp(text)) + message.set(RESPONSE, text) + message.set(SPACY_DOCS[RESPONSE], spacy_nlp(text)) + + training_data = TrainingData() + training_data.training_examples = [message] + + tk.train(training_data) + + for attribute in [RESPONSE, TEXT]: + tokens = training_data.training_examples[0].get(TOKENS_NAMES[attribute]) + + assert [t.text for t in tokens] == expected_tokens + assert [t.start for t in tokens] == [i[0] for i in expected_indices] + assert [t.end for t in tokens] == [i[1] for i in expected_indices] + + +@pytest.mark.parametrize( + "text, expected_tokens", + [ + ("Forecast_for_LUNCH", ["Forecast_for_LUNCH"]), + ("Forecast for LUNCH", ["Forecast for LUNCH"]), + ], +) +def test_custom_intent_symbol(text, expected_tokens, spacy_nlp): + component_config = {"intent_tokenization_flag": True, "intent_split_symbol": "+"} + + tk = SpacyTokenizer(component_config) + + message = Message(text) + message.set(SPACY_DOCS[TEXT], spacy_nlp(text)) + message.set(INTENT, text) + + tk.train(TrainingData([message])) + + assert [t.text for t in message.get(TOKENS_NAMES[INTENT])] == expected_tokens diff --git a/tests/nlu/tokenizers/test_tokenizer.py b/tests/nlu/tokenizers/test_tokenizer.py new file mode 100644 index 000000000000..f250472f182f --- /dev/null +++ b/tests/nlu/tokenizers/test_tokenizer.py @@ -0,0 +1,136 @@ +from typing import List, Text + +import pytest + +from rasa.nlu.tokenizers.tokenizer import Token +from rasa.nlu.constants import TEXT, INTENT, RESPONSE, TOKENS_NAMES +from rasa.nlu.training_data import Message, TrainingData +from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer + + +def test_tokens_comparison(): + x = Token("hello", 0) + y = Token("Hello", 0) + + assert x == x + assert y < x + + assert x != 1 + + with pytest.raises(TypeError): + assert y < "a" + + +@pytest.mark.parametrize( + "text, expected_tokens, expected_indices", + [("Forecast for lunch", ["Forecast", "for", "lunch"], [(0, 8), (9, 12), (13, 18)])], +) +def test_train_tokenizer(text, expected_tokens, expected_indices): + tk = WhitespaceTokenizer() + + message = Message(text) + message.set(RESPONSE, text) + message.set(INTENT, text) + + training_data = TrainingData() + training_data.training_examples = [message] + + tk.train(training_data) + + for attribute in [RESPONSE, TEXT]: + tokens = training_data.training_examples[0].get(TOKENS_NAMES[attribute]) + + assert [t.text for t in tokens] == expected_tokens + assert [t.start for t in tokens] == [i[0] for i in expected_indices] + assert [t.end for t in tokens] == [i[1] for i in expected_indices] + + # check intent attribute + tokens = training_data.training_examples[0].get(TOKENS_NAMES[INTENT]) + + assert [t.text for t in tokens] == [text] + + +@pytest.mark.parametrize( + "text, expected_tokens, 
expected_indices", + [("Forecast for lunch", ["Forecast", "for", "lunch"], [(0, 8), (9, 12), (13, 18)])], +) +def test_process_tokenizer(text, expected_tokens, expected_indices): + tk = WhitespaceTokenizer() + + message = Message(text) + + tk.process(message) + + tokens = message.get(TOKENS_NAMES[TEXT]) + + assert [t.text for t in tokens] == expected_tokens + assert [t.start for t in tokens] == [i[0] for i in expected_indices] + assert [t.end for t in tokens] == [i[1] for i in expected_indices] + + +@pytest.mark.parametrize( + "text, expected_tokens", + [ + ("Forecast_for_LUNCH", ["Forecast_for_LUNCH"]), + ("Forecast for LUNCH", ["Forecast for LUNCH"]), + ], +) +def test_split_intent(text, expected_tokens): + component_config = {"intent_tokenization_flag": True, "intent_split_symbol": "+"} + + tk = WhitespaceTokenizer(component_config) + + message = Message(text) + message.set(INTENT, text) + + assert [t.text for t in tk._split_intent(message)] == expected_tokens + + +@pytest.mark.parametrize( + "token_pattern, tokens, expected_tokens", + [ + ( + None, + [Token("hello", 0), Token("there", 6)], + [Token("hello", 0), Token("there", 6)], + ), + ( + "", + [Token("hello", 0), Token("there", 6)], + [Token("hello", 0), Token("there", 6)], + ), + ( + r"(?u)\b\w\w+\b", + [Token("role-based", 0), Token("access-control", 11)], + [ + Token("role", 0), + Token("based", 5), + Token("access", 11), + Token("control", 18), + ], + ), + ( + r".*", + [Token("role-based", 0), Token("access-control", 11)], + [Token("role-based", 0), Token("access-control", 11)], + ), + ( + r"(test)", + [Token("role-based", 0), Token("access-control", 11)], + [Token("role-based", 0), Token("access-control", 11)], + ), + ], +) +def test_apply_token_pattern( + token_pattern: Text, tokens: List[Token], expected_tokens: List[Token] +): + component_config = {"token_pattern": token_pattern} + + tokenizer = WhitespaceTokenizer(component_config) + actual_tokens = tokenizer._apply_token_pattern(tokens) + + assert len(actual_tokens) == len(expected_tokens) + for actual_token, expected_token in zip(actual_tokens, expected_tokens): + assert actual_token.text == expected_token.text + assert actual_token.start == expected_token.start + assert actual_token.end == expected_token.end diff --git a/tests/nlu/tokenizers/test_whitespace_tokenizer.py b/tests/nlu/tokenizers/test_whitespace_tokenizer.py new file mode 100644 index 000000000000..77368a44b6c7 --- /dev/null +++ b/tests/nlu/tokenizers/test_whitespace_tokenizer.py @@ -0,0 +1,161 @@ +import pytest + +from rasa.nlu.components import UnsupportedLanguageError +from rasa.nlu.config import RasaNLUModelConfig +from rasa.nlu.constants import TOKENS_NAMES, TEXT, INTENT +from rasa.nlu.training_data import TrainingData, Message +from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer + + +@pytest.mark.parametrize( + "text, expected_tokens, expected_indices", + [ + ( + "Forecast for lunch", + ["Forecast", "for", "lunch"], + [(0, 8), (9, 12), (13, 18)], + ), + ( + "hey ńöñàśçií how're you?", + ["hey", "ńöñàśçií", "how", "re", "you"], + [(0, 3), (4, 12), (13, 16), (17, 19), (20, 23)], + ), + ( + "50 क्या आपके पास डेरी मिल्क 10 वाले बॉक्स मिल सकते है", + [ + "50", + "क्या", + "आपके", + "पास", + "डेरी", + "मिल्क", + "10", + "वाले", + "बॉक्स", + "मिल", + "सकते", + "है", + ], + [ + (0, 2), + (3, 7), + (8, 12), + (13, 16), + (17, 21), + (22, 27), + (28, 30), + (31, 35), + (36, 41), + (42, 45), + (46, 50), + (51, 53), + ], + ), + ( + 
"https://www.google.com/search?client=safari&rls=en&q=i+like+rasa&ie=UTF-8&oe=UTF-8 https://rasa.com/docs/nlu/components/#tokenizer-whitespace", + [ + "https://www.google.com/search?" + "client=safari&rls=en&q=i+like+rasa&ie=UTF-8&oe=UTF-8", + "https://rasa.com/docs/nlu/components/#tokenizer-whitespace", + ], + [(0, 82), (83, 141)], + ), + ( + "Joselico gracias Dois 🙏🇺🇸🏦🛠🔥⭐️🦅👑💪", + ["Joselico", "gracias", "Dois"], + [(0, 8), (9, 16), (17, 21)], + ), + (":)", [":)"], [(0, 2)]), + ("Hi :-)", ["Hi"], [(0, 2)]), + ("👍", ["👍"], [(0, 1)]), + ], +) +def test_whitespace(text, expected_tokens, expected_indices): + + tk = WhitespaceTokenizer() + + tokens = tk.tokenize(Message(text), attribute=TEXT) + + assert [t.text for t in tokens] == expected_tokens + assert [t.start for t in tokens] == [i[0] for i in expected_indices] + assert [t.end for t in tokens] == [i[1] for i in expected_indices] + + +@pytest.mark.parametrize( + "text, expected_tokens", + [ + ("Forecast_for_LUNCH", ["Forecast_for_LUNCH"]), + ("Forecast for LUNCH", ["Forecast for LUNCH"]), + ], +) +def test_custom_intent_symbol(text, expected_tokens): + component_config = {"intent_tokenization_flag": True, "intent_split_symbol": "+"} + + tk = WhitespaceTokenizer(component_config) + + message = Message(text) + message.set(INTENT, text) + + tk.train(TrainingData([message])) + + assert [t.text for t in message.get(TOKENS_NAMES[INTENT])] == expected_tokens + + +def test_whitespace_training(supervised_embeddings_config): + examples = [ + Message( + "Any Mexican restaurant will do", + { + "intent": "restaurant_search", + "entities": [ + {"start": 4, "end": 11, "value": "Mexican", "entity": "cuisine"} + ], + }, + ), + Message( + "I want Tacos!", + { + "intent": "restaurant_search", + "entities": [ + {"start": 7, "end": 12, "value": "Mexican", "entity": "cuisine"} + ], + }, + ), + ] + + tk = WhitespaceTokenizer() + + tk.train(TrainingData(training_examples=examples), supervised_embeddings_config) + + assert examples[0].data.get(TOKENS_NAMES[TEXT])[0].text == "Any" + assert examples[0].data.get(TOKENS_NAMES[TEXT])[1].text == "Mexican" + assert examples[0].data.get(TOKENS_NAMES[TEXT])[2].text == "restaurant" + assert examples[0].data.get(TOKENS_NAMES[TEXT])[3].text == "will" + assert examples[0].data.get(TOKENS_NAMES[TEXT])[4].text == "do" + assert examples[1].data.get(TOKENS_NAMES[TEXT])[0].text == "I" + assert examples[1].data.get(TOKENS_NAMES[TEXT])[1].text == "want" + assert examples[1].data.get(TOKENS_NAMES[TEXT])[2].text == "Tacos" + + +def test_whitespace_does_not_throw_error(): + import rasa.utils.io as io_utils + + texts = io_utils.read_json_file("data/test_tokenizers/naughty_strings.json") + + tk = WhitespaceTokenizer() + + for text in texts: + tk.tokenize(Message(text), attribute=TEXT) + + +@pytest.mark.parametrize("language, error", [("en", False), ("zh", True)]) +def test_whitespace_language_suuport(language, error, component_builder): + config = RasaNLUModelConfig( + {"language": language, "pipeline": [{"name": "WhitespaceTokenizer"}]} + ) + + if error: + with pytest.raises(UnsupportedLanguageError): + component_builder.create_component({"name": "WhitespaceTokenizer"}, config) + else: + component_builder.create_component({"name": "WhitespaceTokenizer"}, config) diff --git a/tests/nlu/training_data/__init__.py b/tests/nlu/training_data/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/nlu/training_data/formats/test_markdown.py b/tests/nlu/training_data/formats/test_markdown.py new file mode 100644 index 
000000000000..773d23a9e5bc --- /dev/null +++ b/tests/nlu/training_data/formats/test_markdown.py @@ -0,0 +1,231 @@ +from typing import Optional, Text, Dict, Any, List + +import pytest + +from rasa.nlu import load_data +from rasa.nlu.extractors.crf_entity_extractor import CRFEntityExtractor +from rasa.nlu.extractors.duckling_http_extractor import DucklingHTTPExtractor +from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor +from rasa.nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor +from rasa.nlu.training_data.formats import RasaReader +from rasa.nlu.training_data.formats import MarkdownReader, MarkdownWriter + + +@pytest.mark.parametrize( + "example, expected_num_entities", + [ + ( + "I need an [economy class](travel_flight_class:economy) ticket from " + '[boston]{"entity": "city", "role": "from"} to [new york]{"entity": "city",' + ' "role": "to"}, please.', + 3, + ), + ("i'm looking for a place to eat", 0), + ("i'm looking for a place in the [north](loc-direction) of town", 1), + ("show me [chines](cuisine:chinese) restaurants", 1), + ( + 'show me [italian]{"entity": "cuisine", "value": "22_ab-34*3.A:43er*+?df"} ' + "restaurants", + 1, + ), + ("Do you know {ABC} club?", 0), + ("show me [chines](22_ab-34*3.A:43er*+?df) restaurants", 1), + ( + 'I want to fly from [Berlin]{"entity": "city", "role": "to"} to [LA]{' + '"entity": "city", "role": "from", "value": "Los Angeles"}', + 2, + ), + ( + 'I want to fly from [Berlin](city) to [LA]{"entity": "city", "role": ' + '"from", "value": "Los Angeles"}', + 2, + ), + ( + 'I want to travel to [Cairo]{"entity": "city"} \\n' + 'Also, do you have flights to [Alexandria]{"entity": "city"}?', + 2, + ), + ], +) +def test_markdown_entity_regex(example: Text, expected_num_entities: int): + r = MarkdownReader() + + md = f""" +## intent:test-intent +- {example} + """ + + result = r.reads(md) + + assert len(result.training_examples) == 1 + actual_example = result.training_examples[0] + assert actual_example.data["intent"] == "test-intent" + assert len(actual_example.data.get("entities", [])) == expected_num_entities + + +def test_markdown_empty_section(): + data = load_data("data/test/markdown_single_sections/empty_section.md") + assert data.regex_features == [{"name": "greet", "pattern": r"hey[^\s]*"}] + + assert not data.entity_synonyms + assert len(data.lookup_tables) == 1 + assert data.lookup_tables[0]["name"] == "chinese" + assert "Chinese" in data.lookup_tables[0]["elements"] + assert "Chines" in data.lookup_tables[0]["elements"] + + +def test_markdown_not_existing_section(): + with pytest.raises(ValueError): + load_data("data/test/markdown_single_sections/not_existing_section.md") + + +def test_section_value_with_delimiter(): + td_section_with_delimiter = load_data( + "data/test/markdown_single_sections/section_with_delimiter.md" + ) + assert td_section_with_delimiter.entity_synonyms == {"10:00 am": "10:00"} + + +def test_markdown_order(): + r = MarkdownReader() + + md = """## intent:z +- i'm looking for a place to eat +- i'm looking for a place in the [north](loc-direction) of town + +## intent:a +- intent a +- also very important +""" + + training_data = r.reads(md) + assert training_data.nlu_as_markdown() == md + + +def test_markdown_unespace_tokens(): + r = MarkdownReader() + + md = """## intent:test-intent +- Hi \\t Can you help me?\\n I want to go to [Alexandria]{"entity": "city"} +""" + expected_num_entities = 1 + + training_data = r.reads(md) + assert len(training_data.training_examples) == 1 + + 
actual_example = training_data.training_examples[0] + assert actual_example.data["intent"] == "test-intent" + assert len(actual_example.data.get("entities", [])) == expected_num_entities + + +def test_dump_nlu_with_responses(): + md = """## intent:greet +- hey +- howdy +- hey there +- hello +- hi +- good morning +- good evening +- dear sir + +## intent:chitchat/ask_name +- What's your name? +- What can I call you? + +## intent:chitchat/ask_weather +- How's the weather? +- Is it too hot outside? +""" + + r = MarkdownReader() + nlu_data = r.reads(md) + + dumped = nlu_data.nlu_as_markdown() + assert dumped == md + + +@pytest.mark.parametrize( + "entity_extractor,expected_output", + [ + (None, '- [test]{"entity": "word", "value": "random"}'), + ("", '- [test]{"entity": "word", "value": "random"}'), + ("random-extractor", '- [test]{"entity": "word", "value": "random"}'), + (CRFEntityExtractor.__name__, '- [test]{"entity": "word", "value": "random"}'), + (DucklingHTTPExtractor.__name__, "- test"), + (SpacyEntityExtractor.__name__, "- test"), + ( + MitieEntityExtractor.__name__, + '- [test]{"entity": "word", "value": "random"}', + ), + ], +) +def test_dump_trainable_entities( + entity_extractor: Optional[Text], expected_output: Text +): + training_data_json = { + "rasa_nlu_data": { + "common_examples": [ + { + "text": "test", + "intent": "greet", + "entities": [ + {"start": 0, "end": 4, "value": "random", "entity": "word"} + ], + } + ] + } + } + if entity_extractor is not None: + training_data_json["rasa_nlu_data"]["common_examples"][0]["entities"][0][ + "extractor" + ] = entity_extractor + + training_data_object = RasaReader().read_from_json(training_data_json) + md_dump = MarkdownWriter().dumps(training_data_object) + assert md_dump.splitlines()[1] == expected_output + + +@pytest.mark.parametrize( + "entity, expected_output", + [ + ( + { + "start": 0, + "end": 4, + "value": "random", + "entity": "word", + "role": "role-name", + "group": "group-name", + }, + '- [test]{"entity": "word", "role": "role-name", "group": "group-name", ' + '"value": "random"}', + ), + ({"start": 0, "end": 4, "entity": "word"}, "- [test](word)"), + ( + { + "start": 0, + "end": 4, + "entity": "word", + "role": "role-name", + "group": "group-name", + }, + '- [test]{"entity": "word", "role": "role-name", "group": "group-name"}', + ), + ( + {"start": 0, "end": 4, "entity": "word", "value": "random"}, + '- [test]{"entity": "word", "value": "random"}', + ), + ], +) +def test_dump_entities(entity: Dict[Text, Any], expected_output: Text): + training_data_json = { + "rasa_nlu_data": { + "common_examples": [ + {"text": "test", "intent": "greet", "entities": [entity]} + ] + } + } + training_data_object = RasaReader().read_from_json(training_data_json) + md_dump = MarkdownWriter().dumps(training_data_object) + assert md_dump.splitlines()[1] == expected_output diff --git a/tests/nlu/training_data/formats/test_rasa_yaml.py b/tests/nlu/training_data/formats/test_rasa_yaml.py new file mode 100644 index 000000000000..fab01da82309 --- /dev/null +++ b/tests/nlu/training_data/formats/test_rasa_yaml.py @@ -0,0 +1,381 @@ +import textwrap +from typing import Text + +import pytest + +from rasa.core.domain import InvalidDomain +import rasa.utils.io as io_utils +from rasa.constants import LATEST_TRAINING_DATA_FORMAT_VERSION +from rasa.nlu.constants import INTENT +from rasa.nlu.training_data.formats.rasa_yaml import RasaYAMLReader, RasaYAMLWriter + +MULTILINE_INTENT_EXAMPLES = f""" +version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" +nlu: +- 
intent: intent_name + examples: | + - how much CO2 will that use? + - how much carbon will a one way flight from [new york]{{"entity": "city", "role": "from"}} to california produce? +""" + +MULTILINE_INTENT_EXAMPLE_WITH_SYNONYM = f""" +version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" +nlu: +- intent: intent_name + examples: | + - flight from [boston]{{"entity": "city", "role": "from", "value": "bostn"}}? +""" + +MULTILINE_INTENT_EXAMPLES_NO_LEADING_SYMBOL = f""" +version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" +nlu: +- intent: intent_name + examples: | + how much CO2 will that use? + - how much carbon will a one way flight from [new york]{{"entity": "city", "role": "from"}} to california produce? +""" + +EXAMPLE_NO_VERSION_SPECIFIED = """ +nlu: +- intent: intent_name + examples: | + - how much carbon will a one way flight from [new york]{"entity": "city", "role": "from"} to california produce? +""" + +INTENT_EXAMPLES_WITH_METADATA = f""" +version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" +nlu: +- intent: intent_name + metadata: + examples: + - text: | + how much CO2 will that use? + metadata: + sentiment: positive + - text: | + how much carbon will a one way flight from [new york]{{"entity": "city", "role": "from"}} to california produce? +""" + +MINIMAL_VALID_EXAMPLE = f""" +version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" +nlu:\n +stories: +""" + +WRONG_YAML_NLU_CONTENT_1 = """ +nlu: +- intent: name + non_key: value +""" + +WRONG_YAML_NLU_CONTENT_2 = """ +nlu: +- intent: greet + examples: | + - Hi + - Hey +""" + +SYNONYM_EXAMPLE = f""" +version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" +nlu: +- synonym: savings + examples: | + - pink pig + - savings account +""" + +LOOKUP_ITEM_NAME = "additional_currencies" +LOOKUP_EXAMPLE = f""" +version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" +nlu: +- lookup: {LOOKUP_ITEM_NAME} + examples: | + - Peso + - Euro + - Dollar +""" + +REGEX_NAME = "zipcode" +PATTERN_1 = "[0-9]{4}" +PATTERN_2 = "[0-9]{5}" +REGEX_EXAMPLE = f""" +version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" +nlu: +- regex: {REGEX_NAME} + examples: | + - {PATTERN_1} + - {PATTERN_2} +""" + + +def test_wrong_format_raises(): + + wrong_yaml_nlu_content = """ + !! 
+ """ + + parser = RasaYAMLReader() + with pytest.raises(ValueError): + parser.reads(wrong_yaml_nlu_content) + + +@pytest.mark.parametrize( + "example", [WRONG_YAML_NLU_CONTENT_1, WRONG_YAML_NLU_CONTENT_2] +) +def test_wrong_schema_raises(example: Text): + + parser = RasaYAMLReader() + with pytest.raises(ValueError): + parser.reads(example) + + +@pytest.mark.parametrize( + "example", [MULTILINE_INTENT_EXAMPLES, INTENT_EXAMPLES_WITH_METADATA] +) +def test_multiline_intent_is_parsed(example: Text): + parser = RasaYAMLReader() + + with pytest.warns(None) as record: + training_data = parser.reads(example) + + assert not len(record) + + assert len(training_data.training_examples) == 2 + assert training_data.training_examples[0].get( + INTENT + ) == training_data.training_examples[1].get(INTENT) + assert not len(training_data.entity_synonyms) + + +@pytest.mark.parametrize( + "example", + [MULTILINE_INTENT_EXAMPLES, SYNONYM_EXAMPLE, LOOKUP_EXAMPLE, REGEX_EXAMPLE], +) +def test_yaml_examples_are_written(example: Text): + parser = RasaYAMLReader() + writer = RasaYAMLWriter() + + training_data = parser.reads(example) + assert example.strip() == writer.dumps(training_data).strip() + + +def test_multiline_intent_example_is_skipped_when_no_leading_symbol(): + parser = RasaYAMLReader() + + with pytest.warns(None) as record: + training_data = parser.reads(MULTILINE_INTENT_EXAMPLES_NO_LEADING_SYMBOL) + + # warning for the missing leading symbol + assert len(record) == 1 + + assert len(training_data.training_examples) == 1 + assert not len(training_data.entity_synonyms) + + +def test_no_version_specified_raises_warning(): + parser = RasaYAMLReader() + + with pytest.warns(None) as record: + parser.reads(EXAMPLE_NO_VERSION_SPECIFIED) + + # warning for the missing version string + assert len(record) == 1 + + +@pytest.mark.parametrize( + "example, expected_num_entities", + [ + ( + "I need an [economy class](travel_flight_class:economy) ticket from " + '[boston]{"entity": "city", "role": "from"} to [new york]{"entity": "city",' + ' "role": "to"}, please.', + 3, + ), + ("i'm looking for a place to eat", 0), + ("i'm looking for a place in the [north](loc-direction) of town", 1), + ("show me [chines](cuisine:chinese) restaurants", 1), + ( + 'show me [italian]{"entity": "cuisine", "value": "22_ab-34*3.A:43er*+?df"} ' + "restaurants", + 1, + ), + ("Do you know {ABC} club?", 0), + ("show me [chines](22_ab-34*3.A:43er*+?df) restaurants", 1), + ( + 'I want to fly from [Berlin]{"entity": "city", "role": "to"} to [LA]{' + '"entity": "city", "role": "from", "value": "Los Angeles"}', + 2, + ), + ( + 'I want to fly from [Berlin](city) to [LA]{"entity": "city", "role": ' + '"from", "value": "Los Angeles"}', + 2, + ), + ], +) +def test_entity_is_extracted(example: Text, expected_num_entities: int): + reader = RasaYAMLReader() + + intent_name = "test-intent" + + yaml_string = f""" +nlu: +- intent: {intent_name} + examples: | + - {example} +""" + + result = reader.reads(yaml_string) + + assert len(result.training_examples) == 1 + actual_example = result.training_examples[0] + assert actual_example.data["intent"] == intent_name + assert len(actual_example.data.get("entities", [])) == expected_num_entities + + +def test_synonyms_are_parsed(): + parser = RasaYAMLReader() + training_data = parser.reads(SYNONYM_EXAMPLE) + + assert len(training_data.entity_synonyms) == 2 + assert training_data.entity_synonyms["pink pig"] == "savings" + assert training_data.entity_synonyms["savings account"] == "savings" + + +def 
test_synonyms_are_extracted_from_entities(): + parser = RasaYAMLReader() + training_data = parser.reads(MULTILINE_INTENT_EXAMPLE_WITH_SYNONYM) + + assert len(training_data.entity_synonyms) == 1 + + +def test_lookup_is_parsed(): + + parser = RasaYAMLReader() + training_data = parser.reads(LOOKUP_EXAMPLE) + + assert training_data.lookup_tables[0]["name"] == LOOKUP_ITEM_NAME + assert len(training_data.lookup_tables[0]["elements"]) == 3 + + +def test_regex_is_parsed(): + + parser = RasaYAMLReader() + training_data = parser.reads(REGEX_EXAMPLE) + + assert len(training_data.regex_features) == 2 + assert {"name": REGEX_NAME, "pattern": PATTERN_1} in training_data.regex_features + assert {"name": REGEX_NAME, "pattern": PATTERN_2} in training_data.regex_features + + +def test_minimal_valid_example(): + parser = RasaYAMLReader() + + with pytest.warns(None) as record: + parser.reads(MINIMAL_VALID_EXAMPLE) + + assert not len(record) + + +def test_minimal_yaml_nlu_file(tmp_path): + target_file = tmp_path / "test_nlu_file.yaml" + io_utils.write_yaml(MINIMAL_VALID_EXAMPLE, target_file, True) + assert RasaYAMLReader.is_yaml_nlu_file(target_file) + + +def test_nlg_reads_text(): + responses_yml = textwrap.dedent( + """ + responses: + chitchat/ask_weather: + - text: Where do you want to check the weather? + """ + ) + + reader = RasaYAMLReader() + result = reader.reads(responses_yml) + + assert result.responses == { + "chitchat/ask_weather": [{"text": "Where do you want to check the weather?"}] + } + + +def test_nlg_reads_any_multimedia(): + responses_yml = textwrap.dedent( + """ + responses: + chitchat/ask_weather: + - text: Where do you want to check the weather? + image: https://example.com/weather.jpg + temperature: 25°C + """ + ) + + reader = RasaYAMLReader() + result = reader.reads(responses_yml) + + assert result.responses == { + "chitchat/ask_weather": [ + { + "text": "Where do you want to check the weather?", + "image": "https://example.com/weather.jpg", + "temperature": "25°C", + } + ] + } + + +def test_nlg_fails_to_read_empty(): + responses_yml = textwrap.dedent( + """ + responses: + """ + ) + + reader = RasaYAMLReader() + + with pytest.raises(ValueError): + reader.reads(responses_yml) + + +def test_nlg_fails_on_empty_response(): + responses_yml = textwrap.dedent( + """ + responses: + chitchat/ask_weather: + """ + ) + + reader = RasaYAMLReader() + + with pytest.raises(InvalidDomain): + reader.reads(responses_yml) + + +def test_nlg_multimedia_load_dump_roundtrip(): + responses_yml = textwrap.dedent( + """ + responses: + chitchat/ask_weather: + - text: Where do you want to check the weather? + image: https://example.com/weather.jpg + temperature: 25°C + + chitchat/ask_name: + - text: My name is Sara. 
+ """ + ) + + reader = RasaYAMLReader() + result = reader.reads(responses_yml) + + dumped = RasaYAMLWriter().dumps(result) + + validation_reader = RasaYAMLReader() + dumped_result = validation_reader.reads(dumped) + + assert dumped_result.responses == result.responses + + # dumping again should also not change the format + assert dumped == RasaYAMLWriter().dumps(dumped_result) diff --git a/tests/nlu/training_data/test_entities_parser.py b/tests/nlu/training_data/test_entities_parser.py new file mode 100644 index 000000000000..c84ec01b068c --- /dev/null +++ b/tests/nlu/training_data/test_entities_parser.py @@ -0,0 +1,115 @@ +from typing import Text, List, Dict, Any + +import pytest + +import rasa.nlu.training_data.entities_parser as entities_parser + + +@pytest.mark.parametrize( + "example, expected_entities, expected_text", + [ + ( + "I need an [economy class](travel_flight_class:economy) ticket from " + '[boston]{"entity": "city", "role": "from"} to [new york]{"entity": "city",' + ' "role": "to"}, please.', + [ + { + "start": 10, + "end": 23, + "value": "economy", + "entity": "travel_flight_class", + }, + { + "start": 36, + "end": 42, + "value": "boston", + "entity": "city", + "role": "from", + }, + { + "start": 46, + "end": 54, + "value": "new york", + "entity": "city", + "role": "to", + }, + ], + "I need an economy class ticket from boston to new york, please.", + ), + ("i'm looking for a place to eat", [], "i'm looking for a place to eat"), + ( + "i'm looking for a place in the [north](loc-direction) of town", + [{"start": 31, "end": 36, "value": "north", "entity": "loc-direction"}], + "i'm looking for a place in the north of town", + ), + ( + "show me [chines](cuisine:chinese) restaurants", + [{"start": 8, "end": 14, "value": "chinese", "entity": "cuisine"}], + "show me chines restaurants", + ), + ( + 'show me [italian]{"entity": "cuisine", "value": "22_ab-34*3.A:43er*+?df"} ' + "restaurants", + [ + { + "start": 8, + "end": 15, + "value": "22_ab-34*3.A:43er*+?df", + "entity": "cuisine", + } + ], + "show me italian restaurants", + ), + ("Do you know {ABC} club?", [], "Do you know {ABC} club?"), + ( + "show me [chines](22_ab-34*3.A:43er*+?df) restaurants", + [{"start": 8, "end": 14, "value": "43er*+?df", "entity": "22_ab-34*3.A"}], + "show me chines restaurants", + ), + ( + 'I want to fly from [Berlin]{"entity": "city", "role": "to"} to [LA]{' + '"entity": "city", "role": "from", "value": "Los Angeles"}', + [ + { + "start": 19, + "end": 25, + "value": "Berlin", + "entity": "city", + "role": "to", + }, + { + "start": 29, + "end": 31, + "value": "Los Angeles", + "entity": "city", + "role": "from", + }, + ], + "I want to fly from Berlin to LA", + ), + ( + 'I want to fly from [Berlin](city) to [LA]{"entity": "city", "role": ' + '"from", "value": "Los Angeles"}', + [ + {"start": 19, "end": 25, "value": "Berlin", "entity": "city"}, + { + "start": 29, + "end": 31, + "value": "Los Angeles", + "entity": "city", + "role": "from", + }, + ], + "I want to fly from Berlin to LA", + ), + ], +) +def test_markdown_entity_regex( + example: Text, expected_entities: List[Dict[Text, Any]], expected_text: Text +): + + result = entities_parser.find_entities_in_training_example(example) + assert result == expected_entities + + replaced_text = entities_parser.replace_entities(example) + assert replaced_text == expected_text diff --git a/tests/nlu/training_data/test_lookup_tables_parser.py b/tests/nlu/training_data/test_lookup_tables_parser.py new file mode 100644 index 000000000000..e37bed62a32a --- /dev/null 
+++ b/tests/nlu/training_data/test_lookup_tables_parser.py @@ -0,0 +1,15 @@ +import rasa.nlu.training_data.lookup_tables_parser as lookup_tables_parser + + +def test_add_item_to_lookup_tables(): + lookup_item_title = "additional_currencies" + lookup_examples = ["Peso", "Euro", "Dollar"] + + result = [] + + for example in lookup_examples: + lookup_tables_parser.add_item_to_lookup_tables( + lookup_item_title, example, result + ) + + assert result == [{"name": lookup_item_title, "elements": lookup_examples}] diff --git a/tests/nlu/training_data/test_markdown_nlg.py b/tests/nlu/training_data/test_markdown_nlg.py new file mode 100644 index 000000000000..3a43396016e3 --- /dev/null +++ b/tests/nlu/training_data/test_markdown_nlg.py @@ -0,0 +1,15 @@ +from rasa.nlu.training_data.formats import NLGMarkdownReader + + +def test_markdownnlg_read_newlines(): + md = """ +## Ask something +* faq/ask_something + - Super answer in 2\\nlines + """ + reader = NLGMarkdownReader() + result = reader.reads(md) + + assert result.responses == { + "faq/ask_something": [{"text": "Super answer in 2\nlines"}] + } diff --git a/tests/nlu/training_data/test_message.py b/tests/nlu/training_data/test_message.py new file mode 100644 index 000000000000..43d88ee5ecf6 --- /dev/null +++ b/tests/nlu/training_data/test_message.py @@ -0,0 +1,234 @@ +from typing import Optional, Text, List + +import pytest +import numpy as np +import scipy.sparse + +from rasa.nlu.featurizers.featurizer import Features +from rasa.nlu.constants import TEXT, FEATURE_TYPE_SEQUENCE, FEATURE_TYPE_SENTENCE +from rasa.nlu.training_data import Message + + +@pytest.mark.parametrize( + "features, attribute, featurizers, expected_seq_features, expected_sen_features", + [ + (None, TEXT, [], None, None), + ( + [Features(np.array([1, 1, 0]), FEATURE_TYPE_SEQUENCE, TEXT, "test")], + TEXT, + [], + [1, 1, 0], + None, + ), + ( + [ + Features(np.array([1, 1, 0]), FEATURE_TYPE_SEQUENCE, TEXT, "c2"), + Features(np.array([1, 2, 2]), FEATURE_TYPE_SENTENCE, TEXT, "c1"), + Features(np.array([1, 2, 1]), FEATURE_TYPE_SEQUENCE, TEXT, "c1"), + ], + TEXT, + [], + [1, 2, 1, 1, 1, 0], + [1, 2, 2], + ), + ( + [ + Features(np.array([1, 1, 0]), FEATURE_TYPE_SEQUENCE, TEXT, "c1"), + Features(np.array([1, 2, 1]), FEATURE_TYPE_SENTENCE, TEXT, "test"), + Features(np.array([1, 1, 1]), FEATURE_TYPE_SEQUENCE, TEXT, "test"), + ], + TEXT, + ["c1"], + [1, 1, 0], + None, + ), + ], +) +def test_get_dense_features( + features: Optional[List[Features]], + attribute: Text, + featurizers: List[Text], + expected_seq_features: Optional[List[Features]], + expected_sen_features: Optional[List[Features]], +): + + message = Message("This is a test sentence.", features=features) + + actual_seq_features, actual_sen_features = message.get_dense_features( + attribute, featurizers + ) + + assert np.all(actual_sen_features == expected_sen_features) + assert np.all(actual_seq_features == expected_seq_features) + + +@pytest.mark.parametrize( + "features, attribute, featurizers, expected_seq_features, expected_sen_features", + [ + (None, TEXT, [], None, None), + ( + [ + Features( + scipy.sparse.csr_matrix([1, 1, 0]), + FEATURE_TYPE_SEQUENCE, + TEXT, + "test", + ) + ], + TEXT, + [], + [1, 1, 0], + None, + ), + ( + [ + Features( + scipy.sparse.csr_matrix([1, 1, 0]), + FEATURE_TYPE_SEQUENCE, + TEXT, + "c2", + ), + Features( + scipy.sparse.csr_matrix([1, 2, 2]), + FEATURE_TYPE_SENTENCE, + TEXT, + "c1", + ), + Features( + scipy.sparse.csr_matrix([1, 2, 1]), + FEATURE_TYPE_SEQUENCE, + TEXT, + "c1", + ), + ], + TEXT, + 
[], + [1, 2, 1, 1, 1, 0], + [1, 2, 2], + ), + ( + [ + Features( + scipy.sparse.csr_matrix([1, 1, 0]), + FEATURE_TYPE_SEQUENCE, + TEXT, + "c1", + ), + Features( + scipy.sparse.csr_matrix([1, 2, 1]), + FEATURE_TYPE_SENTENCE, + TEXT, + "test", + ), + Features( + scipy.sparse.csr_matrix([1, 1, 1]), + FEATURE_TYPE_SEQUENCE, + TEXT, + "test", + ), + ], + TEXT, + ["c1"], + [1, 1, 0], + None, + ), + ], +) +def test_get_sparse_features( + features: Optional[List[Features]], + attribute: Text, + featurizers: List[Text], + expected_seq_features: Optional[List[Features]], + expected_sen_features: Optional[List[Features]], +): + message = Message("This is a test sentence.", features=features) + + actual_seq_features, actual_sen_features = message.get_sparse_features( + attribute, featurizers + ) + + if expected_seq_features is None: + assert actual_seq_features is None + else: + assert actual_seq_features is not None + assert np.all(actual_seq_features.toarray() == expected_seq_features) + + if expected_sen_features is None: + assert actual_sen_features is None + else: + assert actual_sen_features is not None + assert np.all(actual_sen_features.toarray() == expected_sen_features) + + +@pytest.mark.parametrize( + "features, attribute, featurizers, expected", + [ + (None, TEXT, [], False), + ( + [ + Features( + scipy.sparse.csr_matrix([1, 1, 0]), + FEATURE_TYPE_SEQUENCE, + TEXT, + "test", + ) + ], + TEXT, + [], + True, + ), + ( + [ + Features( + scipy.sparse.csr_matrix([1, 1, 0]), + FEATURE_TYPE_SEQUENCE, + TEXT, + "c2", + ), + Features(np.ndarray([1, 2, 2]), FEATURE_TYPE_SEQUENCE, TEXT, "c1"), + ], + TEXT, + [], + True, + ), + ( + [ + Features( + scipy.sparse.csr_matrix([1, 1, 0]), + FEATURE_TYPE_SEQUENCE, + TEXT, + "c2", + ), + Features(np.ndarray([1, 2, 2]), FEATURE_TYPE_SEQUENCE, TEXT, "c1"), + ], + TEXT, + ["c1"], + True, + ), + ( + [ + Features( + scipy.sparse.csr_matrix([1, 1, 0]), + FEATURE_TYPE_SEQUENCE, + TEXT, + "c2", + ), + Features(np.ndarray([1, 2, 2]), FEATURE_TYPE_SEQUENCE, TEXT, "c1"), + ], + TEXT, + ["other"], + False, + ), + ], +) +def test_features_present( + features: Optional[List[Features]], + attribute: Text, + featurizers: List[Text], + expected: bool, +): + message = Message("This is a test sentence.", features=features) + + actual = message.features_present(attribute, featurizers) + + assert actual == expected diff --git a/tests/nlu/training_data/test_synonyms_parser.py b/tests/nlu/training_data/test_synonyms_parser.py new file mode 100644 index 000000000000..ebd9cf060f13 --- /dev/null +++ b/tests/nlu/training_data/test_synonyms_parser.py @@ -0,0 +1,37 @@ +import rasa.nlu.training_data.synonyms_parser as synonyms_parser + + +def test_add_synonym(): + + synonym_name = "savings" + synonym_examples = ["pink pig", "savings account"] + expected_result = {"pink pig": synonym_name, "savings account": synonym_name} + + result = {} + + for example in synonym_examples: + synonyms_parser.add_synonym(example, synonym_name, result) + + assert result == expected_result + + +def test_add_synonyms_from_entities(): + + training_example = "I want to fly from Berlin to LA" + + entities = [ + {"start": 19, "end": 25, "value": "Berlin", "entity": "city", "role": "to"}, + { + "start": 29, + "end": 31, + "value": "Los Angeles", + "entity": "city", + "role": "from", + }, + ] + + result = {} + + synonyms_parser.add_synonyms_from_entities(training_example, entities, result) + + assert result == {"LA": "Los Angeles"} diff --git a/tests/nlu/base/test_training_data.py 
b/tests/nlu/training_data/test_training_data.py similarity index 60% rename from tests/nlu/base/test_training_data.py rename to tests/nlu/training_data/test_training_data.py index 0f27adcfa35e..100d0a4df2d2 100644 --- a/tests/nlu/base/test_training_data.py +++ b/tests/nlu/training_data/test_training_data.py @@ -1,56 +1,20 @@ -# -*- coding: utf-8 -*- +from typing import Text import pytest -import tempfile -from jsonschema import ValidationError +import rasa.utils.io as io_utils from rasa.nlu import training_data +from rasa.nlu.constants import TEXT, RESPONSE_KEY_ATTRIBUTE from rasa.nlu.convert import convert_training_data from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer from rasa.nlu.training_data import TrainingData -from rasa.nlu.training_data.formats import MarkdownReader -from rasa.nlu.training_data.formats.rasa import validate_rasa_nlu_data -from rasa.nlu.training_data.loading import guess_format, UNK, load_data +from rasa.nlu.training_data.loading import guess_format, UNK, RASA_YAML, JSON, MARKDOWN from rasa.nlu.training_data.util import get_file_format -import rasa.utils.io as io_utils - - -def test_example_training_data_is_valid(): - demo_json = "data/examples/rasa/demo-rasa.json" - data = io_utils.read_json_file(demo_json) - validate_rasa_nlu_data(data) - - -@pytest.mark.parametrize( - "invalid_data", - [ - {"wrong_top_level": []}, - ["this is not a toplevel dict"], - { - "rasa_nlu_data": { - "common_examples": [{"intent": "some example without text"}] - } - }, - { - "rasa_nlu_data": { - "common_examples": [ - { - "text": "mytext", - "entities": [{"start": "INVALID", "end": 0, "entity": "x"}], - } - ] - } - }, - ], -) -def test_validation_is_throwing_exceptions(invalid_data): - with pytest.raises(ValidationError): - validate_rasa_nlu_data(invalid_data) def test_luis_data(): - td = training_data.load_data("data/examples/luis/demo-restaurants.json") + td = training_data.load_data("data/examples/luis/demo-restaurants_v5.json") assert not td.is_empty() assert len(td.entity_examples) == 8 @@ -102,32 +66,41 @@ def test_lookup_table_json(): lookup_fname = "data/test/lookup_tables/plates.txt" td_lookup = training_data.load_data("data/test/lookup_tables/lookup_table.json") assert not td_lookup.is_empty() + assert len(td_lookup.lookup_tables) == 1 assert td_lookup.lookup_tables[0]["name"] == "plates" assert td_lookup.lookup_tables[0]["elements"] == lookup_fname - assert td_lookup.lookup_tables[1]["name"] == "drinks" - assert td_lookup.lookup_tables[1]["elements"] == [ - "mojito", - "lemonade", - "sweet berry wine", - "tea", - "club mate", - ] def test_lookup_table_md(): lookup_fname = "data/test/lookup_tables/plates.txt" td_lookup = training_data.load_data("data/test/lookup_tables/lookup_table.md") assert not td_lookup.is_empty() + assert len(td_lookup.lookup_tables) == 1 assert td_lookup.lookup_tables[0]["name"] == "plates" assert td_lookup.lookup_tables[0]["elements"] == lookup_fname - assert td_lookup.lookup_tables[1]["name"] == "drinks" - assert td_lookup.lookup_tables[1]["elements"] == [ - "mojito", - "lemonade", - "sweet berry wine", - "tea", - "club mate", - ] + + +def test_composite_entities_data(): + td = training_data.load_data("data/test/demo-rasa-composite-entities.md") + assert not td.is_empty() + assert len(td.entity_examples) == 11 + assert len(td.intent_examples) == 45 + assert len(td.training_examples) == 45 + assert td.entity_synonyms == {"SF": "San Fransisco"} + assert 
td.intents == { + "order_pizza", + "book_flight", + "chitchat", + "greet", + "goodbye", + "affirm", + } + assert td.entities == {"location", "topping", "size"} + assert td.entity_groups == {"1", "2"} + assert td.entity_roles == {"to", "from"} + assert td.number_of_examples_per_entity["entity 'location'"] == 8 + assert td.number_of_examples_per_entity["group '1'"] == 9 + assert td.number_of_examples_per_entity["role 'from'"] == 3 @pytest.mark.parametrize( @@ -149,12 +122,12 @@ def test_demo_data(files): td = training_data_from_paths(files, language="en") assert td.intents == {"affirm", "greet", "restaurant_search", "goodbye", "chitchat"} assert td.entities == {"location", "cuisine"} - assert td.responses == {"I am Mr. Bot", "It's sunny where I live"} + assert set(td.responses.keys()) == {"chitchat/ask_name", "chitchat/ask_weather"} assert len(td.training_examples) == 46 assert len(td.intent_examples) == 46 assert len(td.response_examples) == 4 assert len(td.entity_examples) == 11 - assert len(td.nlg_stories) == 2 + assert len(td.responses) == 2 assert td.entity_synonyms == { "Chines": "chinese", @@ -170,6 +143,37 @@ def test_demo_data(files): ] +@pytest.mark.parametrize( + "files", + [ + [ + "data/examples/rasa/demo-rasa.json", + "data/examples/rasa/demo-rasa-responses.md", + ], + [ + "data/examples/rasa/demo-rasa.md", + "data/examples/rasa/demo-rasa-responses.md", + ], + ], +) +def test_demo_data_filter_out_retrieval_intents(files): + from rasa.importers.utils import training_data_from_paths + + td = training_data_from_paths(files, language="en") + assert len(td.training_examples) == 46 + + td1 = td.filter_training_examples(lambda ex: ex.get(RESPONSE_KEY_ATTRIBUTE) is None) + assert len(td1.training_examples) == 42 + + td2 = td.filter_training_examples( + lambda ex: ex.get(RESPONSE_KEY_ATTRIBUTE) is not None + ) + assert len(td2.training_examples) == 4 + + # make sure filtering operation doesn't mutate the source training data + assert len(td.training_examples) == 46 + + @pytest.mark.parametrize( "filepaths", [["data/examples/rasa/demo-rasa.md", "data/examples/rasa/demo-rasa-responses.md"]], @@ -178,15 +182,54 @@ def test_train_test_split(filepaths): from rasa.importers.utils import training_data_from_paths td = training_data_from_paths(filepaths, language="en") + assert td.intents == {"affirm", "greet", "restaurant_search", "goodbye", "chitchat"} assert td.entities == {"location", "cuisine"} + assert set(td.responses.keys()) == {"chitchat/ask_name", "chitchat/ask_weather"} + assert len(td.training_examples) == 46 assert len(td.intent_examples) == 46 + assert len(td.response_examples) == 4 td_train, td_test = td.train_test_split(train_frac=0.8) - assert len(td_train.training_examples) == 35 - assert len(td_test.training_examples) == 11 + assert len(td_test.training_examples) + len(td_train.training_examples) == 46 + assert len(td_train.training_examples) == 34 + assert len(td_test.training_examples) == 12 + + assert len(td.number_of_examples_per_intent.keys()) == len( + td_test.number_of_examples_per_intent.keys() + ) + assert len(td.number_of_examples_per_intent.keys()) == len( + td_train.number_of_examples_per_intent.keys() + ) + assert len(td.number_of_examples_per_response.keys()) == len( + td_test.number_of_examples_per_response.keys() + ) + assert len(td.number_of_examples_per_response.keys()) == len( + td_train.number_of_examples_per_response.keys() + ) + + +@pytest.mark.parametrize( + "filepaths", + [["data/examples/rasa/demo-rasa.md", "data/examples/rasa/demo-rasa-responses.md"]], 
+) +def test_train_test_split_with_random_seed(filepaths): + from rasa.importers.utils import training_data_from_paths + + td = training_data_from_paths(filepaths, language="en") + + td_train_1, td_test_1 = td.train_test_split(train_frac=0.8, random_seed=1) + td_train_2, td_test_2 = td.train_test_split(train_frac=0.8, random_seed=1) + train_1_intent_examples = [e.text for e in td_train_1.intent_examples] + train_2_intent_examples = [e.text for e in td_train_2.intent_examples] + + test_1_intent_examples = [e.text for e in td_test_1.intent_examples] + test_2_intent_examples = [e.text for e in td_test_2.intent_examples] + + assert train_1_intent_examples == train_2_intent_examples + assert test_1_intent_examples == test_2_intent_examples @pytest.mark.parametrize( @@ -194,6 +237,7 @@ def test_train_test_split(filepaths): [ ("data/examples/rasa/demo-rasa.json", "data/test/multiple_files_json"), ("data/examples/rasa/demo-rasa.md", "data/test/multiple_files_markdown"), + ("data/examples/rasa/demo-rasa.md", "data/test/duplicate_intents_markdown"), ], ) def test_data_merging(files): @@ -220,7 +264,7 @@ def test_markdown_single_sections(): assert td_syn_only.entity_synonyms == {"Chines": "chinese", "Chinese": "chinese"} -def test_repeated_entities(): +def test_repeated_entities(tmp_path): data = """ { "rasa_nlu_data": { @@ -240,21 +284,20 @@ def test_repeated_entities(): ] } }""" - with tempfile.NamedTemporaryFile(suffix="_tmp_training_data.json") as f: - f.write(data.encode("utf-8")) - f.flush() - td = training_data.load_data(f.name) - assert len(td.entity_examples) == 1 - example = td.entity_examples[0] - entities = example.get("entities") - assert len(entities) == 1 - tokens = WhitespaceTokenizer().tokenize(example.text) - start, end = MitieEntityExtractor.find_entity(entities[0], example.text, tokens) - assert start == 9 - assert end == 10 - - -def test_multiword_entities(): + f = tmp_path / "tmp_training_data.json" + f.write_text(data, io_utils.DEFAULT_ENCODING) + td = training_data.load_data(str(f)) + assert len(td.entity_examples) == 1 + example = td.entity_examples[0] + entities = example.get("entities") + assert len(entities) == 1 + tokens = WhitespaceTokenizer().tokenize(example, attribute=TEXT) + start, end = MitieEntityExtractor.find_entity(entities[0], example.text, tokens) + assert start == 9 + assert end == 10 + + +def test_multiword_entities(tmp_path): data = """ { "rasa_nlu_data": { @@ -274,24 +317,23 @@ def test_multiword_entities(): ] } }""" - with tempfile.NamedTemporaryFile(suffix="_tmp_training_data.json") as f: - f.write(data.encode("utf-8")) - f.flush() - td = training_data.load_data(f.name) - assert len(td.entity_examples) == 1 - example = td.entity_examples[0] - entities = example.get("entities") - assert len(entities) == 1 - tokens = WhitespaceTokenizer().tokenize(example.text) - start, end = MitieEntityExtractor.find_entity(entities[0], example.text, tokens) - assert start == 4 - assert end == 7 - - -def test_nonascii_entities(): + f = tmp_path / "tmp_training_data.json" + f.write_text(data, io_utils.DEFAULT_ENCODING) + td = training_data.load_data(str(f)) + assert len(td.entity_examples) == 1 + example = td.entity_examples[0] + entities = example.get("entities") + assert len(entities) == 1 + tokens = WhitespaceTokenizer().tokenize(example, attribute=TEXT) + start, end = MitieEntityExtractor.find_entity(entities[0], example.text, tokens) + assert start == 4 + assert end == 7 + + +def test_nonascii_entities(tmp_path): data = """ { - "luis_schema_version": "2.0", + 
"luis_schema_version": "5.0", "utterances" : [ { "text": "I am looking for a ßäæ ?€ö) item", @@ -306,22 +348,21 @@ def test_nonascii_entities(): } ] }""" - with tempfile.NamedTemporaryFile(suffix="_tmp_training_data.json") as f: - f.write(data.encode("utf-8")) - f.flush() - td = training_data.load_data(f.name) - assert len(td.entity_examples) == 1 - example = td.entity_examples[0] - entities = example.get("entities") - assert len(entities) == 1 - entity = entities[0] - assert entity["value"] == "ßäæ ?€ö)" - assert entity["start"] == 19 - assert entity["end"] == 27 - assert entity["entity"] == "description" - - -def test_entities_synonyms(): + f = tmp_path / "tmp_training_data.json" + f.write_text(data, io_utils.DEFAULT_ENCODING) + td = training_data.load_data(str(f)) + assert len(td.entity_examples) == 1 + example = td.entity_examples[0] + entities = example.get("entities") + assert len(entities) == 1 + entity = entities[0] + assert entity["value"] == "ßäæ ?€ö)" + assert entity["start"] == 19 + assert entity["end"] == 27 + assert entity["entity"] == "description" + + +def test_entities_synonyms(tmp_path): data = """ { "rasa_nlu_data": { @@ -359,11 +400,10 @@ def test_entities_synonyms(): ] } }""" - with tempfile.NamedTemporaryFile(suffix="_tmp_training_data.json") as f: - f.write(data.encode("utf-8")) - f.flush() - td = training_data.load_data(f.name) - assert td.entity_synonyms["New York City"] == "nyc" + f = tmp_path / "tmp_training_data.json" + f.write_text(data, io_utils.DEFAULT_ENCODING) + td = training_data.load_data(str(f)) + assert td.entity_synonyms["New York City"] == "nyc" def cmp_message_list(firsts, seconds): @@ -380,8 +420,8 @@ def cmp_dict_list(firsts, seconds): del seconds[idx] break else: - others = ", ".join([e.text for e in seconds]) - assert False, "Failed to find message {} in {}".format(a.text, others) + others = ", ".join(e.text for e in seconds) + assert False, f"Failed to find message {a.text} in {others}" return not seconds @@ -395,7 +435,7 @@ def cmp_dict_list(firsts, seconds): None, ), ( - "data/examples/luis/demo-restaurants.json", + "data/examples/luis/demo-restaurants_v5.json", "data/test/luis_converted_to_rasa.json", "json", None, @@ -461,112 +501,50 @@ def test_training_data_conversion( # f.write(td.as_json(indent=2)) -def test_url_data_format(): - data = """ - { - "rasa_nlu_data": { - "entity_synonyms": [ - { - "value": "nyc", - "synonyms": ["New York City", "nyc", "the big apple"] - } - ], - "common_examples" : [ - { - "text": "show me flights to New York City", - "intent": "unk", - "entities": [ - { - "entity": "destination", - "start": 19, - "end": 32, - "value": "NYC" - } - ] - } - ] - } - }""" - fname = io_utils.create_temporary_file( - data.encode("utf-8"), suffix="_tmp_training_data.json", mode="w+b" - ) - data = io_utils.read_json_file(fname) - assert data is not None - validate_rasa_nlu_data(data) - - -def test_markdown_entity_regex(): - r = MarkdownReader() - - md = """ -## intent:restaurant_search -- i'm looking for a place to eat -- i'm looking for a place in the [north](loc-direction) of town -- show me [chines](cuisine:chinese) restaurants -- show me [chines](22_ab-34*3.A:43er*+?df) restaurants - """ - - result = r.reads(md) - - assert len(result.training_examples) == 4 - first = result.training_examples[0] - assert first.data == {"intent": "restaurant_search"} - assert first.text == "i'm looking for a place to eat" - - second = result.training_examples[1] - assert second.data == { - "intent": "restaurant_search", - "entities": [ - {"start": 
31, "end": 36, "value": "north", "entity": "loc-direction"} - ], - } - assert second.text == "i'm looking for a place in the north of town" - - third = result.training_examples[2] - assert third.data == { - "intent": "restaurant_search", - "entities": [{"start": 8, "end": 14, "value": "chinese", "entity": "cuisine"}], - } - assert third.text == "show me chines restaurants" - - fourth = result.training_examples[3] - assert fourth.data == { - "intent": "restaurant_search", - "entities": [ - {"start": 8, "end": 14, "value": "43er*+?df", "entity": "22_ab-34*3.A"} - ], - } - assert fourth.text == "show me chines restaurants" - - -def test_get_file_format(): - fformat = get_file_format("data/examples/luis/demo-restaurants.json") - - assert fformat == "json" - - fformat = get_file_format("data/examples") - - assert fformat == "json" - - fformat = get_file_format("examples/restaurantbot/data/nlu.md") - - assert fformat == "md" +@pytest.mark.parametrize( + "data_file,expected_format", + [ + ("data/examples/luis/demo-restaurants_v5.json", JSON), + ("data/examples", JSON), + ("data/examples/rasa/demo-rasa.md", MARKDOWN), + ("data/rasa_yaml_examples", RASA_YAML), + ], +) +def test_get_supported_file_format(data_file: Text, expected_format: Text): + fformat = get_file_format(data_file) + assert fformat == expected_format - with pytest.raises(AttributeError): - get_file_format("path-does-not-exists") +@pytest.mark.parametrize("data_file", ["path-does-not-exists", None]) +def test_get_non_existing_file_format_raises(data_file: Text): with pytest.raises(AttributeError): - get_file_format(None) + get_file_format(data_file) def test_guess_format_from_non_existing_file_path(): assert guess_format("not existing path") == UNK -def test_load_data_from_non_existing_file(): - with pytest.raises(ValueError): - load_data("some path") - - def test_is_empty(): assert TrainingData().is_empty() + + +def test_custom_attributes(tmp_path): + data = """ +{ + "rasa_nlu_data": { + "common_examples" : [ + { + "intent": "happy", + "text": "I'm happy.", + "sentiment": 0.8 + } + ] + } +}""" + f = tmp_path / "tmp_training_data.json" + f.write_text(data, io_utils.DEFAULT_ENCODING) + td = training_data.load_data(str(f)) + assert len(td.training_examples) == 1 + example = td.training_examples[0] + assert example.get("sentiment") == 0.8 diff --git a/tests/nlu/utilities.py b/tests/nlu/utilities.py index 51bf413dc59a..dd55d9e636ac 100644 --- a/tests/nlu/utilities.py +++ b/tests/nlu/utilities.py @@ -1,23 +1,9 @@ import tempfile - -import pytest import ruamel.yaml as yaml -from rasa.nlu.config import RasaNLUModelConfig from rasa.nlu.model import Interpreter from rasa.nlu.train import train -slowtest = pytest.mark.slowtest - - -def base_test_conf(pipeline_template): - # 'response_log': temp_log_file_dir(), - # 'port': 5022, - # "path": tempfile.mkdtemp(), - # "data": "./data/test/demo-rasa-small.json" - - return RasaNLUModelConfig({"pipeline": pipeline_template}) - def write_file_config(file_config): with tempfile.NamedTemporaryFile( @@ -36,11 +22,7 @@ async def interpreter_for(component_builder, data, path, config): return interpreter -def temp_log_file_dir(): - return tempfile.mkdtemp(suffix="_rasa_nlu_logs") - - -class ResponseTest(object): +class ResponseTest: def __init__(self, endpoint, expected_response, payload=None): self.endpoint = endpoint self.expected_response = expected_response diff --git a/tests/nlu/utils/__init__.py b/tests/nlu/utils/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git 
a/tests/nlu/utils/test_bilou_utils.py b/tests/nlu/utils/test_bilou_utils.py new file mode 100644 index 000000000000..15dfccf68e0e --- /dev/null +++ b/tests/nlu/utils/test_bilou_utils.py @@ -0,0 +1,198 @@ +import logging +from typing import Text, List, Optional +from _pytest.logging import LogCaptureFixture +import pytest + +import rasa.nlu.utils.bilou_utils as bilou_utils +from rasa.nlu.constants import BILOU_ENTITIES, ENTITIES +from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer +from rasa.nlu.training_data import TrainingData, Message + + +@pytest.mark.parametrize( + "tag, expected", + [ + ("B-person", "person"), + ("I-location", "location"), + ("location", "location"), + ("U-company", "company"), + ("L-company", "company"), + ], +) +def test_entity_name_from_tag(tag, expected): + actual = bilou_utils.tag_without_prefix(tag) + + assert actual == expected + + +@pytest.mark.parametrize( + "tag, expected", + [ + ("B-person", "B-"), + ("I-location", "I-"), + ("location", None), + ("U-company", "U-"), + ("L-company", "L-"), + ("O-company", None), + ], +) +def test_bilou_from_tag(tag, expected): + actual = bilou_utils.bilou_prefix_from_tag(tag) + + assert actual == expected + + +def test_tags_to_ids(): + message = Message("Germany is part of the European Union") + message.set( + BILOU_ENTITIES, + ["U-location", "O", "O", "O", "O", "B-organisation", "L-organisation"], + ) + + tag_id_dict = {"O": 0, "U-location": 1, "B-organisation": 2, "L-organisation": 3} + + tags = bilou_utils.bilou_tags_to_ids(message, tag_id_dict) + + assert tags == [1, 0, 0, 0, 0, 2, 3] + + +def test_remove_bilou_prefixes(): + actual = bilou_utils.remove_bilou_prefixes( + ["U-location", "O", "O", "O", "O", "B-organisation", "L-organisation"] + ) + + assert actual == ["location", "O", "O", "O", "O", "organisation", "organisation"] + + +def test_build_tag_id_dict(): + message_1 = Message("Germany is part of the European Union") + message_1.set( + BILOU_ENTITIES, + ["U-location", "O", "O", "O", "O", "B-organisation", "L-organisation"], + ) + + message_2 = Message("Berlin is the capital of Germany") + message_2.set(BILOU_ENTITIES, ["U-location", "O", "O", "O", "O", "U-location"]) + + training_data = TrainingData([message_1, message_2]) + + tag_id_dict = bilou_utils.build_tag_id_dict(training_data) + + assert tag_id_dict == { + "O": 0, + "B-location": 1, + "I-location": 2, + "L-location": 3, + "U-location": 4, + "B-organisation": 5, + "I-organisation": 6, + "L-organisation": 7, + "U-organisation": 8, + } + + +def test_apply_bilou_schema(): + tokenizer = WhitespaceTokenizer() + + message_1 = Message("Germany is part of the European Union") + message_1.set( + ENTITIES, + [ + {"start": 0, "end": 7, "value": "Germany", "entity": "location"}, + { + "start": 23, + "end": 37, + "value": "European Union", + "entity": "organisation", + }, + ], + ) + + message_2 = Message("Berlin is the capital of Germany") + message_2.set( + ENTITIES, + [ + {"start": 0, "end": 6, "value": "Berlin", "entity": "location"}, + {"start": 25, "end": 32, "value": "Germany", "entity": "location"}, + ], + ) + + training_data = TrainingData([message_1, message_2]) + + tokenizer.train(training_data) + + bilou_utils.apply_bilou_schema(training_data) + + assert message_1.get(BILOU_ENTITIES) == [ + "U-location", + "O", + "O", + "O", + "O", + "B-organisation", + "L-organisation", + ] + assert message_2.get(BILOU_ENTITIES) == [ + "U-location", + "O", + "O", + "O", + "O", + "U-location", + ] + + +@pytest.mark.parametrize( + "tags, expected_tags, 
debug_message", + [ + ( + ["O", "B-person", "I-person", "L-person", "O", "U-person", "O"], + ["O", "B-person", "I-person", "L-person", "O", "U-person", "O"], + None, + ), + ( + ["O", "B-person", "B-location", "I-location", "O"], + ["O", "U-person", "B-location", "L-location", "O"], + "B- tag not closed", + ), + ( + ["O", "B-person", "I-location", "L-person"], + ["O", "B-person", "I-person", "L-person"], + "B- tag, L- tag pair encloses multiple entity classes", + ), + (["O", "B-person", "O"], ["O", "U-person", "O"], "B- tag not closed"), + (["O", "B-person"], ["O", "U-person"], "B- tag not closed"), + ( + ["O", "B-person", "I-person"], + ["O", "B-person", "L-person"], + "B- tag not closed", + ), + ( + ["O", "B-person", "I-location"], + ["O", "B-person", "L-person"], + "B- tag not closed", + ), + ( + ["O", "B-person", "B-location"], + ["O", "U-person", "U-location"], + "B- tag not closed", + ), + ], +) +def test_check_consistent_bilou_tagging( + tags: List[Text], + expected_tags: List[Text], + debug_message: Optional[Text], + caplog: LogCaptureFixture, +): + + with caplog.at_level(logging.DEBUG): + actual_tags = bilou_utils.ensure_consistent_bilou_tagging(tags) + + if debug_message: + assert len(caplog.records) > 0 + assert debug_message in caplog.text + else: + assert len(caplog.records) == 0 + + assert actual_tags == expected_tags diff --git a/tests/nlu/utils/test_pattern_utils.py b/tests/nlu/utils/test_pattern_utils.py new file mode 100644 index 000000000000..b3cb9afc2143 --- /dev/null +++ b/tests/nlu/utils/test_pattern_utils.py @@ -0,0 +1,169 @@ +from typing import Dict, List, Text + +import pytest + +import rasa.nlu.utils.pattern_utils as pattern_utils +from rasa.nlu.training_data import Message +from rasa.nlu.training_data import TrainingData + + +@pytest.mark.parametrize( + "lookup_tables, regex_features, expected_patterns", + [ + ( + {"name": "person", "elements": ["Max", "John"]}, + {}, + [{"name": "person", "pattern": "(\\bMax\\b|\\bJohn\\b)"}], + ), + ({}, {}, []), + ( + {}, + {"name": "zipcode", "pattern": "[0-9]{5}"}, + [{"name": "zipcode", "pattern": "[0-9]{5}"}], + ), + ( + {"name": "person", "elements": ["Max", "John"]}, + {"name": "zipcode", "pattern": "[0-9]{5}"}, + [ + {"name": "zipcode", "pattern": "[0-9]{5}"}, + {"name": "person", "pattern": "(\\bMax\\b|\\bJohn\\b)"}, + ], + ), + ( + {"name": "plates", "elements": "data/test/lookup_tables/plates.txt"}, + {"name": "zipcode", "pattern": "[0-9]{5}"}, + [ + {"name": "zipcode", "pattern": "[0-9]{5}"}, + { + "name": "plates", + "pattern": "(\\btacos\\b|\\bbeef\\b|\\bmapo\\ tofu\\b|\\bburrito\\b|\\blettuce\\ wrap\\b)", + }, + ], + ), + ], +) +def test_extract_patterns( + lookup_tables: Dict[Text, List[Text]], + regex_features: Dict[Text, Text], + expected_patterns: Dict[Text, Text], +): + training_data = TrainingData() + if lookup_tables: + training_data.lookup_tables = [lookup_tables] + if regex_features: + training_data.regex_features = [regex_features] + + actual_patterns = pattern_utils.extract_patterns(training_data) + + assert actual_patterns == expected_patterns + + +@pytest.mark.parametrize( + "entity, regex_features, expected_patterns", + [ + ("", {}, []), + ( + "zipcode", + {"name": "zipcode", "pattern": "[0-9]{5}"}, + [{"name": "zipcode", "pattern": "[0-9]{5}"}], + ), + ("entity", {"name": "zipcode", "pattern": "[0-9]{5}"}, []), + ], +) +def test_extract_patterns_use_only_entities_regexes( + entity: Text, regex_features: Dict[Text, Text], expected_patterns: Dict[Text, Text] +): + training_data = TrainingData() + if 
entity: + training_data.training_examples = [ + Message("text", data={"entities": [{"entity": entity, "value": "text"}]}) + ] + if regex_features: + training_data.regex_features = [regex_features] + + actual_patterns = pattern_utils.extract_patterns( + training_data, use_only_entities=True + ) + + assert actual_patterns == expected_patterns + + +@pytest.mark.parametrize( + "entity, lookup_tables, expected_patterns", + [ + ("", {}, []), + ( + "person", + {"name": "person", "elements": ["Max", "John"]}, + [{"name": "person", "pattern": "(\\bMax\\b|\\bJohn\\b)"}], + ), + ("entity", {"name": "person", "elements": ["Max", "John"]}, []), + ], +) +def test_extract_patterns_use_only_entities_lookup_tables( + entity: Text, lookup_tables: Dict[Text, Text], expected_patterns: Dict[Text, Text] +): + training_data = TrainingData() + if entity: + training_data.training_examples = [ + Message("text", data={"entities": [{"entity": entity, "value": "text"}]}) + ] + if lookup_tables: + training_data.lookup_tables = [lookup_tables] + + actual_patterns = pattern_utils.extract_patterns( + training_data, use_only_entities=True + ) + + assert actual_patterns == expected_patterns + + +@pytest.mark.parametrize( + "lookup_tables, regex_features, use_lookup_tables, use_regex_features, expected_patterns", + [ + ({"name": "person", "elements": ["Max", "John"]}, {}, False, True, []), + ({}, {}, True, True, []), + ({}, {"name": "zipcode", "pattern": "[0-9]{5}"}, True, False, []), + ( + {"name": "person", "elements": ["Max", "John"]}, + {"name": "zipcode", "pattern": "[0-9]{5}"}, + False, + False, + [], + ), + ( + {"name": "person", "elements": ["Max", "John"]}, + {"name": "zipcode", "pattern": "[0-9]{5}"}, + True, + False, + [{"name": "person", "pattern": "(\\bMax\\b|\\bJohn\\b)"}], + ), + ( + {"name": "person", "elements": ["Max", "John"]}, + {"name": "zipcode", "pattern": "[0-9]{5}"}, + False, + True, + [{"name": "zipcode", "pattern": "[0-9]{5}"}], + ), + ], +) +def test_extract_patterns_use_only_lookup_tables_or_regex_features( + lookup_tables: Dict[Text, List[Text]], + regex_features: Dict[Text, Text], + use_lookup_tables: bool, + use_regex_features: bool, + expected_patterns: Dict[Text, Text], +): + training_data = TrainingData() + if lookup_tables: + training_data.lookup_tables = [lookup_tables] + if regex_features: + training_data.regex_features = [regex_features] + + actual_patterns = pattern_utils.extract_patterns( + training_data, + use_lookup_tables=use_lookup_tables, + use_regexes=use_regex_features, + ) + + assert actual_patterns == expected_patterns diff --git a/tests/test_data.py b/tests/test_data.py new file mode 100644 index 000000000000..22b3f9ad0052 --- /dev/null +++ b/tests/test_data.py @@ -0,0 +1,70 @@ +import glob +import os + +from pathlib import Path + +from rasa.constants import DEFAULT_E2E_TESTS_PATH +from rasa import data +from rasa.utils.io import write_text_file + + +def test_story_file_can_not_be_yml(tmpdir: Path): + p = tmpdir / "test_non_md.yml" + Path(p).touch() + assert data.is_story_file(str()) is False + + +def test_empty_story_file_is_not_story_file(tmpdir: Path): + p = tmpdir / "test_non_md.md" + Path(p).touch() + assert data.is_story_file(str(p)) is False + + +def test_story_file_with_minimal_story_is_story_file(tmpdir: Path): + p = tmpdir / "story.md" + s = """ +## my story + """ + write_text_file(s, p) + assert data.is_story_file(str(p)) + + +def test_default_story_files_are_story_files(): + for fn in glob.glob(os.path.join("data", "test_stories", "*")): + assert 
data.is_story_file(fn) + + +def test_default_conversation_tests_are_conversation_tests(tmpdir: Path): + parent = tmpdir / DEFAULT_E2E_TESTS_PATH + Path(parent).mkdir(parents=True) + + e2e_path = parent / "conversation_tests.md" + e2e_story = """## my story test""" + write_text_file(e2e_story, e2e_path) + + assert data.is_end_to_end_conversation_test_file(str(e2e_path)) + + +def test_nlu_data_files_are_not_conversation_tests(tmpdir: Path): + parent = tmpdir / DEFAULT_E2E_TESTS_PATH + Path(parent).mkdir(parents=True) + + nlu_path = parent / "nlu.md" + nlu_data = """ +## intent: greet +- hello +- hi +- hallo + """ + write_text_file(nlu_data, nlu_path) + + assert not data.is_end_to_end_conversation_test_file(str(nlu_path)) + + +def test_domain_files_are_not_conversation_tests(tmpdir: Path): + parent = tmpdir / DEFAULT_E2E_TESTS_PATH + Path(parent).mkdir(parents=True) + + domain_path = parent / "domain.yml" + + assert not data.is_end_to_end_conversation_test_file(str(domain_path)) diff --git a/tests/test_server.py b/tests/test_server.py index aac8d7754b03..4eed88a92652 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -1,27 +1,43 @@ -# -*- coding: utf-8 -*- import os +from multiprocessing.managers import DictProxy +from pathlib import Path +from unittest.mock import Mock, ANY + +import requests +import time import tempfile import uuid -from typing import List, Text, Type +from typing import List, Text, Type, Generator, NoReturn, Dict +from contextlib import ExitStack + +from _pytest import pathlib from aioresponses import aioresponses import pytest from freezegun import freeze_time from mock import MagicMock +from multiprocessing import Process, Manager import rasa import rasa.constants +import rasa.utils.io +import rasa.server from rasa.core import events, utils +from rasa.core.agent import Agent from rasa.core.channels import CollectingOutputChannel, RestInput, SlackInput from rasa.core.channels.slack import SlackBot from rasa.core.events import Event, UserUttered, SlotSet, BotUttered from rasa.core.trackers import DialogueStateTracker from rasa.model import unpack_model +from rasa.nlu.constants import INTENT_NAME_KEY from rasa.utils.endpoints import EndpointConfig +from rasa import utils as rasa_utils from sanic import Sanic from sanic.testing import SanicTestClient from tests.nlu.utilities import ResponseTest +from tests.conftest import get_test_client +from ruamel.yaml import StringIO # a couple of event instances that we can use for testing @@ -31,7 +47,7 @@ "event": UserUttered.type_name, "text": "/goodbye", "parse_data": { - "intent": {"confidence": 1.0, "name": "greet"}, + "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"}, "entities": [], }, } @@ -46,27 +62,27 @@ @pytest.fixture def rasa_app_without_api(rasa_server_without_api: Sanic) -> SanicTestClient: - return rasa_server_without_api.test_client + return get_test_client(rasa_server_without_api) @pytest.fixture def rasa_app(rasa_server: Sanic) -> SanicTestClient: - return rasa_server.test_client + return get_test_client(rasa_server) @pytest.fixture def rasa_app_nlu(rasa_nlu_server: Sanic) -> SanicTestClient: - return rasa_nlu_server.test_client + return get_test_client(rasa_nlu_server) @pytest.fixture def rasa_app_core(rasa_core_server: Sanic) -> SanicTestClient: - return rasa_core_server.test_client + return get_test_client(rasa_core_server) @pytest.fixture def rasa_secured_app(rasa_server_secured: Sanic) -> SanicTestClient: - return rasa_server_secured.test_client + return get_test_client(rasa_server_secured) 
def test_root(rasa_app: SanicTestClient): @@ -76,6 +92,7 @@ def test_root(rasa_app: SanicTestClient): def test_root_without_enable_api(rasa_app_without_api: SanicTestClient): + _, response = rasa_app_without_api.get("/") assert response.status == 200 assert response.text.startswith("Hello from Rasa:") @@ -98,18 +115,22 @@ def test_version(rasa_app: SanicTestClient): ) -def test_status(rasa_app: SanicTestClient): +def test_status(rasa_app: SanicTestClient, trained_rasa_model: Text): _, response = rasa_app.get("/status") + model_file = response.json["model_file"] assert response.status == 200 assert "fingerprint" in response.json - assert "model_file" in response.json + assert os.path.isfile(model_file) + assert model_file == trained_rasa_model -def test_status_nlu_only(rasa_app_nlu: SanicTestClient): +def test_status_nlu_only(rasa_app_nlu: SanicTestClient, trained_nlu_model: Text): _, response = rasa_app_nlu.get("/status") + model_file = response.json["model_file"] assert response.status == 200 assert "fingerprint" in response.json assert "model_file" in response.json + assert model_file == trained_nlu_model def test_status_secured(rasa_secured_app: SanicTestClient): @@ -123,6 +144,130 @@ def test_status_not_ready_agent(rasa_app: SanicTestClient): assert response.status == 409 +@pytest.fixture +def shared_statuses() -> DictProxy: + return Manager().dict() + + +@pytest.fixture +def background_server( + shared_statuses: DictProxy, tmpdir: pathlib.Path +) -> Generator[Process, None, None]: + # Create a fake model archive which the mocked train function can return + + fake_model = Path(tmpdir) / "fake_model.tar.gz" + fake_model.touch() + fake_model_path = str(fake_model) + + # Fake training function which blocks until we tell it to stop blocking + # If we can send a status request while this is blocking, we can be sure that the + # actual training is also not blocking + def mocked_training_function(*_, **__) -> Text: + # Tell the others that we are now blocking + shared_statuses["started_training"] = True + # Block until somebody tells us to not block anymore + while shared_statuses.get("stop_training") is not True: + time.sleep(1) + + return fake_model_path + + def run_server() -> NoReturn: + import rasa + + rasa.train = mocked_training_function + + from rasa import __main__ + import sys + + sys.argv = ["rasa", "run", "--enable-api"] + __main__.main() + + server = Process(target=run_server) + yield server + server.terminate() + + +@pytest.fixture() +def training_request(shared_statuses: DictProxy) -> Generator[Process, None, None]: + def send_request() -> None: + payload = "" + project_path = Path("examples") / "formbot" + for file in [ + "domain.yml", + "config.yml", + Path("data") / "rules.yml", + Path("data") / "stories.yml", + Path("data") / "nlu.yml", + ]: + full_path = project_path / file + payload += full_path.read_text() + + response = requests.post( + "http://localhost:5005/model/train", + data=payload, + headers={"Content-type": rasa.server.YAML_CONTENT_TYPE}, + params={"force_training": True}, + ) + shared_statuses["training_result"] = response.status_code + + train_request = Process(target=send_request) + yield train_request + train_request.terminate() + + +# Due to unknown reasons this test can not be run in pycharm, it +# results in segfaults...will skip in that case - test will still get run on CI. +# It also doesn't run on Windows because of Process-related calls and an attempt +# to start/terminate a process. 
We will investigate this case further later: +# https://github.com/RasaHQ/rasa/issues/6302 +@pytest.mark.skipif("PYCHARM_HOSTED" in os.environ, reason="results in segfault") +@pytest.mark.skip_on_windows +def test_train_status_is_not_blocked_by_training( + background_server: Process, shared_statuses: DictProxy, training_request: Process +): + background_server.start() + + def is_server_ready() -> bool: + try: + return requests.get("http://localhost:5005/status").status_code == 200 + except Exception: + return False + + # wait until server is up before sending train request and status test loop + start = time.time() + while not is_server_ready() and time.time() - start < 60: + time.sleep(1) + + assert is_server_ready() + + training_request.start() + + # Wait until the blocking training function was called + while shared_statuses.get("started_training") is not True: + time.sleep(1) + + # Check if the number of currently running trainings was incremented + response = requests.get("http://localhost:5005/status") + assert response.status_code == 200 + assert response.json()["num_active_training_jobs"] == 1 + + # Tell the blocking training function to stop + shared_statuses["stop_training"] = True + + start = time.time() + while shared_statuses.get("training_result") is None and time.time() - start < 60: + time.sleep(1) + assert shared_statuses.get("training_result") + + # Check that the training worked correctly + assert shared_statuses["training_result"] == 200 + + # Check if the number of currently running trainings was decremented + response = requests.get("http://localhost:5005/status") + assert response.status_code == 200 + assert response.json()["num_active_training_jobs"] == 0 + + @pytest.mark.parametrize( "response_test", [ @@ -130,7 +275,7 @@ def test_status_not_ready_agent(rasa_app: SanicTestClient): "/model/parse", { "entities": [], - "intent": {"confidence": 1.0, "name": "greet"}, + "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"}, "text": "hello", }, payload={"text": "hello"}, @@ -139,7 +284,7 @@ def test_status_not_ready_agent(rasa_app: SanicTestClient): "/model/parse", { "entities": [], - "intent": {"confidence": 1.0, "name": "greet"}, + "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"}, "text": "hello", }, payload={"text": "hello"}, @@ -148,14 +293,14 @@ def test_status_not_ready_agent(rasa_app: SanicTestClient): "/model/parse", { "entities": [], - "intent": {"confidence": 1.0, "name": "greet"}, + "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"}, "text": "hello ńöñàśçií", }, payload={"text": "hello ńöñàśçií"}, ), ], ) -def test_parse(rasa_app, response_test): +def test_parse(rasa_app: SanicTestClient, response_test: ResponseTest): _, response = rasa_app.post(response_test.endpoint, json=response_test.payload) rjs = response.json assert response.status == 200 @@ -172,7 +317,7 @@ def test_parse(rasa_app, response_test): "/model/parse?emulation_mode=wit", { "entities": [], - "intent": {"confidence": 1.0, "name": "greet"}, + "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"}, "text": "hello", }, payload={"text": "hello"}, @@ -181,7 +326,7 @@ def test_parse(rasa_app, response_test): "/model/parse?emulation_mode=dialogflow", { "entities": [], - "intent": {"confidence": 1.0, "name": "greet"}, + "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"}, "text": "hello", }, payload={"text": "hello"}, @@ -190,14 +335,16 @@ def test_parse(rasa_app, response_test): "/model/parse?emulation_mode=luis", { "entities": [], - "intent": {"confidence": 1.0, "name": "greet"}, 
+ "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"}, "text": "hello ńöñàśçií", }, payload={"text": "hello ńöñàśçií"}, ), ], ) -def test_parse_with_different_emulation_mode(rasa_app, response_test): +def test_parse_with_different_emulation_mode( + rasa_app: SanicTestClient, response_test: ResponseTest +): _, response = rasa_app.post(response_test.endpoint, json=response_test.payload) assert response.status == 200 @@ -218,28 +365,24 @@ def test_parse_on_invalid_emulation_mode(rasa_app_nlu: SanicTestClient): def test_train_stack_success( - rasa_app, - default_domain_path, - default_stories_file, - default_stack_config, - default_nlu_data, + rasa_app: SanicTestClient, + default_domain_path: Text, + default_stories_file: Text, + default_stack_config: Text, + default_nlu_data: Text, ): - domain_file = open(default_domain_path) - config_file = open(default_stack_config) - stories_file = open(default_stories_file) - nlu_file = open(default_nlu_data) - - payload = dict( - domain=domain_file.read(), - config=config_file.read(), - stories=stories_file.read(), - nlu=nlu_file.read(), - ) - - domain_file.close() - config_file.close() - stories_file.close() - nlu_file.close() + with ExitStack() as stack: + domain_file = stack.enter_context(open(default_domain_path)) + config_file = stack.enter_context(open(default_stack_config)) + stories_file = stack.enter_context(open(default_stories_file)) + nlu_file = stack.enter_context(open(default_nlu_data)) + + payload = dict( + domain=domain_file.read(), + config=config_file.read(), + stories=stories_file.read(), + nlu=nlu_file.read(), + ) _, response = rasa_app.post("/model/train", json=payload) assert response.status == 200 @@ -258,20 +401,28 @@ def test_train_stack_success( def test_train_nlu_success( - rasa_app, default_stack_config, default_nlu_data, default_domain_path + rasa_app: SanicTestClient, + default_stack_config: Text, + default_nlu_data: Text, + default_domain_path: Text, ): - domain_file = open(default_domain_path) - config_file = open(default_stack_config) - nlu_file = open(default_nlu_data) + domain_data = rasa_utils.io.read_yaml_file(default_domain_path) + config_data = rasa_utils.io.read_yaml_file(default_stack_config) + nlu_data = rasa_utils.io.read_yaml_file(default_nlu_data) - payload = dict( - domain=domain_file.read(), config=config_file.read(), nlu=nlu_file.read() - ) + # combine all data into our payload + payload = { + key: val for d in [domain_data, config_data, nlu_data] for key, val in d.items() + } - config_file.close() - nlu_file.close() + data = StringIO() + rasa_utils.io.write_yaml(payload, data) - _, response = rasa_app.post("/model/train", json=payload) + _, response = rasa_app.post( + "/model/train", + data=data.getvalue(), + headers={"Content-type": rasa.server.YAML_CONTENT_TYPE}, + ) assert response.status == 200 # save model to temporary file @@ -286,18 +437,21 @@ def test_train_nlu_success( def test_train_core_success( - rasa_app, default_stack_config, default_stories_file, default_domain_path + rasa_app: SanicTestClient, + default_stack_config: Text, + default_stories_file: Text, + default_domain_path: Text, ): - domain_file = open(default_domain_path) - config_file = open(default_stack_config) - core_file = open(default_stories_file) - - payload = dict( - domain=domain_file.read(), config=config_file.read(), nlu=core_file.read() - ) + with ExitStack() as stack: + domain_file = stack.enter_context(open(default_domain_path)) + config_file = stack.enter_context(open(default_stack_config)) + core_file = 
stack.enter_context(open(default_stories_file)) - config_file.close() - core_file.close() + payload = dict( + domain=domain_file.read(), + config=config_file.read(), + stories=core_file.read(), + ) _, response = rasa_app.post("/model/train", json=payload) assert response.status == 200 @@ -313,6 +467,169 @@ def test_train_core_success( assert os.path.exists(os.path.join(model_path, "fingerprint.json")) +def test_train_with_retrieval_events_success( + rasa_app: SanicTestClient, default_stack_config: Text +): + with ExitStack() as stack: + domain_file = stack.enter_context( + open("data/test_domains/default_retrieval_intents.yml") + ) + config_file = stack.enter_context(open(default_stack_config)) + core_file = stack.enter_context( + open("data/test_stories/stories_retrieval_intents.md") + ) + responses_file = stack.enter_context(open("data/test_responses/default.md")) + nlu_file = stack.enter_context( + open("data/test_nlu/default_retrieval_intents.md") + ) + + payload = dict( + domain=domain_file.read(), + config=config_file.read(), + stories=core_file.read(), + responses=responses_file.read(), + nlu=nlu_file.read(), + ) + + _, response = rasa_app.post("/model/train", json=payload) + assert response.status == 200 + assert_trained_model(response.body) + + +def assert_trained_model(response_body: bytes) -> None: + # save model to temporary file + tempdir = tempfile.mkdtemp() + model_path = os.path.join(tempdir, "model.tar.gz") + with open(model_path, "wb") as f: + f.write(response_body) + + # unpack model and ensure fingerprint is present + model_path = unpack_model(model_path) + assert os.path.exists(os.path.join(model_path, "fingerprint.json")) + + +@pytest.mark.parametrize( + "payload", + [ + {"config": None, "stories": None, "nlu": None, "domain": None, "force": True}, + { + "config": None, + "stories": None, + "nlu": None, + "domain": None, + "force": False, + "save_to_default_model_directory": True, + }, + { + "config": None, + "stories": None, + "nlu": None, + "domain": None, + "save_to_default_model_directory": False, + }, + ], +) +def test_deprecation_warnings_json_payload(payload: Dict): + with pytest.warns(FutureWarning): + rasa.server._validate_json_training_payload(payload) + + +def test_train_with_yaml(rasa_app: SanicTestClient): + training_data = """ +stories: +- story: My story + steps: + - intent: greet + - action: utter_greet + +rules: +- story: My rule + steps: + - intent: greet + - action: utter_greet + +intents: +- greet + +nlu: +- intent: greet + examples: | + - hi + - hello + +responses: + utter_greet: + - text: Hi + +language: en + +polices: +- name: RulePolicy + +pipeline: + - name: WhitespaceTokenizer + - name: CountVectorsFeaturizer + - name: DucklingHTTPExtractor + - name: DIETClassifier + epochs: 1 +""" + _, response = rasa_app.post( + "/model/train", + data=training_data, + headers={"Content-type": rasa.server.YAML_CONTENT_TYPE}, + ) + + assert response.status == 200 + assert_trained_model(response.body) + + +def test_train_with_invalid_yaml(rasa_app: SanicTestClient): + invalid_yaml = """ +rules: +rule my rule +""" + + _, response = rasa_app.post( + "/model/train", + data=invalid_yaml, + headers={"Content-type": rasa.server.YAML_CONTENT_TYPE}, + ) + assert response.status == 400 + + +@pytest.mark.parametrize( + "headers, expected", + [({}, False), ({"force_training": False}, False), ({"force_training": True}, True)], +) +def test_training_payload_from_yaml_force_training(headers: Dict, expected: bool): + request = Mock() + request.body = b"" + request.args = 
headers + + payload = rasa.server._training_payload_from_yaml(request) + assert payload.get("force_training") == expected + + +@pytest.mark.parametrize( + "headers, expected", + [ + ({}, rasa.constants.DEFAULT_MODELS_PATH), + ({"save_to_default_model_directory": False}, ANY), + ({"save_to_default_model_directory": True}, rasa.constants.DEFAULT_MODELS_PATH), + ], +) +def test_training_payload_from_yaml_save_to_default_model_directory( + headers: Dict, expected: Text +): + request = Mock() + request.body = b"" + request.args = headers + + payload = rasa.server._training_payload_from_yaml(request) + assert payload.get("output") + assert payload.get("output") == expected + + def test_train_missing_config(rasa_app: SanicTestClient): payload = dict(domain="domain data", config=None) @@ -334,9 +651,8 @@ def test_train_internal_error(rasa_app: SanicTestClient): assert response.status == 500 -def test_evaluate_stories(rasa_app, default_stories_file): - with open(default_stories_file, "r") as f: - stories = f.read() +def test_evaluate_stories(rasa_app: SanicTestClient, default_stories_file: Text): + stories = rasa.utils.io.read_file(default_stories_file) _, response = rasa_app.post("/model/test/stories", data=stories) @@ -362,19 +678,19 @@ def test_evaluate_stories(rasa_app, default_stories_file): def test_evaluate_stories_not_ready_agent( - rasa_app_nlu: SanicTestClient, default_stories_file + rasa_app_nlu: SanicTestClient, default_stories_file: Text ): - with open(default_stories_file, "r") as f: - stories = f.read() + stories = rasa.utils.io.read_file(default_stories_file) _, response = rasa_app_nlu.post("/model/test/stories", data=stories) assert response.status == 409 -def test_evaluate_stories_end_to_end(rasa_app, end_to_end_story_file): - with open(end_to_end_story_file, "r") as f: - stories = f.read() +def test_evaluate_stories_end_to_end( + rasa_app: SanicTestClient, end_to_end_story_file: Text +): + stories = rasa.utils.io.read_file(end_to_end_story_file) _, response = rasa_app.post("/model/test/stories?e2e=true", data=stories) @@ -398,11 +714,14 @@ def test_evaluate_stories_end_to_end(rasa_app, end_to_end_story_file): } -def test_evaluate_intent(rasa_app, default_nlu_data): - with open(default_nlu_data, "r") as f: - nlu_data = f.read() +def test_evaluate_intent(rasa_app: SanicTestClient, default_nlu_data: Text): + nlu_data = rasa.utils.io.read_file(default_nlu_data) - _, response = rasa_app.post("/model/test/intents", data=nlu_data) + _, response = rasa_app.post( + "/model/test/intents", + data=nlu_data, + headers={"Content-type": rasa.server.YAML_CONTENT_TYPE}, + ) assert response.status == 200 assert set(response.json.keys()) == { @@ -413,12 +732,15 @@ def test_evaluate_intent(rasa_app, default_nlu_data): def test_evaluate_intent_on_just_nlu_model( - rasa_app_nlu: SanicTestClient, default_nlu_data + rasa_app_nlu: SanicTestClient, default_nlu_data: Text ): - with open(default_nlu_data, "r") as f: - nlu_data = f.read() + nlu_data = rasa.utils.io.read_file(default_nlu_data) - _, response = rasa_app_nlu.post("/model/test/intents", data=nlu_data) + _, response = rasa_app_nlu.post( + "/model/test/intents", + data=nlu_data, + headers={"Content-type": rasa.server.YAML_CONTENT_TYPE}, + ) assert response.status == 200 assert set(response.json.keys()) == { @@ -429,16 +751,17 @@ def test_evaluate_intent_on_just_nlu_model( def test_evaluate_intent_with_query_param( - rasa_app, trained_nlu_model, default_nlu_data + rasa_app: SanicTestClient, trained_nlu_model, default_nlu_data: Text ): _, response = 
rasa_app.get("/status") previous_model_file = response.json["model_file"] - with open(default_nlu_data, "r") as f: - nlu_data = f.read() + nlu_data = rasa.utils.io.read_file(default_nlu_data) _, response = rasa_app.post( - "/model/test/intents?model={}".format(trained_nlu_model), data=nlu_data + f"/model/test/intents?model={trained_nlu_model}", + data=nlu_data, + headers={"Content-type": rasa.server.YAML_CONTENT_TYPE}, ) assert response.status == 200 @@ -462,7 +785,7 @@ def test_predict(rasa_app: SanicTestClient): "text": "hello", "parse_data": { "entities": [], - "intent": {"confidence": 0.57, "name": "greet"}, + "intent": {"confidence": 0.57, INTENT_NAME_KEY: "greet"}, "text": "hello", }, }, @@ -470,7 +793,9 @@ def test_predict(rasa_app: SanicTestClient): } } _, response = rasa_app.post( - "/model/predict", json=data, headers={"Content-Type": "application/json"} + "/model/predict", + json=data, + headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE}, ) content = response.json assert response.status == 200 @@ -485,68 +810,83 @@ def test_requesting_non_existent_tracker(rasa_app: SanicTestClient): content = response.json assert response.status == 200 assert content["paused"] is False - assert content["slots"] == {"location": None, "cuisine": None} + assert content["slots"] == {"name": None} assert content["sender_id"] == "madeupid" assert content["events"] == [ { "event": "action", - "name": "action_listen", + "name": "action_session_start", "policy": None, "confidence": None, "timestamp": 1514764800, - } + }, + {"event": "session_started", "timestamp": 1514764800}, + { + "event": "action", + INTENT_NAME_KEY: "action_listen", + "policy": None, + "confidence": None, + "timestamp": 1514764800, + }, ] assert content["latest_message"] == { "text": None, "intent": {}, "entities": [], "message_id": None, - "metadata": None, + "metadata": {}, } @pytest.mark.parametrize("event", test_events) -def test_pushing_event(rasa_app, event): - cid = str(uuid.uuid1()) - conversation = "/conversations/{}".format(cid) +def test_pushing_event(rasa_app: SanicTestClient, event: Event): + sender_id = str(uuid.uuid1()) + conversation = f"/conversations/{sender_id}" + serialized_event = event.as_dict() + # Remove timestamp so that a new one is assigned on the server + serialized_event.pop("timestamp") + + time_before_adding_events = time.time() _, response = rasa_app.post( - "{}/tracker/events".format(conversation), - json=event.as_dict(), - headers={"Content-Type": "application/json"}, + f"{conversation}/tracker/events", + json=serialized_event, + headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE}, ) assert response.json is not None assert response.status == 200 - _, tracker_response = rasa_app.get("/conversations/{}/tracker".format(cid)) + _, tracker_response = rasa_app.get(f"/conversations/{sender_id}/tracker") tracker = tracker_response.json assert tracker is not None - assert len(tracker.get("events")) == 2 - evt = tracker.get("events")[1] - assert Event.from_parameters(evt) == event + assert len(tracker.get("events")) == 1 + + evt = tracker.get("events")[0] + deserialised_event = Event.from_parameters(evt) + assert deserialised_event == event + assert deserialised_event.timestamp > time_before_adding_events def test_push_multiple_events(rasa_app: SanicTestClient): - cid = str(uuid.uuid1()) - conversation = "/conversations/{}".format(cid) + conversation_id = str(uuid.uuid1()) + conversation = f"/conversations/{conversation_id}" events = [e.as_dict() for e in test_events] _, response = rasa_app.post( - 
"{}/tracker/events".format(conversation), + f"{conversation}/tracker/events", json=events, - headers={"Content-Type": "application/json"}, + headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE}, ) assert response.json is not None assert response.status == 200 - _, tracker_response = rasa_app.get("/conversations/{}/tracker".format(cid)) + _, tracker_response = rasa_app.get(f"/conversations/{conversation_id}/tracker") tracker = tracker_response.json assert tracker is not None # there is also an `ACTION_LISTEN` event at the start - assert len(tracker.get("events")) == len(test_events) + 1 - assert tracker.get("events")[1:] == events + assert tracker.get("events") == events def test_put_tracker(rasa_app: SanicTestClient): @@ -554,7 +894,7 @@ def test_put_tracker(rasa_app: SanicTestClient): _, response = rasa_app.put( "/conversations/pushtracker/tracker/events", json=data, - headers={"Content-Type": "application/json"}, + headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE}, ) content = response.json assert response.status == 200 @@ -580,15 +920,15 @@ def test_sorted_predict(rasa_app: SanicTestClient): def _create_tracker_for_sender(app: SanicTestClient, sender_id: Text) -> None: data = [event.as_dict() for event in test_events[:3]] _, response = app.put( - "/conversations/{}/tracker/events".format(sender_id), + f"/conversations/{sender_id}/tracker/events", json=data, - headers={"Content-Type": "application/json"}, + headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE}, ) assert response.status == 200 -def test_get_tracker_with_jwt(rasa_secured_app): +def test_get_tracker_with_jwt(rasa_secured_app: SanicTestClient): # token generated with secret "core" and algorithm HS256 # on https://jwt.io/ @@ -627,10 +967,8 @@ def test_get_tracker_with_jwt(rasa_secured_app): assert response.status == 200 -def test_list_routes(default_agent): - from rasa import server - - app = server.create_app(default_agent, auth_token=None) +def test_list_routes(default_agent: Agent): + app = rasa.server.create_app(default_agent, auth_token=None) routes = utils.list_routes(app) assert set(routes.keys()) == { @@ -642,6 +980,7 @@ def test_list_routes(default_agent): "replace_events", "retrieve_story", "execute_action", + "trigger_intent", "predict", "add_message", "train", @@ -665,7 +1004,9 @@ def test_unload_model_error(rasa_app: SanicTestClient): def test_get_domain(rasa_app: SanicTestClient): - _, response = rasa_app.get("/domain", headers={"accept": "application/json"}) + _, response = rasa_app.get( + "/domain", headers={"accept": rasa.server.JSON_CONTENT_TYPE} + ) content = response.json @@ -674,7 +1015,7 @@ def test_get_domain(rasa_app: SanicTestClient): assert "intents" in content assert "entities" in content assert "slots" in content - assert "templates" in content + assert "responses" in content assert "actions" in content @@ -684,7 +1025,7 @@ def test_get_domain_invalid_accept_header(rasa_app: SanicTestClient): assert response.status == 406 -def test_load_model(rasa_app: SanicTestClient, trained_core_model): +def test_load_model(rasa_app: SanicTestClient, trained_core_model: Text): _, response = rasa_app.get("/status") assert response.status == 200 @@ -705,7 +1046,9 @@ def test_load_model(rasa_app: SanicTestClient, trained_core_model): assert old_fingerprint != response.json["fingerprint"] -def test_load_model_from_model_server(rasa_app: SanicTestClient, trained_core_model): +def test_load_model_from_model_server( + rasa_app: SanicTestClient, trained_core_model: Text +): _, response = 
rasa_app.get("/status") assert response.status == 200 @@ -757,7 +1100,7 @@ def test_load_model_invalid_configuration(rasa_app: SanicTestClient): def test_execute(rasa_app: SanicTestClient): _create_tracker_for_sender(rasa_app, "test_execute") - data = {"name": "utter_greet"} + data = {INTENT_NAME_KEY: "utter_greet"} _, response = rasa_app.post("/conversations/test_execute/execute", json=data) assert response.status == 200 @@ -772,9 +1115,7 @@ def test_execute_with_missing_action_name(rasa_app: SanicTestClient): _create_tracker_for_sender(rasa_app, test_sender) data = {"wrong-key": "utter_greet"} - _, response = rasa_app.post( - "/conversations/{}/execute".format(test_sender), json=data - ) + _, response = rasa_app.post(f"/conversations/{test_sender}/execute", json=data) assert response.status == 400 @@ -784,11 +1125,43 @@ def test_execute_with_not_existing_action(rasa_app: SanicTestClient): _create_tracker_for_sender(rasa_app, test_sender) data = {"name": "ka[pa[opi[opj[oj[oija"} + _, response = rasa_app.post(f"/conversations/{test_sender}/execute", json=data) + + assert response.status == 500 + + +def test_trigger_intent(rasa_app: SanicTestClient): + data = {INTENT_NAME_KEY: "greet"} + _, response = rasa_app.post("/conversations/test_trigger/trigger_intent", json=data) + + assert response.status == 200 + + parsed_content = response.json + assert parsed_content["tracker"] + assert parsed_content["messages"] + + +def test_trigger_intent_with_missing_intent_name(rasa_app: SanicTestClient): + test_sender = "test_trigger_intent_with_missing_action_name" + + data = {"wrong-key": "greet"} _, response = rasa_app.post( - "/conversations/{}/execute".format(test_sender), json=data + f"/conversations/{test_sender}/trigger_intent", json=data ) - assert response.status == 500 + assert response.status == 400 + + +def test_trigger_intent_with_not_existing_intent(rasa_app: SanicTestClient): + test_sender = "test_trigger_intent_with_not_existing_intent" + _create_tracker_for_sender(rasa_app, test_sender) + + data = {INTENT_NAME_KEY: "ka[pa[opi[opj[oj[oija"} + _, response = rasa_app.post( + f"/conversations/{test_sender}/trigger_intent", json=data + ) + + assert response.status == 404 @pytest.mark.parametrize( @@ -802,7 +1175,7 @@ def test_execute_with_not_existing_action(rasa_app: SanicTestClient): ], ) def test_get_output_channel( - input_channels: List[Text], output_channel_to_use, expected_channel: Type + input_channels: List[Text], output_channel_to_use: Text, expected_channel: Type ): request = MagicMock() app = MagicMock() diff --git a/tests/test_test.py b/tests/test_test.py new file mode 100644 index 000000000000..126ad90836d4 --- /dev/null +++ b/tests/test_test.py @@ -0,0 +1,179 @@ +import asyncio +import sys +from pathlib import Path +from typing import Text +from unittest.mock import Mock + +import pytest +from _pytest.capture import CaptureFixture +from _pytest.monkeypatch import MonkeyPatch + +import rasa.model +import rasa.cli.utils +from rasa.core.agent import Agent +from rasa.core.interpreter import RasaNLUInterpreter, RegexInterpreter +from rasa.nlu.test import NO_ENTITY +import rasa.core + + +def monkeypatch_get_latest_model(tmp_path: Path, monkeypatch: MonkeyPatch) -> None: + latest_model = tmp_path / "my_test_model.tar.gz" + monkeypatch.setattr(rasa.model, "get_latest_model", lambda: str(latest_model)) + + +def test_get_sanitized_model_directory_when_not_passing_model( + capsys: CaptureFixture, tmp_path: Path, monkeypatch: MonkeyPatch +): + from rasa.test import 
_get_sanitized_model_directory + + monkeypatch_get_latest_model(tmp_path, monkeypatch) + + # Create a fake model on disk so that `is_file` returns `True` + latest_model = Path(rasa.model.get_latest_model()) + latest_model.touch() + + # Input: default model file + # => Should return containing directory + new_modeldir = _get_sanitized_model_directory(str(latest_model)) + captured = capsys.readouterr() + assert not captured.out + assert new_modeldir == str(latest_model.parent) + + +def test_get_sanitized_model_directory_when_passing_model_file_explicitly( + capsys: CaptureFixture, tmp_path: Path, monkeypatch: MonkeyPatch +): + from rasa.test import _get_sanitized_model_directory + + monkeypatch_get_latest_model(tmp_path, monkeypatch) + + other_model = tmp_path / "my_test_model1.tar.gz" + assert str(other_model) != rasa.model.get_latest_model() + other_model.touch() + + # Input: some file + # => Should return containing directory and print a warning + new_modeldir = _get_sanitized_model_directory(str(other_model)) + captured = capsys.readouterr() + assert captured.out + assert new_modeldir == str(other_model.parent) + + +def test_get_sanitized_model_directory_when_passing_other_input( + capsys: CaptureFixture, tmp_path: Path, monkeypatch: MonkeyPatch +): + from rasa.test import _get_sanitized_model_directory + + monkeypatch_get_latest_model(tmp_path, monkeypatch) + + # Input: anything that is not an existing file + # => Should return input + modeldir = "random_dir" + assert not Path(modeldir).is_file() + new_modeldir = _get_sanitized_model_directory(modeldir) + captured = capsys.readouterr() + assert not captured.out + assert new_modeldir == modeldir + + +@pytest.mark.parametrize( + "targets,predictions,expected_precision,expected_fscore,expected_accuracy", + [ + ( + ["no_entity", "location", "no_entity", "location", "no_entity"], + ["no_entity", "location", "no_entity", "no_entity", "person"], + 1.0, + 0.6666666666666666, + 3 / 5, + ), + ( + ["no_entity", "no_entity", "no_entity", "no_entity", "person"], + ["no_entity", "no_entity", "no_entity", "no_entity", "no_entity"], + 0.0, + 0.0, + 4 / 5, + ), + ], +) +def test_get_evaluation_metrics( + targets, predictions, expected_precision, expected_fscore, expected_accuracy +): + from rasa.test import get_evaluation_metrics + + report, precision, f1, accuracy = get_evaluation_metrics( + targets, predictions, True, exclude_label=NO_ENTITY + ) + + assert f1 == expected_fscore + assert precision == expected_precision + assert accuracy == expected_accuracy + assert NO_ENTITY not in report + + +@pytest.mark.parametrize( + "targets,exclude_label,expected", + [ + ( + ["no_entity", "location", "location", "location", "person"], + NO_ENTITY, + ["location", "person"], + ), + ( + ["no_entity", "location", "location", "location", "person"], + None, + ["no_entity", "location", "person"], + ), + (["no_entity"], NO_ENTITY, []), + (["location", "location", "location"], NO_ENTITY, ["location"]), + ([], None, []), + ], +) +def test_get_label_set(targets, exclude_label, expected): + from rasa.test import get_unique_labels + + actual = get_unique_labels(targets, exclude_label) + assert set(expected) == set(actual) + + +async def test_interpreter_passed_to_agent( + monkeypatch: MonkeyPatch, trained_rasa_model: Text +): + from rasa.test import test_core + + # Patching is bit more complicated as we have a module `train` and function + # with the same name 😬 + monkeypatch.setattr( + sys.modules["rasa.test"], "_test_core", asyncio.coroutine(lambda *_, **__: True) + ) + + 
agent_load = Mock() + monkeypatch.setattr(Agent, "load", agent_load) + + test_core(trained_rasa_model) + + agent_load.assert_called_once() + _, _, kwargs = agent_load.mock_calls[0] + assert isinstance(kwargs["interpreter"], RasaNLUInterpreter) + + +async def test_e2e_warning_if_no_nlu_model( + monkeypatch: MonkeyPatch, trained_core_model: Text, capsys: CaptureFixture +): + from rasa.test import test_core + + # Patching is bit more complicated as we have a module `train` and function + # with the same name 😬 + monkeypatch.setattr( + sys.modules["rasa.test"], "_test_core", asyncio.coroutine(lambda *_, **__: True) + ) + + agent_load = Mock() + monkeypatch.setattr(Agent, "load", agent_load) + + test_core(trained_core_model, additional_arguments={"e2e": True}) + + assert "No NLU model found. Using default" in capsys.readouterr().out + + agent_load.assert_called_once() + _, _, kwargs = agent_load.mock_calls[0] + assert isinstance(kwargs["interpreter"], RegexInterpreter) diff --git a/tests/test_train.py b/tests/test_train.py index eccdf3b2d4fc..73fd2582301d 100644 --- a/tests/test_train.py +++ b/tests/test_train.py @@ -1,16 +1,27 @@ +import asyncio +import sys import tempfile import os -import shutil +from pathlib import Path +from typing import Text, Dict +from unittest.mock import Mock import pytest +from _pytest.capture import CaptureFixture +from _pytest.monkeypatch import MonkeyPatch import rasa.model - -from rasa.train import train +import rasa.core +from rasa.core.interpreter import RasaNLUInterpreter + +from rasa.train import train_core, train_nlu, train +from tests.conftest import DEFAULT_CONFIG_PATH, DEFAULT_NLU_DATA +from tests.core.conftest import ( + DEFAULT_DOMAIN_PATH_WITH_SLOTS, + DEFAULT_STORIES_FILE, +) from tests.core.test_model import _fingerprint -TEST_TEMP = "test_tmp" - @pytest.mark.parametrize( "parameters", @@ -20,7 +31,7 @@ {"model_name": None, "prefix": None}, ], ) -def test_package_model(trained_rasa_model, parameters): +def test_package_model(trained_rasa_model: Text, parameters: Dict): output_path = tempfile.mkdtemp() train_path = rasa.model.unpack_model(trained_rasa_model) @@ -45,32 +56,43 @@ def test_package_model(trained_rasa_model, parameters): assert file_name.endswith(".tar.gz") -@pytest.fixture -def move_tempdir(): - # Create a new *empty* tmp directory - shutil.rmtree(TEST_TEMP, ignore_errors=True) - os.mkdir(TEST_TEMP) - tempfile.tempdir = TEST_TEMP - yield - tempfile.tempdir = None - shutil.rmtree(TEST_TEMP) +def count_temp_rasa_files(directory: Text) -> int: + return len( + [ + entry + for entry in os.listdir(directory) + if not any( + [ + # Ignore the following files/directories: + entry == "__pycache__", # Python bytecode + entry.endswith(".py") # Temp .py files created by TF + # Anything else is considered to be created by Rasa + ] + ) + ] + ) def test_train_temp_files( - move_tempdir, - default_domain_path, - default_stories_file, - default_stack_config, - default_nlu_data, + tmp_path: Text, + monkeypatch: MonkeyPatch, + default_domain_path: Text, + default_stories_file: Text, + default_stack_config: Text, + default_nlu_data: Text, ): + monkeypatch.setattr(tempfile, "tempdir", tmp_path) + output = "test_train_temp_files_models" + train( default_domain_path, default_stack_config, [default_stories_file, default_nlu_data], + output=output, force_training=True, ) - assert len(os.listdir(TEST_TEMP)) == 0 + assert count_temp_rasa_files(tempfile.tempdir) == 0 # After training the model, try to do it again. 
This shouldn't try to train # a new model because nothing has been changed. It also shouldn't create @@ -79,6 +101,149 @@ def test_train_temp_files( default_domain_path, default_stack_config, [default_stories_file, default_nlu_data], + output=output, + ) + + assert count_temp_rasa_files(tempfile.tempdir) == 0 + + +def test_train_core_temp_files( + tmp_path: Text, + monkeypatch: MonkeyPatch, + default_domain_path: Text, + default_stories_file: Text, + default_stack_config: Text, +): + monkeypatch.setattr(tempfile, "tempdir", tmp_path) + + train_core( + default_domain_path, + default_stack_config, + default_stories_file, + output="test_train_core_temp_files_models", + ) + + assert count_temp_rasa_files(tempfile.tempdir) == 0 + + +def test_train_nlu_temp_files( + tmp_path: Text, + monkeypatch: MonkeyPatch, + default_stack_config: Text, + default_nlu_data: Text, +): + monkeypatch.setattr(tempfile, "tempdir", tmp_path) + + train_nlu( + default_stack_config, + default_nlu_data, + output="test_train_nlu_temp_files_models", + ) + + assert count_temp_rasa_files(tempfile.tempdir) == 0 + + +def test_train_nlu_wrong_format_error_message( + capsys: CaptureFixture, + tmp_path: Text, + monkeypatch: MonkeyPatch, + default_stack_config: Text, + incorrect_nlu_data: Text, +): + monkeypatch.setattr(tempfile, "tempdir", tmp_path) + + train_nlu( + default_stack_config, + incorrect_nlu_data, + output="test_train_nlu_temp_files_models", + ) + + captured = capsys.readouterr() + assert "Please verify the data format" in captured.out + + +def test_train_nlu_no_nlu_file_error_message( + capsys: CaptureFixture, + tmp_path: Text, + monkeypatch: MonkeyPatch, + default_stack_config: Text, +): + monkeypatch.setattr(tempfile, "tempdir", tmp_path) + + train_nlu(default_stack_config, "", output="test_train_nlu_temp_files_models") + + captured = capsys.readouterr() + assert "No NLU data given" in captured.out + + +def test_trained_interpreter_passed_to_core_training( + monkeypatch: MonkeyPatch, tmp_path: Path, unpacked_trained_moodbot_path: Text +): + # Skip actual NLU training and return trained interpreter path from fixture + _train_nlu_with_validated_data = Mock(return_value=unpacked_trained_moodbot_path) + + # Patching is bit more complicated as we have a module `train` and function + # with the same name 😬 + monkeypatch.setattr( + sys.modules["rasa.train"], + "_train_nlu_with_validated_data", + asyncio.coroutine(_train_nlu_with_validated_data), ) - assert len(os.listdir(TEST_TEMP)) == 0 + # Mock the actual Core training + _train_core = Mock() + monkeypatch.setattr(rasa.core, "train", asyncio.coroutine(_train_core)) + + train( + DEFAULT_DOMAIN_PATH_WITH_SLOTS, + DEFAULT_CONFIG_PATH, + [DEFAULT_STORIES_FILE, DEFAULT_NLU_DATA], + str(tmp_path), + ) + + _train_core.assert_called_once() + _, _, kwargs = _train_core.mock_calls[0] + assert isinstance(kwargs["interpreter"], RasaNLUInterpreter) + + +def test_interpreter_of_old_model_passed_to_core_training( + monkeypatch: MonkeyPatch, tmp_path: Path, trained_moodbot_path: Text +): + # NLU isn't retrained + monkeypatch.setattr( + rasa.model.FingerprintComparisonResult, + rasa.model.FingerprintComparisonResult.should_retrain_nlu.__name__, + lambda _: False, + ) + + # An old model with an interpreter exists + monkeypatch.setattr( + rasa.model, rasa.model.get_latest_model.__name__, lambda _: trained_moodbot_path + ) + + # Mock the actual Core training + _train_core = Mock() + monkeypatch.setattr(rasa.core, "train", asyncio.coroutine(_train_core)) + + train( + 
DEFAULT_DOMAIN_PATH_WITH_SLOTS, + DEFAULT_CONFIG_PATH, + [DEFAULT_STORIES_FILE, DEFAULT_NLU_DATA], + str(tmp_path), + ) + + _train_core.assert_called_once() + _, _, kwargs = _train_core.mock_calls[0] + assert isinstance(kwargs["interpreter"], RasaNLUInterpreter) + + +def test_load_interpreter_returns_none_for_none(): + from rasa.train import _load_interpreter + + assert _load_interpreter(None) is None + + +def test_interpreter_from_previous_model_returns_none_for_none(): + from rasa.train import _interpreter_from_previous_model + + assert _interpreter_from_previous_model(None) is None diff --git a/tests/test_validator.py b/tests/test_validator.py new file mode 100644 index 000000000000..2337883b3eb2 --- /dev/null +++ b/tests/test_validator.py @@ -0,0 +1,173 @@ +from pep440_version_utils import Version + +import pytest + +from rasa.constants import LATEST_TRAINING_DATA_FORMAT_VERSION +from rasa.validator import Validator, KEY_TRAINING_DATA_FORMAT_VERSION +from rasa.importers.rasa import RasaFileImporter +from tests.conftest import DEFAULT_NLU_DATA +from tests.core.conftest import DEFAULT_STORIES_FILE +import rasa.utils.io as io_utils + + +async def test_verify_intents_does_not_fail_on_valid_data(): + importer = RasaFileImporter( + domain_path="examples/moodbot/domain.yml", + training_data_paths=[DEFAULT_NLU_DATA], + ) + validator = await Validator.from_importer(importer) + assert validator.verify_intents() + + +async def test_verify_intents_does_fail_on_invalid_data(): + # domain and nlu data are from different domain and should produce warnings + importer = RasaFileImporter( + domain_path="data/test_domains/default.yml", + training_data_paths=[DEFAULT_NLU_DATA], + ) + validator = await Validator.from_importer(importer) + assert not validator.verify_intents() + + +async def test_verify_valid_utterances(): + importer = RasaFileImporter( + domain_path="data/test_domains/default.yml", + training_data_paths=[DEFAULT_NLU_DATA, DEFAULT_STORIES_FILE], + ) + validator = await Validator.from_importer(importer) + assert validator.verify_utterances() + + +async def test_verify_story_structure(): + importer = RasaFileImporter( + domain_path="data/test_domains/default.yml", + training_data_paths=[DEFAULT_STORIES_FILE], + ) + validator = await Validator.from_importer(importer) + assert validator.verify_story_structure(ignore_warnings=False) + + +async def test_verify_bad_story_structure(): + importer = RasaFileImporter( + domain_path="data/test_domains/default.yml", + training_data_paths=["data/test_stories/stories_conflicting_2.md"], + ) + validator = await Validator.from_importer(importer) + assert not validator.verify_story_structure(ignore_warnings=False) + + +async def test_verify_bad_story_structure_ignore_warnings(): + importer = RasaFileImporter( + domain_path="data/test_domains/default.yml", + training_data_paths=["data/test_stories/stories_conflicting_2.md"], + ) + validator = await Validator.from_importer(importer) + assert validator.verify_story_structure(ignore_warnings=True) + + +async def test_fail_on_invalid_utterances(tmpdir): + # domain and stories are from different domain and should produce warnings + invalid_domain = str(tmpdir / "invalid_domain.yml") + io_utils.write_yaml( + { + "responses": {"utter_greet": {"text": "hello"}}, + "actions": [ + "utter_greet", + "utter_non_existent", # error: utter template odes not exist + ], + }, + invalid_domain, + ) + importer = RasaFileImporter(domain_path=invalid_domain) + validator = await Validator.from_importer(importer) + assert not 
validator.verify_utterances() + + +async def test_verify_there_is_example_repetition_in_intents(): + # moodbot nlu data already has duplicated example 'good afternoon' + # for intents greet and goodbye + importer = RasaFileImporter( + domain_path="examples/moodbot/domain.yml", + training_data_paths=[DEFAULT_NLU_DATA], + ) + validator = await Validator.from_importer(importer) + assert not validator.verify_example_repetition_in_intents(False) + + +async def test_verify_logging_message_for_repetition_in_intents(caplog): + # moodbot nlu data already has duplicated example 'good afternoon' + # for intents greet and goodbye + importer = RasaFileImporter( + domain_path="examples/moodbot/domain.yml", + training_data_paths=[DEFAULT_NLU_DATA], + ) + validator = await Validator.from_importer(importer) + caplog.clear() # clear caplog to avoid counting earlier debug messages + with pytest.warns(UserWarning) as record: + validator.verify_example_repetition_in_intents(False) + assert len(record) == 1 + assert "You should fix that conflict " in record[0].message.args[0] + + +async def test_early_exit_on_invalid_domain(): + domain_path = "data/test_domains/duplicate_intents.yml" + + importer = RasaFileImporter(domain_path=domain_path) + with pytest.warns(UserWarning) as record: + validator = await Validator.from_importer(importer) + validator.verify_domain_validity() + + # two for non-unique domains, two for missing version + assert len(record) == 4 + assert ( + f"Loading domain from '{domain_path}' failed. Using empty domain. " + "Error: 'Intents are not unique! Found multiple intents with name(s) " + "['default', 'goodbye']. Either rename or remove the duplicate ones.'" + in record[1].message.args[0] + ) + assert record[1].message.args[0] == record[3].message.args[0] + + +async def test_verify_there_is_not_example_repetition_in_intents(): + importer = RasaFileImporter( + domain_path="examples/moodbot/domain.yml", + training_data_paths=["examples/knowledgebasebot/data/nlu.md"], + ) + validator = await Validator.from_importer(importer) + assert validator.verify_example_repetition_in_intents(False) + + +async def test_future_training_data_format_version_not_compatible(): + + next_minor = str(Version(LATEST_TRAINING_DATA_FORMAT_VERSION).next_minor()) + + incompatible_version = {KEY_TRAINING_DATA_FORMAT_VERSION: next_minor} + + with pytest.warns(UserWarning): + assert not Validator.validate_training_data_format_version( + incompatible_version, "" + ) + + +async def test_compatible_training_data_format_version(): + + prev_major = str(Version("1.0")) + + compatible_version_1 = {KEY_TRAINING_DATA_FORMAT_VERSION: prev_major} + compatible_version_2 = { + KEY_TRAINING_DATA_FORMAT_VERSION: LATEST_TRAINING_DATA_FORMAT_VERSION + } + + for version in [compatible_version_1, compatible_version_2]: + with pytest.warns(None): + assert Validator.validate_training_data_format_version(version, "") + + +async def test_invalid_training_data_format_version_warns(): + + invalid_version_1 = {KEY_TRAINING_DATA_FORMAT_VERSION: 2.0} + invalid_version_2 = {KEY_TRAINING_DATA_FORMAT_VERSION: "Rasa"} + + for version in [invalid_version_1, invalid_version_2]: + with pytest.warns(UserWarning): + assert Validator.validate_training_data_format_version(version, "") diff --git a/tests/utilities.py b/tests/utilities.py index 6c334f75905e..dd0bd950fe87 100644 --- a/tests/utilities.py +++ b/tests/utilities.py @@ -1,4 +1,11 @@ from yarl import URL +from typing import Text + +import rasa.utils.io as io_utils + +from 
rasa.nlu.classifiers.diet_classifier import DIETClassifier +from rasa.nlu.selectors.response_selector import ResponseSelector +from rasa.utils.tensorflow.constants import EPOCHS def latest_request(mocked, request_type, path): @@ -7,3 +14,20 @@ def latest_request(mocked, request_type, path): def json_of_latest_request(r): return r[-1].kwargs["json"] + + +def update_number_of_epochs(config_path: Text, output_file: Text): + config = io_utils.read_yaml_file(config_path) + + if "pipeline" not in config.keys(): + raise ValueError(f"Invalid config provided! File: '{config_path}'.") + + for component in config["pipeline"]: + # do not update epochs for pipeline templates + if not isinstance(component, dict): + continue + + if component["name"] in [DIETClassifier.name, ResponseSelector.name]: + component[EPOCHS] = 1 + + io_utils.write_yaml(config, output_file) diff --git a/tests/utils/tensorflow/__init__.py b/tests/utils/tensorflow/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/utils/tensorflow/test_model_data.py b/tests/utils/tensorflow/test_model_data.py new file mode 100644 index 000000000000..627b37c23f1a --- /dev/null +++ b/tests/utils/tensorflow/test_model_data.py @@ -0,0 +1,189 @@ +import copy + +import pytest +import scipy.sparse +import numpy as np + +from rasa.utils.tensorflow.model_data import RasaModelData + + +@pytest.fixture +async def model_data() -> RasaModelData: + return RasaModelData( + label_key="intent_ids", + data={ + "text_features": [ + np.array( + [ + np.random.rand(5, 14), + np.random.rand(2, 14), + np.random.rand(3, 14), + np.random.rand(1, 14), + np.random.rand(3, 14), + ] + ), + np.array( + [ + scipy.sparse.csr_matrix(np.random.randint(5, size=(5, 10))), + scipy.sparse.csr_matrix(np.random.randint(5, size=(2, 10))), + scipy.sparse.csr_matrix(np.random.randint(5, size=(3, 10))), + scipy.sparse.csr_matrix(np.random.randint(5, size=(1, 10))), + scipy.sparse.csr_matrix(np.random.randint(5, size=(3, 10))), + ] + ), + ], + "intent_features": [ + np.array( + [ + np.random.randint(2, size=(5, 10)), + np.random.randint(2, size=(2, 10)), + np.random.randint(2, size=(3, 10)), + np.random.randint(2, size=(1, 10)), + np.random.randint(2, size=(3, 10)), + ] + ) + ], + "intent_ids": [np.array([0, 1, 0, 1, 1])], + "tag_ids": [ + np.array( + [ + np.array([[0], [1], [1], [0], [2]]), + np.array([[2], [0]]), + np.array([[0], [1], [1]]), + np.array([[0], [1]]), + np.array([[0], [0], [0]]), + ] + ) + ], + }, + ) + + +def test_shuffle_session_data(model_data: RasaModelData): + before = copy.copy(model_data) + + # precondition + assert np.all( + np.array(list(before.values())) == np.array(list(model_data.values())) + ) + + data = model_data._shuffled_data(model_data.data) + + # check that original data didn't change + assert np.all( + np.array(list(before.values())) == np.array(list(model_data.values())) + ) + # check that new data is different + assert np.all(np.array(model_data.values()) != np.array(data.values())) + + +def test_split_data_by_label(model_data: RasaModelData): + split_model_data = model_data._split_by_label_ids( + model_data.data, model_data.get("intent_ids")[0], np.array([0, 1]) + ) + + assert len(split_model_data) == 2 + for s in split_model_data: + assert len(set(s.get("intent_ids")[0])) == 1 + + +def test_split_data_by_none_label(model_data: RasaModelData): + model_data.label_key = None + + split_model_data = model_data.split(2, 42) + + assert len(split_model_data) == 2 + + train_data = split_model_data[0] + test_data = 
split_model_data[1] + + # train data should have 3 examples + assert len(train_data.get("intent_ids")[0]) == 3 + # test data should have 2 examples + assert len(test_data.get("intent_ids")[0]) == 2 + + +def test_train_val_split(model_data: RasaModelData): + train_model_data, test_model_data = model_data.split(2, 42) + + for k, values in model_data.items(): + assert len(values) == len(train_model_data.get(k)) + assert len(values) == len(test_model_data.get(k)) + for i, v in enumerate(values): + assert v[0].dtype == train_model_data.get(k)[i][0].dtype + + for values in train_model_data.values(): + for v in values: + assert v.shape[0] == 3 + + for values in test_model_data.values(): + for v in values: + assert v.shape[0] == 2 + + +@pytest.mark.parametrize("size", [0, 1, 5]) +def test_train_val_split_incorrect_size(model_data: RasaModelData, size: int): + with pytest.raises(ValueError): + model_data.split(size, 42) + + +def test_session_data_for_ids(model_data: RasaModelData): + filtered_data = model_data._data_for_ids(model_data.data, np.array([0, 1])) + + for values in filtered_data.values(): + for v in values: + assert v.shape[0] == 2 + + k = list(model_data.keys())[0] + + assert np.all(np.array(filtered_data[k][0][0]) == np.array(model_data.get(k)[0][0])) + assert np.all(np.array(filtered_data[k][0][1]) == np.array(model_data.get(k)[0][1])) + + +def test_get_number_of_examples(model_data: RasaModelData): + assert model_data.number_of_examples() == 5 + + +def test_get_number_of_examples_raises_value_error(model_data: RasaModelData): + model_data.data["dense"] = [np.random.randint(5, size=(2, 10))] + with pytest.raises(ValueError): + model_data.number_of_examples() + + +def test_gen_batch(model_data: RasaModelData): + iterator = model_data._gen_batch(2, shuffle=True, batch_strategy="balanced") + print(model_data.data["tag_ids"][0]) + batch = next(iterator) + assert len(batch) == 7 + assert len(batch[0]) == 2 + + batch = next(iterator) + assert len(batch) == 7 + assert len(batch[0]) == 2 + + batch = next(iterator) + assert len(batch) == 7 + assert len(batch[0]) == 1 + + with pytest.raises(StopIteration): + next(iterator) + + +def test_balance_model_data(model_data: RasaModelData): + data = model_data._balanced_data(model_data.data, 2, False) + + assert np.all(data.get("intent_ids")[0] == np.array([0, 1, 1, 0, 1])) + + +def test_not_balance_model_data(model_data: RasaModelData): + test_model_data = RasaModelData(label_key="tag_ids", data=model_data.data) + + data = test_model_data._balanced_data(test_model_data.data, 2, False) + + assert np.all(data.get("tag_ids") == test_model_data.get("tag_ids")) + + +def test_get_num_of_features(model_data: RasaModelData): + num_features = model_data.feature_dimension("text_features") + + assert num_features == 24 diff --git a/tests/utils/tensorflow/test_tf_environment.py b/tests/utils/tensorflow/test_tf_environment.py new file mode 100644 index 000000000000..3f30570d7975 --- /dev/null +++ b/tests/utils/tensorflow/test_tf_environment.py @@ -0,0 +1,11 @@ +import pytest +from typing import Text, Dict +from rasa.utils.tensorflow.environment import _parse_gpu_config + + +@pytest.mark.parametrize( + "gpu_config_string, parsed_gpu_config", + [("0: 1024", {0: 1024}), ("0:1024, 1:2048", {0: 1024, 1: 2048})], +) +def test_gpu_config_parser(gpu_config_string: Text, parsed_gpu_config: Dict[int, int]): + assert _parse_gpu_config(gpu_config_string) == parsed_gpu_config diff --git a/tests/utils/test_common.py b/tests/utils/test_common.py index 3c4db97adb77..de550e179600 
100644 --- a/tests/utils/test_common.py +++ b/tests/utils/test_common.py @@ -1,10 +1,95 @@ -import rasa.utils.common as common_utils +import logging +from typing import Collection, List, Text + +import pytest + +from rasa.utils.common import ( + raise_warning, + sort_list_of_dicts_by_first_key, + transform_collection_to_sentence, + RepeatedLogFilter, +) def test_sort_dicts_by_keys(): test_data = [{"Z": 1}, {"A": 10}] expected = [{"A": 10}, {"Z": 1}] - actual = common_utils.sort_list_of_dicts_by_first_key(test_data) + actual = sort_list_of_dicts_by_first_key(test_data) assert actual == expected + + +@pytest.mark.parametrize( + "collection, possible_outputs", + [ + (["a", "b", "c"], ["a, b and c"]), + (["a", "b"], ["a and b"]), + (["a"], ["a"]), + ( + {"a", "b", "c"}, + [ + "a, b and c", + "a, c and b", + "b, a and c", + "b, c and a", + "c, a and b", + "c, b and a", + ], + ), + ({"a", "b"}, ["a and b", "b and a"]), + ({"a"}, ["a"]), + ({}, [""]), + ], +) +def test_transform_collection_to_sentence( + collection: Collection, possible_outputs: List[Text] +): + actual = transform_collection_to_sentence(collection) + assert actual in possible_outputs + + +def test_raise_user_warning(): + with pytest.warns(UserWarning) as record: + raise_warning("My warning.") + + assert len(record) == 1 + assert record[0].message.args[0] == "My warning." + + +def test_raise_future_warning(): + with pytest.warns(FutureWarning) as record: + raise_warning("My future warning.", FutureWarning) + + assert len(record) == 1 + assert record[0].message.args[0] == "My future warning." + + +def test_raise_deprecation(): + with pytest.warns(DeprecationWarning) as record: + raise_warning("My warning.", DeprecationWarning) + + assert len(record) == 1 + assert record[0].message.args[0] == "My warning." 
+ assert isinstance(record[0].message, DeprecationWarning) + + +def test_repeated_log_filter(): + log_filter = RepeatedLogFilter() + record1 = logging.LogRecord( + "rasa", logging.INFO, "/some/path.py", 42, "Super msg: %s", ("yes",), None + ) + record1_same = logging.LogRecord( + "rasa", logging.INFO, "/some/path.py", 42, "Super msg: %s", ("yes",), None + ) + record2_other_args = logging.LogRecord( + "rasa", logging.INFO, "/some/path.py", 42, "Super msg: %s", ("no",), None + ) + record3_other = logging.LogRecord( + "rasa", logging.INFO, "/some/path.py", 42, "Other msg", (), None + ) + assert log_filter.filter(record1) is True + assert log_filter.filter(record1_same) is False # same log + assert log_filter.filter(record2_other_args) is True + assert log_filter.filter(record3_other) is True + assert log_filter.filter(record1) is True # same as before, but not repeated diff --git a/tests/utils/test_endpoints.py b/tests/utils/test_endpoints.py index 115e1ae51ca9..8fde713699f3 100644 --- a/tests/utils/test_endpoints.py +++ b/tests/utils/test_endpoints.py @@ -15,6 +15,16 @@ ("https://example.com/", None, "https://example.com/"), ("https://example.com/", "test", "https://example.com/test"), ("https://example.com/", "test/", "https://example.com/test/"), + ( + "http://duckling.rasa.com:8000", + "/parse", + "http://duckling.rasa.com:8000/parse", + ), + ( + "http://duckling.rasa.com:8000/", + "/parse", + "http://duckling.rasa.com:8000/parse", + ), ], ) def test_concat_url(base, subpath, expected_result): @@ -94,3 +104,19 @@ def test_endpoint_config_custom_token_name(): actual = endpoint_utils.EndpointConfig.from_dict(test_data) assert actual.token_name == "test_token" + + +async def test_request_non_json_response(): + with aioresponses() as mocked: + endpoint = endpoint_utils.EndpointConfig("https://example.com/") + + mocked.post( + "https://example.com/test", + payload="ok", + content_type="application/text", + status=200, + ) + + response = await endpoint.request("post", subpath="test") + + assert not response diff --git a/tests/utils/test_io.py b/tests/utils/test_io.py index 4912382095e5..70f7db7e8dbc 100644 --- a/tests/utils/test_io.py +++ b/tests/utils/test_io.py @@ -1,7 +1,9 @@ -import io import os +import string +import uuid +from collections import OrderedDict from pathlib import Path -from typing import Callable, Text, List, Set +from typing import Callable, Text, List, Set, Dict, Any import pytest from prompt_toolkit.document import Document @@ -146,9 +148,7 @@ def test_emojis_in_tmp_file(): - two £ (?u)\\b\\w+\\b f\u00fcr """ test_file = io_utils.create_temporary_file(test_data) - with io.open(test_file, mode="r", encoding="utf-8") as f: - content = f.read() - content = io_utils.read_yaml(content) + content = io_utils.read_yaml_file(test_file) assert content["data"][0] == "one 😁💯 👩🏿‍💻👨🏿‍💻" assert content["data"][1] == "two £ (?u)\\b\\w+\\b für" @@ -280,7 +280,7 @@ def test_list_directory( sub_sub_directory.mkdir() sub_sub_file = sub_sub_directory / "sub_file.txt" - sub_sub_file.write_text("", encoding="utf-8") + sub_sub_file.write_text("", encoding=io_utils.DEFAULT_ENCODING) file1 = subdirectory / "file.txt" file1.write_text("", encoding="utf-8") @@ -294,3 +294,105 @@ def test_list_directory( expected = {str(subdirectory / entry) for entry in expected} assert set(list_function(str(subdirectory))) == expected + + +def test_write_json_file(tmp_path: Path): + expected = {"abc": "dasds", "list": [1, 2, 3, 4], "nested": {"a": "b"}} + file_path = str(tmp_path / "abc.txt") + + 
io_utils.dump_obj_as_json_to_file(file_path, expected) + assert io_utils.read_json_file(file_path) == expected + + +def test_write_utf_8_yaml_file(tmp_path: Path): + """This test makes sure that dumping a yaml doesn't result in Uxxxx sequences + but rather directly dumps the unicode character.""" + + file_path = str(tmp_path / "test.yml") + data = {"data": "amazing 🌈"} + + io_utils.write_yaml(data, file_path) + assert io_utils.read_file(file_path) == "data: amazing 🌈\n" + + +def test_create_directory_if_new(tmp_path: Path): + directory = str(tmp_path / "a" / "b") + io_utils.create_directory(directory) + + assert os.path.exists(directory) + + +def test_create_directory_if_already_exists(tmp_path: Path): + # This should not throw an exception + io_utils.create_directory(str(tmp_path)) + assert True + + +def test_create_directory_for_file(tmp_path: Path): + file = str(tmp_path / "dir" / "test.txt") + + io_utils.create_directory_for_file(str(file)) + assert not os.path.exists(file) + assert os.path.exists(os.path.dirname(file)) + + +@pytest.mark.parametrize( + "should_preserve_key_order, expected_keys", + [(True, list(reversed(string.ascii_lowercase))),], +) +def test_dump_yaml_key_order( + tmp_path: Path, should_preserve_key_order: bool, expected_keys: List[Text] +): + file = tmp_path / "test.yml" + + # create YAML file with keys in reverse-alphabetical order + content = "" + for i in reversed(string.ascii_lowercase): + content += f"{i}: {uuid.uuid4().hex}\n" + + file.write_text(content) + + # load this file and ensure keys are in correct reverse-alphabetical order + data = io_utils.read_yaml_file(file) + assert list(data.keys()) == list(reversed(string.ascii_lowercase)) + + # dumping `data` will result in alphabetical or reverse-alphabetical list of keys, + # depending on the value of `should_preserve_key_order` + io_utils.write_yaml(data, file, should_preserve_key_order=should_preserve_key_order) + with file.open() as f: + keys = [line.split(":")[0] for line in f.readlines()] + + assert keys == expected_keys + + +@pytest.mark.parametrize( + "source, target", + [ + # ordinary dictionary + ({"b": "b", "a": "a"}, OrderedDict({"b": "b", "a": "a"})), + # dict with list + ({"b": [1, 2, 3]}, OrderedDict({"b": [1, 2, 3]})), + # nested dict + ({"b": {"c": "d"}}, OrderedDict({"b": OrderedDict({"c": "d"})})), + # doubly-nested dict + ( + {"b": {"c": {"d": "e"}}}, + OrderedDict({"b": OrderedDict({"c": OrderedDict({"d": "e"})})}), + ), + # a list is not affected + ([1, 2, 3], [1, 2, 3]), + # a string also isn't + ("hello", "hello"), + ], +) +def test_convert_to_ordered_dict(source: Any, target: Any): + assert io_utils.convert_to_ordered_dict(source) == target + + def _recursively_check_dict_is_ordered_dict(d): + if isinstance(d, dict): + assert isinstance(d, OrderedDict) + for value in d.values(): + _recursively_check_dict_is_ordered_dict(value) + + # ensure nested dicts are converted correctly + _recursively_check_dict_is_ordered_dict(target) diff --git a/tests/utils/test_train_utils.py b/tests/utils/test_train_utils.py new file mode 100644 index 000000000000..8400a2be68e9 --- /dev/null +++ b/tests/utils/test_train_utils.py @@ -0,0 +1,28 @@ +import numpy as np + +import rasa.utils.train_utils as train_utils +from rasa.nlu.constants import NUMBER_OF_SUB_TOKENS +from rasa.nlu.tokenizers.tokenizer import Token + + +def test_align_token_features(): + tokens = [ + Token("This", 0, data={NUMBER_OF_SUB_TOKENS: 1}), + Token("is", 5, data={NUMBER_OF_SUB_TOKENS: 1}), + Token("a", 8, data={NUMBER_OF_SUB_TOKENS: 
1}), + Token("sentence", 10, data={NUMBER_OF_SUB_TOKENS: 2}), + Token("embedding", 19, data={NUMBER_OF_SUB_TOKENS: 4}), + ] + + seq_dim = sum(t.get(NUMBER_OF_SUB_TOKENS) for t in tokens) + token_features = np.random.rand(1, seq_dim, 64) + + actual_features = train_utils.align_token_features([tokens], token_features) + + assert np.all(actual_features[0][0] == token_features[0][0]) + assert np.all(actual_features[0][1] == token_features[0][1]) + assert np.all(actual_features[0][2] == token_features[0][2]) + # sentence is split into 2 sub-tokens + assert np.all(actual_features[0][3] == np.mean(token_features[0][3:5], axis=0)) + # embedding is split into 4 sub-tokens + assert np.all(actual_features[0][4] == np.mean(token_features[0][5:10], axis=0)) diff --git a/tests/utils/test_validation.py b/tests/utils/test_validation.py index d691d07a2036..22af4827feb6 100644 --- a/tests/utils/test_validation.py +++ b/tests/utils/test_validation.py @@ -1,18 +1,22 @@ import pytest +from jsonschema import ValidationError + from rasa.constants import DOMAIN_SCHEMA_FILE, CONFIG_SCHEMA_FILE import rasa.utils.validation as validation_utils import rasa.utils.io as io_utils +import rasa.nlu.schemas.data_schema as schema +from tests.conftest import DEFAULT_NLU_DATA @pytest.mark.parametrize( "file, schema", [ - ("examples/restaurantbot/domain.yml", DOMAIN_SCHEMA_FILE), - ("sample_configs/config_defaults.yml", CONFIG_SCHEMA_FILE), - ("sample_configs/config_supervised_embeddings.yml", CONFIG_SCHEMA_FILE), - ("sample_configs/config_crf_custom_features.yml", CONFIG_SCHEMA_FILE), + ("examples/moodbot/domain.yml", DOMAIN_SCHEMA_FILE), + ("data/test_config/config_defaults.yml", CONFIG_SCHEMA_FILE), + ("data/test_config/config_supervised_embeddings.yml", CONFIG_SCHEMA_FILE), + ("data/test_config/config_crf_custom_features.yml", CONFIG_SCHEMA_FILE), ], ) def test_validate_yaml_schema(file, schema): @@ -24,10 +28,113 @@ def test_validate_yaml_schema(file, schema): "file, schema", [ ("data/test_domains/invalid_format.yml", DOMAIN_SCHEMA_FILE), - ("examples/restaurantbot/data/nlu.md", DOMAIN_SCHEMA_FILE), ("data/test_config/example_config.yaml", CONFIG_SCHEMA_FILE), ], ) def test_validate_yaml_schema_raise_exception(file, schema): with pytest.raises(validation_utils.InvalidYamlFileError): validation_utils.validate_yaml_schema(io_utils.read_file(file), schema) + + +def test_example_training_data_is_valid(): + demo_json = "data/examples/rasa/demo-rasa.json" + data = io_utils.read_json_file(demo_json) + validation_utils.validate_training_data(data, schema.rasa_nlu_data_schema()) + + +@pytest.mark.parametrize( + "invalid_data", + [ + {"wrong_top_level": []}, + ["this is not a toplevel dict"], + { + "rasa_nlu_data": { + "common_examples": [{"intent": "some example without text"}] + } + }, + { + "rasa_nlu_data": { + "common_examples": [ + { + "text": "mytext", + "entities": [{"start": "INVALID", "end": 0, "entity": "x"}], + } + ] + } + }, + ], +) +def test_validate_training_data_is_throwing_exceptions(invalid_data): + with pytest.raises(ValidationError): + validation_utils.validate_training_data( + invalid_data, schema.rasa_nlu_data_schema() + ) + + +def test_url_data_format(): + data = """ + { + "rasa_nlu_data": { + "entity_synonyms": [ + { + "value": "nyc", + "synonyms": ["New York City", "nyc", "the big apple"] + } + ], + "common_examples" : [ + { + "text": "show me flights to New York City", + "intent": "unk", + "entities": [ + { + "entity": "destination", + "start": 19, + "end": 32, + "value": "NYC" + } + ] + } + ] + } + }""" + 
fname = io_utils.create_temporary_file( + data.encode(io_utils.DEFAULT_ENCODING), + suffix="_tmp_training_data.json", + mode="w+b", + ) + data = io_utils.read_json_file(fname) + assert data is not None + validation_utils.validate_training_data(data, schema.rasa_nlu_data_schema()) + + +@pytest.mark.parametrize( + "invalid_data", + [ + {"group": "a", "role": "c", "value": "text"}, + ["this is not a toplevel dict"], + {"entity": 1, "role": "c", "value": "text"}, + {"entity": "e", "role": None, "value": "text"}, + ], +) +def test_validate_entity_dict_is_throwing_exceptions(invalid_data): + with pytest.raises(ValidationError): + validation_utils.validate_training_data( + invalid_data, schema.entity_dict_schema() + ) + + +@pytest.mark.parametrize( + "data", + [ + {"entity": "e", "group": "a", "role": "c", "value": "text"}, + {"entity": "e"}, + {"entity": "e", "value": "text"}, + {"entity": "e", "group": "a"}, + {"entity": "e", "role": "c"}, + {"entity": "e", "role": "c", "value": "text"}, + {"entity": "e", "group": "a", "value": "text"}, + {"entity": "e", "group": "a", "role": "c"}, + ], +) +def test_entity_dict_is_valid(data): + validation_utils.validate_training_data(data, schema.entity_dict_schema())